// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gem/i915_gem_lmem.h"

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

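/*
 * A PDE/PTE carries the 4K-aligned dma address of the next level (or of
 * the page itself) in its upper bits, with the low bits holding the
 * valid/rw flags and the PPAT index that selects the caching behaviour.
 */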
static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	if (flags & PTE_LM)
		pte |= GEN12_PPGTT_PTE_LM;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

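/*
 * Under a mediated vGPU the host (GVT-g) shadows our page tables:
 * publish the page-directory dma addresses through the vgtif scratch
 * registers and raise a g2v message so the host starts (or stops)
 * tracking this ppgtt.
 */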
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
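
/*
 * Worked example of the index math: gen8_pd_shift(lvl) is 9 * lvl, so
 * for a GTT address A, __gen8_pte_index(A, 0) is bits [20:12] of A (the
 * PTE within its page table), lvl 1 is bits [29:21] (the PDE), lvl 2 is
 * bits [38:30] (the PDPE) and, on a 4lvl vm, lvl 3 is bits [47:39] (the
 * PML4E).
 */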

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

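/*
 * Compute the number of entries, starting at *idx, that [start, end)
 * spans within a single directory at this level; the count is clamped
 * at the directory boundary so a caller never walks into a neighbouring
 * parent entry.
 */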
static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

static unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

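/*
 * Number of entries in the top-level directory, rounding up: e.g. a 48b
 * 4lvl vm has vm->total = 1ull << 48 and top = 3, giving 2^48 >> 39 =
 * 512 PML4 entries, while a 32b 3lvl vm has 2^32 >> 30 = 4 PDP entries.
 */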
static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);

	return (vm->total + (1ull << shift) - 1) >> shift;
}

static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, &pd->pt, lvl);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

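/*
 * Unbind the range [start, end) (given in page-table indices, not
 * bytes): point the affected PTEs back at scratch and, whenever that
 * empties a page table, free it and replace the parent entry with the
 * scratch entry for that level.
 */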
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = px_vaddr(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0]->encode,
				 count);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt, lvl);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

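/*
 * Populate the range with page tables taken from the preallocated
 * stash; insertion into the parent is serialised by pd->lock. A new
 * table is prefilled with the scratch encoding unless every one of its
 * entries is about to be written anyway.
 */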
static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			       struct i915_vm_pt_stash *stash,
			       struct i915_page_directory * const pd,
			       u64 * const start, const u64 end, int lvl)
{
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = stash->pt[!!lvl];
			__i915_gem_object_pin_pages(pt->base);
			i915_gem_object_make_unshrinkable(pt->base);

			if (lvl ||
			    gen8_pt_count(*start, end) < I915_PDES ||
			    intel_vgpu_active(vm->i915))
				fill_px(pt, vm->scratch[lvl]->encode);

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx])) {
				stash->pt[!!lvl] = pt->stash;
				atomic_set(&pt->used, 0);
				set_pd_entry(pd, idx, pt);
			} else {
				pt = pd->entry[idx];
			}
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			__gen8_ppgtt_alloc(vm, stash,
					   as_pd(pt), start, end, lvl);

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_alloc(struct i915_address_space *vm,
			     struct i915_vm_pt_stash *stash,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
			   &start, start + length, vm->top);
}

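/*
 * Visit every currently allocated page table within [start, end),
 * keeping each entry's use-count elevated across the callback so it
 * cannot be freed from under us.
 */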
static void __gen8_ppgtt_foreach(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 u64 *start, u64 end, int lvl,
				 void (*fn)(struct i915_address_space *vm,
					    struct i915_page_table *pt,
					    void *data),
				 void *data)
{
	unsigned int idx, len;

	len = gen8_pd_range(*start, end, lvl--, &idx);

	spin_lock(&pd->lock);
	do {
		struct i915_page_table *pt = pd->entry[idx];

		atomic_inc(&pt->used);
		spin_unlock(&pd->lock);

		if (lvl) {
			__gen8_ppgtt_foreach(vm, as_pd(pt), start, end, lvl,
					     fn, data);
		} else {
			fn(vm, pt, data);
			*start += gen8_pt_count(*start, end);
		}

		spin_lock(&pd->lock);
		atomic_dec(&pt->used);
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_foreach(struct i915_address_space *vm,
			       u64 start, u64 length,
			       void (*fn)(struct i915_address_space *vm,
					  struct i915_page_table *pt,
					  void *data),
			       void *data)
{
	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;

	__gen8_ppgtt_foreach(vm, i915_vm_to_ppgtt(vm)->pd,
			     &start, start + length, vm->top,
			     fn, data);
}

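/*
 * Write PTEs for the scatterlist, 4K pages at a time, within a single
 * pdp: returns the next index once it steps over a pdp boundary (or 0
 * when the scatterlist is exhausted) so that the caller can resume from
 * the correct page directory.
 */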
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg || sg_dma_len(iter->sg) == 0) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + sg_dma_len(iter->sg);
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			clflush_cache_range(vaddr, PAGE_SIZE);
			vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	clflush_cache_range(vaddr, PAGE_SIZE);

	return idx;
}

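/*
 * As gen8_ppgtt_insert_pte(), but taking advantage of huge pages: use a
 * 2M PDE when the dma segment is suitably sized and aligned, and
 * otherwise mark a fully covered page table as 64K (GEN8_PDE_IPS_64K)
 * if all of its entries can be expressed as 64K pages.
 */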
static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma->node.start;

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = px_vaddr(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = px_vaddr(pt);
		}

		do {
			GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		clflush_cache_range(vaddr, PAGE_SIZE);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled the whole page-table with 64K entries, or we have
		 * filled part of it, reached the end of the sg table, and
		 * have enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = px_vaddr(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0]->encode;
				vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);
			}
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}

static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
				    dma_addr_t addr,
				    u64 offset,
				    enum i915_cache_level level,
				    u32 flags)
{
	u64 idx = offset >> GEN8_PTE_SHIFT;
	struct i915_page_directory * const pdp =
		gen8_pdp_for_page_index(vm, idx);
	struct i915_page_directory *pd =
		i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	gen8_pte_t *vaddr;

	vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags);
	clflush_cache_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}

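/*
 * Build the scratch page hierarchy: scratch[0] is the scratch page
 * itself and each scratch[i] above it is a directory whose every entry
 * points at scratch[i - 1], so that an unpopulated branch at any level
 * resolves harmlessly to the scratch page.
 */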
static int gen8_init_scratch(struct i915_address_space *vm)
{
	u32 pte_flags;
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for every vm, keeping contexts and processes
	 * separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		for (i = 0; i <= vm->top; i++)
			vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);

		return 0;
	}

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	pte_flags = vm->has_read_only;
	if (i915_gem_object_is_lmem(vm->scratch[0]))
		pte_flags |= PTE_LM;

	vm->scratch[0]->encode =
		gen8_pte_encode(px_dma(vm->scratch[0]),
				I915_CACHE_LLC, pte_flags);

	for (i = 1; i <= vm->top; i++) {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto free_scratch;
		}

		ret = map_pt_dma(vm, obj);
		if (ret) {
			i915_gem_object_put(obj);
			goto free_scratch;
		}

		fill_px(obj, vm->scratch[i - 1]->encode);
		obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC);

		vm->scratch[i] = obj;
	}

	return 0;

free_scratch:
	while (i--)
		i915_gem_object_put(vm->scratch[i]);
	return ret;
}

static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;
		int err;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		err = map_pt_dma(vm, pde->pt.base);
		if (err) {
			free_pd(vm, pde);
			return err;
		}

		fill_px(pde, vm->scratch[1]->encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

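/*
 * The top-level directory is sized to the vm: a full 512 entries for a
 * 48b 4lvl vm, but only gen8_pd_top_count() entries (e.g. the 4 PDP
 * entries of a 3lvl vm) when the address space is smaller.
 */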
static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;
	int err;

	GEM_BUG_ON(count > I915_PDES);

	pd = __alloc_pd(count);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	err = map_pt_dma(vm, pd->pt.base);
	if (err)
		goto err_pd;

	fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;

err_pd:
	free_pd(vm, pd);
	return ERR_PTR(err);
}

/*
 * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal
 * x86 terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 =
 * 4GB legacy 32b address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
	ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);

	if (HAS_LMEM(gt->i915))
		ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
	else
		ppgtt->vm.alloc_pt_dma = alloc_pt_dma;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;
	ppgtt->vm.foreach = gen8_ppgtt_foreach;

	ppgtt->vm.pte_encode = gen8_pte_encode;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}