// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"

struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
        struct drm_i915_gem_object *obj;

        /*
         * To avoid severe over-allocation when dealing with min_page_size
         * restrictions, we override that behaviour here by allowing an object
         * size and page layout which can be smaller. In practice this should be
         * totally fine, since GTT paging structures are not typically inserted
         * into the GTT.
         *
         * Note that we also hit this path for the scratch page, and for this
         * case it might need to be 64K, but that should work fine here since we
         * use the passed-in size as the page size, which should ensure it also
         * has the same alignment.
         */
        obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
                                                    vm->lmem_pt_obj_flags);
        /*
         * Ensure all paging structures for this vm share the same dma-resv
         * object underneath, with the idea that one object_lock() will lock
         * them all at once.
         */
        if (!IS_ERR(obj)) {
                obj->base.resv = i915_vm_resv_get(vm);
                obj->shares_resv_from = vm;
        }

        return obj;
}

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
        struct drm_i915_gem_object *obj;

        if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
                i915_gem_shrink_all(vm->i915);

        obj = i915_gem_object_create_internal(vm->i915, sz);
        /*
         * Ensure all paging structures for this vm share the same dma-resv
         * object underneath, with the idea that one object_lock() will lock
         * them all at once.
         */
        if (!IS_ERR(obj)) {
                obj->base.resv = i915_vm_resv_get(vm);
                obj->shares_resv_from = vm;
        }

        return obj;
}
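
/*
 * Example (illustrative sketch; details vary by backend): these two
 * allocators are not usually called directly. A backend installs one of them
 * as its vm->alloc_pt_dma hook during setup, roughly along the lines of:
 *
 *      if (HAS_LMEM(gt->i915))
 *              ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
 *      else
 *              ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
 *
 * so that generic page-table code can simply call
 * vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K) without caring where the
 * backing memory lives.
 */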

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
        enum i915_map_type type;
        void *vaddr;

        type = i915_coherent_map_type(vm->i915, obj, true);
        vaddr = i915_gem_object_pin_map_unlocked(obj, type);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        i915_gem_object_make_unshrinkable(obj);
        return 0;
}

int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
        enum i915_map_type type;
        void *vaddr;

        type = i915_coherent_map_type(vm->i915, obj, true);
        vaddr = i915_gem_object_pin_map(obj, type);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        i915_gem_object_make_unshrinkable(obj);
        return 0;
}
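
/*
 * Example (illustrative sketch): a paging structure is typically obtained and
 * made CPU-visible with the allocate-then-map pattern above, something like:
 *
 *      struct drm_i915_gem_object *pt;
 *      int err;
 *
 *      pt = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
 *      if (IS_ERR(pt))
 *              return PTR_ERR(pt);
 *
 *      err = map_pt_dma(vm, pt);
 *      if (err) {
 *              i915_gem_object_put(pt);
 *              return err;
 *      }
 *
 * map_pt_dma_locked() is the variant for callers that already hold the object
 * lock, e.g. because the shared dma-resv was acquired earlier in a ww
 * transaction.
 */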

void __i915_vm_close(struct i915_address_space *vm)
{
        struct i915_vma *vma, *vn;

        if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
                return;

        list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
                struct drm_i915_gem_object *obj = vma->obj;

                /* Keep the obj (and hence the vma) alive as _we_ destroy it */
                if (!kref_get_unless_zero(&obj->base.refcount))
                        continue;

                atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
                WARN_ON(__i915_vma_unbind(vma));
                __i915_vma_put(vma);

                i915_gem_object_put(obj);
        }
        GEM_BUG_ON(!list_empty(&vm->bound_list));

        mutex_unlock(&vm->mutex);
}
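
/*
 * Note (illustrative, not a complete description): vm->open counts the
 * remaining users of the address space. i915_vm_open() bumps the count and
 * i915_vm_close() drops it again, funnelling into __i915_vm_close() above,
 * roughly:
 *
 *      vm = i915_vm_open(vm);
 *      ...
 *      i915_vm_close(vm);
 *
 * with the last close tearing down every vma still bound in the vm.
 */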

/* Lock the vm into the current ww context; if we lock one, we lock them all */
int i915_vm_lock_objects(struct i915_address_space *vm,
                         struct i915_gem_ww_ctx *ww)
{
        if (vm->scratch[0]->base.resv == &vm->_resv) {
                return i915_gem_object_lock(vm->scratch[0], ww);
        } else {
                struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

                /* We borrowed the scratch page from ggtt, take the top level object */
                return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
        }
}
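
/*
 * Example (illustrative sketch): because every paging structure shares one
 * dma-resv, callers typically take the vm-wide lock once inside a ww
 * transaction, roughly:
 *
 *      struct i915_gem_ww_ctx ww;
 *      int err;
 *
 *      for_i915_gem_ww(&ww, err, true) {
 *              err = i915_vm_lock_objects(vm, &ww);
 *              if (err)
 *                      continue;
 *
 *              ... allocate/bind paging structures while everything is locked ...
 *      }
 */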

void i915_address_space_fini(struct i915_address_space *vm)
{
        drm_mm_takedown(&vm->mm);
        mutex_destroy(&vm->mutex);
}

/**
 * i915_vm_resv_release - Final struct i915_address_space destructor
 * @kref: Pointer to the &i915_address_space.resv_ref member.
 *
 * This function is called when the last sharer of the
 * &i915_address_space._resv lock drops its reference.
 */
void i915_vm_resv_release(struct kref *kref)
{
        struct i915_address_space *vm =
                container_of(kref, typeof(*vm), resv_ref);

        dma_resv_fini(&vm->_resv);
        kfree(vm);
}

static void __i915_vm_release(struct work_struct *work)
{
        struct i915_address_space *vm =
                container_of(work, struct i915_address_space, release_work);

        vm->cleanup(vm);
        i915_address_space_fini(vm);

        i915_vm_resv_put(vm);
}

void i915_vm_release(struct kref *kref)
{
        struct i915_address_space *vm =
                container_of(kref, struct i915_address_space, ref);

        GEM_BUG_ON(i915_is_ggtt(vm));
        trace_i915_ppgtt_release(vm);

        queue_work(vm->i915->wq, &vm->release_work);
}
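
/*
 * Note (illustrative summary): two reference counts cooperate here. vm->ref
 * tracks users of the address space; when it hits zero, i915_vm_release()
 * punts the teardown to a worker so that vm->cleanup() may sleep.
 * vm->resv_ref tracks everyone still sharing the embedded dma-resv (chiefly
 * the paging-structure objects allocated above), and only once that also
 * drops to zero does i915_vm_resv_release() finally kfree() the structure.
 * Users simply pair i915_vm_get()/i915_vm_put(), while each paging-structure
 * object holds an i915_vm_resv_get() reference that is dropped when the
 * object itself is freed.
 */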

void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
        kref_init(&vm->ref);

        /*
         * Special case for GGTT, which has already done an early
         * kref_init of resv_ref before reaching this point.
         */
        if (!kref_read(&vm->resv_ref))
                kref_init(&vm->resv_ref);

        INIT_WORK(&vm->release_work, __i915_vm_release);
        atomic_set(&vm->open, 1);

        /*
         * The vm->mutex must be reclaim safe (for use in the shrinker).
         * Do a dummy acquire now under fs_reclaim so that any allocation
         * attempt holding the lock is immediately reported by lockdep.
         */
        mutex_init(&vm->mutex);
        lockdep_set_subclass(&vm->mutex, subclass);

        if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
                i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
        } else {
                /*
                 * The CHV + BXT VT-d workaround uses stop_machine(),
                 * which is allowed to allocate memory. This means &vm->mutex
                 * is the outer lock, and in theory we can allocate memory
                 * inside it through stop_machine().
                 *
                 * Add the lockdep annotation for this; the shrinker only
                 * uses trylock on this mutex.
                 */
                mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
                might_alloc(GFP_KERNEL);
                mutex_release(&vm->mutex.dep_map, _THIS_IP_);
        }
        dma_resv_init(&vm->_resv);

        GEM_BUG_ON(!vm->total);
        drm_mm_init(&vm->mm, 0, vm->total);
        vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

        INIT_LIST_HEAD(&vm->bound_list);
}
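
/*
 * Example (illustrative): the subclass argument only feeds lockdep, so that
 * GGTT and ppGTT mutexes are tracked as distinct lock classes. Backends call
 * this along the lines of:
 *
 *      i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
 *      i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
 */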

void clear_pages(struct i915_vma *vma)
{
        GEM_BUG_ON(!vma->pages);

        if (vma->pages != vma->obj->mm.pages) {
                sg_free_table(vma->pages);
                kfree(vma->pages);
        }
        vma->pages = NULL;

        memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

void *__px_vaddr(struct drm_i915_gem_object *p)
{
        enum i915_map_type type;

        GEM_BUG_ON(!i915_gem_object_has_pages(p));
        return page_unpack_bits(p->mm.mapping, &type);
}

dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(p));
        return sg_dma_address(p->mm.pages->sgl);
}

struct page *__px_page(struct drm_i915_gem_object *p)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(p));
        return sg_page(p->mm.pages->sgl);
}

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
        void *vaddr = __px_vaddr(p);

        memset64(vaddr, val, count);
        clflush_cache_range(vaddr, PAGE_SIZE);
}
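
/*
 * Example (illustrative sketch): backends use fill_page_dma() to seed a
 * freshly allocated page table so that every slot initially points at
 * scratch, conceptually:
 *
 *      fill_page_dma(pt, scratch_pte, 512);
 *
 * where scratch_pte is a hypothetical name for the backend's pre-encoded PTE
 * pointing at the scratch page, and 512 covers the u64 slots of a 4K table.
 * The trailing cache flush ensures the GPU page walker sees the update.
 */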

static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
        void *vaddr = __px_vaddr(scratch);
        u8 val;

        val = 0;
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                val = POISON_FREE;

        memset(vaddr, val, scratch->base.size);
        drm_clflush_virt_range(vaddr, scratch->base.size);
}

int setup_scratch_page(struct i915_address_space *vm)
{
        unsigned long size;

        /*
         * In order to utilize 64K pages for an object with a size < 2M, we will
         * need to support a 64K scratch page, given that every 16th entry for a
         * page-table operating in 64K mode must point to a properly aligned 64K
         * region, including any PTEs which happen to point to scratch.
         *
         * This is only relevant for the 48b PPGTT where we support
         * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
         * scratch (read-only) between all vm, we create one 64k scratch page
         * for all.
         */
        size = I915_GTT_PAGE_SIZE_4K;
        if (i915_vm_is_4lvl(vm) &&
            HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
                size = I915_GTT_PAGE_SIZE_64K;

        do {
                struct drm_i915_gem_object *obj;

                obj = vm->alloc_pt_dma(vm, size);
                if (IS_ERR(obj))
                        goto skip;

                if (map_pt_dma(vm, obj))
                        goto skip_obj;

                /* We need a single contiguous page for our scratch */
                if (obj->mm.page_sizes.sg < size)
                        goto skip_obj;

                /* And it needs to be correspondingly aligned */
                if (__px_dma(obj) & (size - 1))
                        goto skip_obj;

                /*
                 * Use a non-zero scratch page for debugging.
                 *
                 * We want a value that should be reasonably obvious
                 * to spot in the error state, while also causing a GPU hang
                 * if executed. We prefer using a clear page in production, so
                 * should it ever be accidentally used, the effect should be
                 * fairly benign.
                 */
                poison_scratch_page(obj);

                vm->scratch[0] = obj;
                vm->scratch_order = get_order(size);
                return 0;

skip_obj:
                i915_gem_object_put(obj);
skip:
                if (size == I915_GTT_PAGE_SIZE_4K)
                        return -ENOMEM;

                size = I915_GTT_PAGE_SIZE_4K;
        } while (1);
}
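
/*
 * Note (illustrative): this helper only provides the backing page;
 * vm->scratch[0] still has to be turned into an actual scratch PTE by the
 * backend afterwards, for example roughly (gen8 shown, details vary):
 *
 *      vm->scratch[0]->encode =
 *              gen8_pte_encode(px_dma(vm->scratch[0]), I915_CACHE_NONE, pte_flags);
 *
 * with the higher scratch levels (vm->scratch[1..top]) built on top of it so
 * that unused parts of the page-table tree all resolve to the scratch page.
 */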

void free_scratch(struct i915_address_space *vm)
{
        int i;

        for (i = 0; i <= vm->top; i++)
                i915_gem_object_put(vm->scratch[i]);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;

        /*
         * This function is for GTT-related workarounds. It is called on
         * driver load and after a GPU reset, so workarounds can be placed
         * here even if they get overwritten by a GPU reset.
         */
        /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
        if (IS_BROADWELL(i915))
                intel_uncore_write(uncore,
                                   GEN8_L3_LRA_1_GPGPU,
                                   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
        else if (IS_CHERRYVIEW(i915))
                intel_uncore_write(uncore,
                                   GEN8_L3_LRA_1_GPGPU,
                                   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
        else if (IS_GEN9_LP(i915))
                intel_uncore_write(uncore,
                                   GEN8_L3_LRA_1_GPGPU,
                                   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
        else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
                intel_uncore_write(uncore,
                                   GEN8_L3_LRA_1_GPGPU,
                                   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

        /*
         * To support 64K PTEs we need to first enable the use of the
         * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
         * mmio, otherwise the page-walker will simply ignore the IPS bit. This
         * shouldn't be needed after GEN10.
         *
         * 64K pages were first introduced on BDW+, although technically they
         * only *work* from gen9+. For pre-BDW we instead have the option of
         * 32K pages, but we don't currently have any support for it in our
         * driver.
         */
        if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
            GRAPHICS_VER(i915) <= 10)
                intel_uncore_rmw(uncore,
                                 GEN8_GAMW_ECO_DEV_RW_IA,
                                 0,
                                 GAMW_ECO_ENABLE_64K_IPS_FIELD);

        if (IS_GRAPHICS_VER(i915, 8, 11)) {
                bool can_use_gtt_cache = true;

                /*
                 * According to the BSpec, if we use 2M/1G pages then we also
                 * need to disable the GTT cache. At least on BDW we can see
                 * visual corruption when using 2M pages without also disabling
                 * the GTT cache.
                 */
                if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
                        can_use_gtt_cache = false;

                /* WaGttCachingOffByDefault */
                intel_uncore_write(uncore,
                                   HSW_GTT_CACHE_EN,
                                   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
                drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
                                 intel_uncore_read(uncore,
                                                   HSW_GTT_CACHE_EN) == 0);
        }
}

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
        /* TGL doesn't support LLC or AGE settings */
        intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(0),
                           GEN8_PPAT_WB | GEN8_PPAT_LLC);
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(1),
                           GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(2),
                           GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(3),
                           GEN8_PPAT_UC);
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(4),
                           GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(5),
                           GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(6),
                           GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(7),
                           GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts, each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
        struct drm_i915_private *i915 = uncore->i915;
        u64 pat;

        pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |      /* for normal objects, no eLLC */
              GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |  /* for something pointing to ptes? */
              GEN8_PPAT(3, GEN8_PPAT_UC) |                      /* Uncached objects, mostly for scanout */
              GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
              GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
              GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
              GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

        /* for scanout with eLLC */
        if (GRAPHICS_VER(i915) >= 9)
                pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
        else
                pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

        intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
        intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
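
/*
 * Note (illustrative): each PPAT entry is an 8-bit field and a PTE selects
 * one of the eight entries via its PAT index bits. GEN8_PPAT(i, x) simply
 * packs value x into slot i of the 64-bit table, roughly:
 *
 *      #define GEN8_PPAT(i, x)  ((u64)(x) << ((i) * 8))
 *
 * which is why the whole table can be composed with a bitwise OR above and
 * then written out as two 32-bit register halves.
 */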

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
        u64 pat;

        /*
         * Map WB on BDW to snooped on CHV.
         *
         * Only the snoop bit has meaning for CHV, the rest is
         * ignored.
         *
         * The hardware will never snoop for certain types of accesses:
         * - CPU GTT (GMADR->GGTT->no snoop->memory)
         * - PPGTT page tables
         * - some other special cycles
         *
         * As with BDW, we also need to consider the following for GT accesses:
         * "For GGTT, there is NO pat_sel[2:0] from the entry,
         * so RTL will always use the value corresponding to
         * pat_sel = 000".
         * Which means we must set the snoop bit in PAT entry 0
         * in order to keep the global status page working.
         */

        pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
              GEN8_PPAT(1, 0) |
              GEN8_PPAT(2, 0) |
              GEN8_PPAT(3, 0) |
              GEN8_PPAT(4, CHV_PPAT_SNOOP) |
              GEN8_PPAT(5, CHV_PPAT_SNOOP) |
              GEN8_PPAT(6, CHV_PPAT_SNOOP) |
              GEN8_PPAT(7, CHV_PPAT_SNOOP);

        intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
        intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

void setup_private_pat(struct intel_uncore *uncore)
{
        struct drm_i915_private *i915 = uncore->i915;

        GEM_BUG_ON(GRAPHICS_VER(i915) < 8);

        if (GRAPHICS_VER(i915) >= 12)
                tgl_setup_private_ppat(uncore);
        else if (GRAPHICS_VER(i915) >= 11)
                icl_setup_private_ppat(uncore);
        else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
                chv_setup_private_ppat(uncore);
        else
                bdw_setup_private_ppat(uncore);
}

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;

        obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                i915_gem_object_put(obj);
                return vma;
        }

        return vma;
}

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
        struct i915_vma *vma;
        int err;

        vma = __vm_create_scratch_for_read(vm, size);
        if (IS_ERR(vma))
                return vma;

        err = i915_vma_pin(vma, 0, 0,
                           i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
        if (err) {
                i915_vma_put(vma);
                return ERR_PTR(err);
        }

        return vma;
}
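
/*
 * Example (illustrative sketch): the scratch-for-read helpers are typically
 * used when the GPU needs to report state back to the CPU, e.g. a batch that
 * stores register values into the vma which the CPU then maps and reads,
 * roughly:
 *
 *      vma = __vm_create_scratch_for_read_pinned(vm, PAGE_SIZE);
 *      if (IS_ERR(vma))
 *              return PTR_ERR(vma);
 *
 *      ... emit GPU commands that write into the vma's GTT offset ...
 *
 *      results = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
 *      ...
 *      i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
 */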

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif