// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gtt.h"
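
/*
 * Allocators for the backing store of GTT paging structures: alloc_pt_lmem()
 * places the page-table object in device local memory, while alloc_pt_dma()
 * uses an internal (system memory) object.
 */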
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	/*
	 * To avoid severe over-allocation when dealing with min_page_size
	 * restrictions, we override that behaviour here by allowing an object
	 * size and page layout which can be smaller. In practice this should be
	 * totally fine, since GTT paging structures are not typically inserted
	 * into the GTT.
	 *
	 * Note that we also hit this path for the scratch page, and for this
	 * case it might need to be 64K, but that should work fine here since we
	 * used the passed in size for the page size, which should ensure it
	 * also has the same alignment.
	 */
	obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
						    vm->lmem_pt_obj_flags);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	obj = i915_gem_object_create_internal(vm->i915, sz);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}
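
/*
 * map_pt_dma() and map_pt_dma_locked() pin the paging-structure object's
 * backing pages and map them for CPU access, using a coherent mapping type
 * where required. The object is then marked unshrinkable so that live page
 * tables are never reclaimed by the shrinker.
 */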
int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}

int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}
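
/*
 * Called when the last "open" reference to the address space is dropped:
 * forcibly unbind any vmas still left on the bound list so that the vm can
 * be torn down.
 */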
void __i915_vm_close(struct i915_address_space *vm)
{
	struct i915_vma *vma, *vn;

	if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
		return;

	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		/* Keep the obj (and hence the vma) alive as _we_ destroy it */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
	}
	GEM_BUG_ON(!list_empty(&vm->bound_list));

	mutex_unlock(&vm->mutex);
}

/* lock the vm into the current ww, if we lock one, we lock all */
int i915_vm_lock_objects(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww)
{
	if (vm->scratch[0]->base.resv == &vm->_resv) {
		return i915_gem_object_lock(vm->scratch[0], ww);
	} else {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		/* We borrowed the scratch page from ggtt, take the top level object */
		return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
	}
}

void i915_address_space_fini(struct i915_address_space *vm)
{
	drm_mm_takedown(&vm->mm);
	mutex_destroy(&vm->mutex);
}

/**
 * i915_vm_resv_release - Final struct i915_address_space destructor
 * @kref: Pointer to the &i915_address_space.resv_ref member.
 *
 * This function is called when the last lock sharer no longer shares the
 * &i915_address_space._resv lock.
 */
void i915_vm_resv_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, typeof(*vm), resv_ref);

	dma_resv_fini(&vm->_resv);
	kfree(vm);
}
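
/*
 * Address-space teardown runs from a worker: i915_vm_release() below only
 * queues release_work, and __i915_vm_release() performs the actual cleanup
 * before dropping the shared dma-resv reference.
 */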
static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, release_work);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	i915_vm_resv_put(vm);
}

void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_work(vm->i915->wq, &vm->release_work);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);

	/*
	 * Special case for GGTT that has already done an early
	 * kref_init here.
	 */
	if (!kref_read(&vm->resv_ref))
		kref_init(&vm->resv_ref);

	INIT_WORK(&vm->release_work, __i915_vm_release);
	atomic_set(&vm->open, 1);

	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);

	if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
		i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
	} else {
		/*
		 * The CHV + BXT VT-d workarounds use stop_machine(),
		 * which is allowed to allocate memory. This means &vm->mutex
		 * is the outer lock, and in theory we can allocate memory inside
		 * it through stop_machine().
		 *
		 * Add the annotation for this, we use trylock in the shrinker.
		 */
		mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
		might_alloc(GFP_KERNEL);
		mutex_release(&vm->mutex.dep_map, _THIS_IP_);
	}
	dma_resv_init(&vm->_resv);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	INIT_LIST_HEAD(&vm->bound_list);
}
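
/*
 * Release a vma's page description: if the sg_table was built specifically
 * for this vma (e.g. a partial or remapped view) rather than borrowed from
 * the object, free it, and clear the recorded page sizes.
 */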
void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
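
/*
 * The __px_*() helpers return the CPU virtual address, the DMA address and
 * the struct page backing a paging-structure ("px") object. They assume the
 * backing store has already been allocated (and, for __px_vaddr(), mapped),
 * as asserted by the GEM_BUG_ON()s below.
 */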
void *__px_vaddr(struct drm_i915_gem_object *p)
{
	enum i915_map_type type;

	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return page_unpack_bits(p->mm.mapping, &type);
}

dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_dma_address(p->mm.pages->sgl);
}

struct page *__px_page(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_page(p->mm.pages->sgl);
}
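
/*
 * Fill a paging-structure page with @count copies of the 64b entry @val,
 * flushing the CPU cache so the GPU sees the updated entries even on
 * non-coherent platforms.
 */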
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
	void *vaddr = __px_vaddr(p);

	memset64(vaddr, val, count);
	clflush_cache_range(vaddr, PAGE_SIZE);
}
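
/*
 * Scratch-page contents: zero in production, POISON_FREE when
 * CONFIG_DRM_I915_DEBUG_GEM is enabled, so that stray reads of scratch are
 * easy to spot (see the comment in setup_scratch_page()).
 */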
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
	void *vaddr = __px_vaddr(scratch);
	u8 val;

	val = 0;
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		val = POISON_FREE;

	memset(vaddr, val, scratch->base.size);
	drm_clflush_virt_range(vaddr, scratch->base.size);
}

int setup_scratch_page(struct i915_address_space *vm)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64k scratch page
	 * for all.
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
		size = I915_GTT_PAGE_SIZE_64K;

	do {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, size);
		if (IS_ERR(obj))
			goto skip;

		if (map_pt_dma(vm, obj))
			goto skip_obj;

		/* We need a single contiguous page for our scratch */
		if (obj->mm.page_sizes.sg < size)
			goto skip_obj;

		/* And it needs to be correspondingly aligned */
		if (__px_dma(obj) & (size - 1))
			goto skip_obj;

		/*
		 * Use a non-zero scratch page for debugging.
		 *
		 * We want a value that should be reasonably obvious
		 * to spot in the error state, while also causing a GPU hang
		 * if executed. We prefer using a clear page in production, so
		 * should it ever be accidentally used, the effect should be
		 * fairly benign.
		 */
		poison_scratch_page(obj);

		vm->scratch[0] = obj;
		vm->scratch_order = get_order(size);
		return 0;

skip_obj:
		i915_gem_object_put(obj);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
	} while (1);
}

void free_scratch(struct i915_address_space *vm)
{
	int i;

	for (i = 0; i <= vm->top; i++)
		i915_gem_object_put(vm->scratch[i]);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	/*
	 * This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option for
	 * 32K pages, but we don't currently have any support for it in our
	 * driver.
	 */
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    GRAPHICS_VER(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GRAPHICS_VER(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

		/*
		 * According to the BSpec if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least on BDW we can see
		 * visual corruption when using 2M pages, and not disabling the
		 * GTT cache.
		 */
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

		/* WaGttCachingOffByDefault */
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
				 intel_uncore_read(uncore,
						   HSW_GTT_CACHE_EN) == 0);
	}
}
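
/*
 * Private PAT (PPAT) setup. Each gen8+ PTE carries a PAT index that selects
 * one of the entries programmed below; the available encodings differ per
 * platform, hence one routine per generation (dispatched from
 * setup_private_pat()).
 */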
static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
	/* TGL doesn't support LLC or AGE settings */
	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(0),
			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(1),
			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(2),
			   GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(3),
			   GEN8_PPAT_UC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(4),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(5),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(6),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(7),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	/* for scanout with eLLC */
	if (GRAPHICS_VER(i915) >= 9)
		pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	else
		pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */

	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

void setup_private_pat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 8);

	if (GRAPHICS_VER(i915) >= 12)
		tgl_setup_private_ppat(uncore);
	else if (GRAPHICS_VER(i915) >= 11)
		icl_setup_private_ppat(uncore);
	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
		chv_setup_private_ppat(uncore);
	else
		bdw_setup_private_ppat(uncore);
}
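
/*
 * Helpers for creating a small CPU-cacheable buffer in @vm, typically used
 * for reading back GPU-written data; the _pinned variant also pins the vma
 * (PIN_GLOBAL for a GGTT vma, PIN_USER otherwise).
 */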
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
	struct i915_vma *vma;
	int err;

	vma = __vm_create_scratch_for_read(vm, size);
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err) {
		i915_vma_put(vma);
		return ERR_PTR(err);
	}

	return vma;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif