/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */
#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}
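/*
 * Worked example (illustrative): swizzle_bit(9, v) extracts bit 9 of the
 * linear offset and shifts it down into the bit-6 position, so for
 * v = 0x200 (only bit 9 set) it returns 0x40. tiled_offset() below XORs
 * that value back into the offset, mirroring the hardware's bit-6
 * swizzling of main memory addresses.
 */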
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;
		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;
		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}
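/*
 * Example of the swizzle above: under I915_BIT_6_SWIZZLE_9_10 an offset
 * with only bit 9 (or only bit 10) set has bit 6 flipped, while an offset
 * with both bits set is left untouched because the two XOR contributions
 * cancel out.
 */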
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_ggtt_view view;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		return PTR_ERR(io);
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		return 0;

	intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
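	/*
	 * The iowrite32() above went through the WC GGTT aperture, so the
	 * pending GGTT writes are flushed first; the clflush below then
	 * makes sure the CPU read of the backing page observes what actually
	 * landed in memory rather than a stale cacheline.
	 */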
	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

	return err;
}
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);

		GEM_BUG_ON(view.partial.size > nreal);

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}
static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;
	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */
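	/*
	 * huge_gem_object() below reports a size larger than the whole GGTT
	 * (one page past the next prime above ggtt->vm.total) while backing
	 * it with only nreal pages of real storage, so partial views are the
	 * only way to map it.
	 */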
	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
	tile.tiling = I915_TILING_NONE;

	err = check_partial_mappings(obj, &tile, end);
	if (err && err != -EINTR)
		goto out_unlock;

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		unsigned int max_pitch;
		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);
		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);

		if (GRAPHICS_VER(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}
static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count = 0;
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;
	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many runs of 1s with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */
	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	do {
		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (GRAPHICS_VER(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);

		i915_gem_ww_ctx_init(&ww, false);
		err = i915_gem_object_lock(obj, &ww);
		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);

		rq = intel_engine_create_kernel_request(engine);

		err = i915_request_await_object(rq, vma->obj, true);
		err = i915_vma_move_to_active(vma, rq,
		i915_request_add(rq);

		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
		i915_gem_ww_ctx_fini(&ww);

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
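	/*
	 * make_obj_busy() drops its own reference above: the object now stays
	 * alive only while the requests submitted to each engine keep it
	 * active, which is exactly the kind of busy-but-dead object that
	 * igt_mmap_offset_exhaustion() expects to be able to reap.
	 */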
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	u64 offset;
	int ret;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);

	ret = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
	i915_gem_object_put(obj);

	return ret == expected;
}
static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(&i915->gt);
	cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(&i915->gt);
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!i915->gt.awake);
	intel_gt_retire_requests(&i915->gt);
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		pr_err("Failed to trim VMA manager, err=%d\n", err);
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
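	/*
	 * Every hole in the vma offset manager has now been reserved except
	 * for a single PAGE_SIZE slot (loop counts in PAGE_SIZE units), so
	 * the assert_mmap_offset() probes below have exactly one page of
	 * mmap offset space to fight over.
	 */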
	mmap_offset_unlock(i915);

	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");

	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");

	/* Fill the hole, further allocation attempts should then fail */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		pr_err("Unable to create object for reclaimed hole\n");

	err = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
		pr_err("Unable to insert object into reclaimed hole\n");

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");

	i915_gem_object_put(obj);
	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(&i915->gt))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);

		err = make_obj_busy(obj);
		if (err)
			pr_err("[loop %d] Failed to busy the object\n", loop);
	}

	mmap_offset_lock(i915);
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;

	i915_gem_object_put(obj);
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

	intel_gt_pm_put(vma->vm->gt);

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);

	i915_vma_unpin_iomap(vma);

	intel_gt_pm_put(vma->vm->gt);
static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	bool no_map;

	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
		return false;

	i915_gem_object_lock(obj, NULL);
	no_map = (type != I915_MMAP_TYPE_GTT &&
		  !i915_gem_object_has_struct_page(obj) &&
		  !i915_gem_object_has_iomem(obj));
	i915_gem_object_unlock(obj);

	return !no_map;
}
static void object_set_placements(struct drm_i915_gem_object *obj,
				  struct intel_memory_region **placements,
				  unsigned int n_placements)
{
	GEM_BUG_ON(!n_placements);

	if (n_placements == 1) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		struct intel_memory_region *mr = placements[0];

		obj->mm.placements = &i915->mm.regions[mr->id];
		obj->mm.n_placements = 1;
	} else {
		obj->mm.placements = placements;
		obj->mm.n_placements = n_placements;
	}
}
#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
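/*
 * e.g. expand32(POISON_INUSE) replicates the 0x5a poison byte from
 * <linux/poison.h> into all four byte lanes, giving the 0x5a5a5a5a
 * pattern that __igt_mmap() expects to read back below after gtt_set()
 * or wc_set() has poisoned the object.
 */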
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct vm_area_struct *area;

	if (!can_mmap(obj, type))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	area = vma_lookup(current->mm, addr);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	err = gtt_check(obj);

	vm_munmap(addr, obj->base.size);
	return err;
}
static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;

			obj = i915_gem_object_create_region(mr, sizes[i], 0, I915_BO_ALLOC_USER);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			object_set_placements(obj, &mr, 1);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);

			i915_gem_object_put(obj);
static const char *repr_mmap_type(enum i915_mmap_type type)
{
	switch (type) {
	case I915_MMAP_TYPE_GTT: return "gtt";
	case I915_MMAP_TYPE_WB: return "wb";
	case I915_MMAP_TYPE_WC: return "wc";
	case I915_MMAP_TYPE_UC: return "uc";
	default: return "unknown";
	}
}
static bool can_access(struct drm_i915_gem_object *obj)
{
	bool access;

	i915_gem_object_lock(obj, NULL);
	access = i915_gem_object_has_struct_page(obj) ||
		 i915_gem_object_has_iomem(obj);
	i915_gem_object_unlock(obj);

	return access;
}
static int __igt_mmap_access(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long __user *ptr;

	memset(&A, 0xAA, sizeof(A));
	memset(&B, 0xBB, sizeof(B));

	if (!can_mmap(obj, type) || !can_access(obj))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;
	ptr = (unsigned long __user *)addr;

	err = __put_user(A, ptr);
	if (err) {
		pr_err("%s(%s): failed to write into user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));

	intel_gt_flush_ggtt_writes(&i915->gt);

	err = access_process_vm(current, addr, &x, sizeof(x), 0);
	if (err != sizeof(x)) {
		pr_err("%s(%s): access_process_vm() read failed\n",
		       obj->mm.region->name, repr_mmap_type(type));

	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
	if (err != sizeof(B)) {
		pr_err("%s(%s): access_process_vm() write failed\n",
		       obj->mm.region->name, repr_mmap_type(type));

	intel_gt_flush_ggtt_writes(&i915->gt);

	err = __get_user(y, ptr);
	if (err) {
		pr_err("%s(%s): failed to read from user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));

	if (x != A || y != B) {
		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
		       obj->mm.region->name, repr_mmap_type(type),
		       x, y);

	vm_munmap(addr, obj->base.size);
	return err;
}
static int igt_mmap_access(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		object_set_placements(obj, &mr, 1);

		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);

		i915_gem_object_put(obj);
static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	struct intel_engine_cs *engine;
	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as being read by the GPU.
	 */
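	/*
	 * Concretely, the test below stores MI_BATCH_BUFFER_END through the
	 * userspace mapping and then executes that page as a batch on every
	 * engine: if the GPU sees the CPU write, the nop batch retires;
	 * otherwise the request times out and the GT is wedged.
	 */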
	if (!can_mmap(obj, type))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);

		i915_gem_ww_ctx_init(&ww, false);
		err = i915_gem_object_lock(obj, &ww);
		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);

		rq = i915_request_create(engine->kernel_context);

		err = i915_request_await_object(rq, vma->obj, false);
		err = i915_vma_move_to_active(vma, rq, 0);

		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);

		i915_request_put(rq);

		i915_vma_unpin(vma);

		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
		i915_gem_ww_ctx_fini(&ww);

	vm_munmap(addr, obj->base.size);
	return err;
}
static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		object_set_placements(obj, &mr, 1);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (!pte_present(*pte) || pte_none(*pte)) {
		pr_err("missing PTE:%lx\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (pte_present(*pte) && !pte_none(*pte)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
}
static int prefault_range(u64 start, u64 len)
{
	const char __user *addr, *end;
	char __maybe_unused c;
	int err;

	addr = u64_to_user_ptr(start);
	end = addr + len;

	for (; addr < end; addr += PAGE_SIZE) {
		err = __get_user(c, addr);
		if (err)
			return err;
	}

	return __get_user(c, end - 1);
}
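/*
 * Faulting in every page up front ensures the PTEs actually exist before
 * check_present()/check_absent() walk them with apply_to_page_range();
 * an untouched mapping would otherwise read as absent even before the
 * object is unbound.
 */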
static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	if (!can_mmap(obj, type))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);

	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);

	/*
	 * After unbinding the object from the GGTT, its address may be reused
	 * for other objects. Ergo we have to revoke the previous mmap PTE
	 * access as it no longer points to the same object.
	 */
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err) {
		pr_err("Failed to unbind object!\n");

	if (type != I915_MMAP_TYPE_GTT) {
		i915_gem_object_lock(obj, NULL);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		if (i915_gem_object_has_pages(obj)) {
			pr_err("Failed to put-pages object!\n");

	if (!obj->ops->mmap_ops) {
		err = check_absent(addr, obj->base.size);
		if (err) {
			pr_err("%s: was not absent\n", obj->mm.region->name);
	} else {
		/* ttm allows access to evicted regions by design */

		err = check_present(addr, obj->base.size);
		if (err) {
			pr_err("%s: was not present\n", obj->mm.region->name);

	vm_munmap(addr, obj->base.size);
	return err;
}
static int igt_mmap_revoke(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		object_set_placements(obj, &mr, 1);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_access),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
}