/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"

#include "i915_random.h"
#include "i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "igt_flush_test.h"
static void cleanup_freed_objects(struct drm_i915_private *i915)
{
	i915_gem_drain_freed_objects(i915);
}
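
/*
 * Fake "DMA" objects: the get/put_pages hooks below build a scatterlist
 * pointing at a constant bogus pfn instead of allocating real memory, so
 * the tests can create arbitrarily large objects while exercising only
 * the GTT and page-table bookkeeping.
 */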
static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		/* Every chunk points at the same fake page; no real backing */
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len;

		rem -= len;
	}

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
#undef GFP
}
static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};
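
/* Wrap the fake backing store in a GEM object of the requested size. */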
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}
static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_ppgtt *ppgtt;
	u64 size, last, limit;
	int err = 0;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!HAS_PPGTT(dev_priv))
		return 0;

	ppgtt = __ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (!ppgtt->vm.allocate_va_range)
		goto err_ppgtt_cleanup;

	/*
	 * While we only allocate the page tables here and so we could
	 * address a much larger GTT than we could actually fit into
	 * RAM, a practical limit is the amount of physical pages in the
	 * system. This should ensure that we do not run into the oomkiller
	 * during the test and take down the machine wilfully.
	 */
	limit = totalram_pages() << PAGE_SHIFT;
	limit = min(ppgtt->vm.total, limit);

	/* Check we can allocate the entire range */
	for (size = 4096; size <= limit; size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
						  last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}
	}

err_ppgtt_cleanup:
	i915_vm_put(&ppgtt->vm);
	return err;
}
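
/*
 * lowlevel_hole() skips the VMA machinery and drives the address-space
 * hooks (allocate_va_range, insert_entries, clear_range) directly through
 * a mock vma, binding randomly ordered blocks across the hole.
 */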
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);
			intel_wakeref_t wakeref;

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
				vm->insert_entries(vm, &mock_vma,
						   I915_CACHE_NONE, 0);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);
			intel_wakeref_t wakeref;

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
				vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);

		cleanup_freed_objects(i915);
	}

	return 0;
}
static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
		cleanup_freed_objects(i915);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;

		cleanup_freed_objects(i915);
	}

	return 0;
}
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err_obj;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err_obj;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err_obj;
		}
	}

err_obj:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
	i915_gem_object_put(obj);
	return err;
}
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = -ENODEV;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err_obj;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err_obj;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err_obj;
			}
		}

err_obj:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;

		cleanup_freed_objects(i915);
	}

	return 0;
}
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	u64 addr;
	int err = 0;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		/*
		 * Since we are injecting allocation faults at random intervals,
		 * wait for this allocation to complete before we change the
		 * fault injection.
		 */
		err = i915_vma_sync(vma);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	cleanup_freed_objects(i915);
	return err;
}
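
/* Re-run __shrink_hole with fault injection enabled at every prime interval. */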
static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}
static int shrink_boom(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned int sizes[] = { SZ_2M, SZ_1G };
	struct drm_i915_gem_object *purge;
	struct drm_i915_gem_object *explode;
	int err;
	int i;

	/*
	 * Catch the case which shrink_hole seems to miss. The setup here
	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
	 * ensuring that all vma associated with the respective pd/pdp are
	 * unpinned at the time.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int size = sizes[i];
		struct i915_vma *vma;

		purge = fake_dma_object(i915, size);
		if (IS_ERR(purge))
			return PTR_ERR(purge);

		vma = i915_vma_instance(purge, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_purge;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto err_purge;

		/* Should now be ripe for purging */
		i915_vma_unpin(vma);

		explode = fake_dma_object(i915, size);
		if (IS_ERR(explode)) {
			err = PTR_ERR(explode);
			goto err_purge;
		}

		vm->fault_attr.probability = 100;
		vm->fault_attr.interval = 1;
		atomic_set(&vm->fault_attr.times, -1);

		vma = i915_vma_instance(explode, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_explode;
		}

		err = i915_vma_pin(vma, 0, 0, flags | size);
		if (err)
			goto err_explode;

		i915_vma_unpin(vma);

		i915_gem_object_put(purge);
		i915_gem_object_put(explode);

		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
		cleanup_freed_objects(i915);
	}

	return 0;

err_explode:
	i915_gem_object_put(explode);
err_purge:
	i915_gem_object_put(purge);
	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
	return err;
}
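
/* Run a hole exerciser over a freshly created full ppGTT. */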
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!HAS_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ppgtt = i915_ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_free;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
	GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));

	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

	i915_vm_put(&ppgtt->vm);

out_free:
	mock_file_free(dev_priv, file);
	return err;
}
static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
	return exercise_ppgtt(arg, shrink_boom);
}
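
/* Order the GGTT hole list by start address for exercise_ggtt(). */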
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}
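
/* Walk every hole currently left in the GGTT and run the exerciser on it. */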
static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

restart:
	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->vm.mm.color_adjust)
			ggtt->vm.mm.color_adjust(node, 0,
						 &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}

	return err;
}
static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}
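
/*
 * igt_ggtt_page() checks vm.insert_page() by mapping a single page at many
 * GGTT offsets, writing through the aperture and reading the values back.
 */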
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + n * PAGE_SIZE;

		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, 0),
				     offset, I915_CACHE_NONE, 0);
	}

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);
	}
	intel_gt_flush_ggtt_writes(ggtt->vm.gt);

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
	return err;
}
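
/*
 * Manually mark a mock-bound vma as if vma_bind() had run, so that the
 * eviction and unbind paths see a consistent object/vma state.
 */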
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	atomic_inc(&obj->bind_count); /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	GEM_BUG_ON(vma->pages);
	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
	__i915_gem_object_pin_pages(obj);
	vma->pages = obj->mm.pages;

	mutex_lock(&vma->vm->mutex);
	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);
}
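
/* Run a hole exerciser over the address space of a mock context. */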
static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	const u64 limit = totalram_pages() << PAGE_SHIFT;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	vm = i915_gem_context_get_vm_rcu(ctx);
	err = func(i915, vm, 0, min(vm->total, limit), end_time);
	i915_vm_put(vm);

	mock_context_close(ctx);
	return err;
}
static int igt_mock_fill(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, drunk_hole);
}
static int igt_gtt_reserve(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = igt_random_offset(&prng,
					   0, ggtt->vm.total,
					   2 * I915_GTT_PAGE_SIZE,
					   I915_GTT_MIN_ALIGNMENT);

		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}
static int igt_gtt_insert(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
			0, ggtt->vm.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}
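
/* Entry point for the mock (no hardware) GTT selftests. */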
int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	struct i915_ggtt *ggtt;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
	if (!ggtt) {
		err = -ENOMEM;
		goto out_put;
	}
	mock_init_ggtt(i915, ggtt);

	err = i915_subtests(tests, ggtt);

	mock_device_flush(i915);
	i915_gem_drain_freed_objects(i915);
	mock_fini_ggtt(ggtt);
	kfree(ggtt);
out_put:
	drm_dev_put(&i915->drm);
	return err;
}
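
/* Submit an empty request on @ce and wait briefly for it to complete. */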
static int context_sync(struct intel_context *ce)
{
	struct i915_request *rq;
	long timeout;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);

	timeout = i915_request_wait(rq, 0, HZ / 5);
	i915_request_put(rq);

	return timeout < 0 ? -EIO : 0;
}
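
/* Emit a batchbuffer start at @addr on @ce and return the request. */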
static struct i915_request *
submit_batch(struct intel_context *ce, u64 addr)
{
	struct i915_request *rq;
	int err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return rq;

	err = 0;
	if (rq->engine->emit_init_breadcrumb) /* detect a hang */
		err = rq->engine->emit_init_breadcrumb(rq);
	if (err == 0)
		err = rq->engine->emit_bb_start(rq, addr, 0, 0);

	if (err == 0)
		i915_request_get(rq);
	i915_request_add(rq);

	return err ? ERR_PTR(err) : rq;
}
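
/*
 * The target batch is divided into 64-byte slots; spinner() points just
 * past each slot's store so that end_spin() can terminate the chain by
 * writing MI_BATCH_BUFFER_END there.
 */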
static u32 *spinner(u32 *batch, int i)
{
	return batch + i * 64 / sizeof(*batch) + 4;
}

static void end_spin(u32 *batch, int i)
{
	*spinner(batch, i) = MI_BATCH_BUFFER_END;
	wmb();
}
static int igt_cs_tlb(void *arg)
{
	const unsigned int count = PAGE_SIZE / 64;
	const unsigned int chunk_size = count * PAGE_SIZE;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *bbe, *act, *out;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct drm_file *file;
	struct i915_vma *vma;
	I915_RND_STATE(prng);
	unsigned int i;
	u32 *result;
	u32 *batch;
	int err = 0;

	/*
	 * Our mission here is to fool the hardware to execute something
	 * from scratch as it has not seen the batch move (due to missing
	 * the TLB invalidate).
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	vm = i915_gem_context_get_vm_rcu(ctx);
	if (i915_is_ggtt(vm))
		goto out_vm;

	/* Create two pages; a dummy with which we prefill the TLB, and the intended target */
	bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(bbe)) {
		err = PTR_ERR(bbe);
		goto out_vm;
	}

	batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_put_bbe;
	}
	memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
	i915_gem_object_flush_map(bbe);
	i915_gem_object_unpin_map(bbe);

	act = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(act)) {
		err = PTR_ERR(act);
		goto out_put_bbe;
	}

	/* Track the execution of each request by writing into different slot */
	batch = i915_gem_object_pin_map(act, I915_MAP_WC);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_put_act;
	}
	for (i = 0; i < count; i++) {
		u32 *cs = batch + i * 64 / sizeof(*cs);
		u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);

		GEM_BUG_ON(INTEL_GEN(i915) < 6);
		cs[0] = MI_STORE_DWORD_IMM_GEN4;
		if (INTEL_GEN(i915) >= 8) {
			cs[1] = lower_32_bits(addr);
			cs[2] = upper_32_bits(addr);
			cs[3] = i;
			cs[4] = MI_NOOP;
			cs[5] = MI_BATCH_BUFFER_START_GEN8;
		} else {
			cs[1] = 0;
			cs[2] = lower_32_bits(addr);
			cs[3] = i;
			cs[4] = MI_NOOP;
			cs[5] = MI_BATCH_BUFFER_START;
		}
	}

	out = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(out)) {
		err = PTR_ERR(out);
		goto out_put_batch;
	}
	i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);

	vma = i915_vma_instance(out, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put_out;
	}

	err = i915_vma_pin(vma, 0, 0,
			   PIN_USER |
			   PIN_OFFSET_FIXED |
			   (vm->total - PAGE_SIZE));
	if (err)
		goto out_put_out;
	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);

	result = i915_gem_object_pin_map(out, I915_MAP_WB);
	if (IS_ERR(result)) {
		err = PTR_ERR(result);
		goto out_put_out;
	}

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		IGT_TIMEOUT(end_time);
		unsigned long pass = 0;

		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			struct i915_request *rq;
			u64 offset;

			offset = igt_random_offset(&prng,
						   0, vm->total - PAGE_SIZE,
						   chunk_size, PAGE_SIZE);

			err = vm->allocate_va_range(vm, offset, chunk_size);
			if (err)
				goto end;

			memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

			vma = i915_vma_instance(bbe, vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto end;
			}

			err = vma->ops->set_pages(vma);
			if (err)
				goto end;

			/* Prime the TLB with the dummy pages */
			for (i = 0; i < count; i++) {
				vma->node.start = offset + i * PAGE_SIZE;
				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);

				rq = submit_batch(ce, vma->node.start);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					goto end;
				}
				i915_request_put(rq);
			}

			vma->ops->clear_pages(vma);

			err = context_sync(ce);
			if (err) {
				pr_err("%s: dummy setup timed out\n",
				       ce->engine->name);
				goto end;
			}

			vma = i915_vma_instance(act, vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto end;
			}

			err = vma->ops->set_pages(vma);
			if (err)
				goto end;

			/* Replace the TLB with target batches */
			for (i = 0; i < count; i++) {
				struct i915_request *rq;
				u32 *cs = batch + i * 64 / sizeof(*cs);
				u64 addr;

				vma->node.start = offset + i * PAGE_SIZE;
				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);

				addr = vma->node.start + i * 64;
				cs[6] = lower_32_bits(addr);
				cs[7] = upper_32_bits(addr);

				rq = submit_batch(ce, addr);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					goto end;
				}

				/* Wait until the context chain has started */
				if (i == 0) {
					while (READ_ONCE(result[i]) &&
					       !i915_request_completed(rq))
						cond_resched();
				} else {
					end_spin(batch, i - 1);
				}

				i915_request_put(rq);
			}
			end_spin(batch, count - 1);

			vma->ops->clear_pages(vma);

			err = context_sync(ce);
			if (err) {
				pr_err("%s: writes timed out\n",
				       ce->engine->name);
				goto end;
			}

			for (i = 0; i < count; i++) {
				if (result[i] != i) {
					pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
					       ce->engine->name, pass,
					       offset, i, result[i], i);
					err = -EINVAL;
					goto end;
				}
			}

			vm->clear_range(vm, offset, chunk_size);
			pass++;
		}
	}
end:
	if (igt_flush_test(i915))
		err = -EIO;
	i915_gem_context_unlock_engines(ctx);
	i915_gem_object_unpin_map(out);
out_put_out:
	i915_gem_object_put(out);
out_put_batch:
	i915_gem_object_unpin_map(act);
out_put_act:
	i915_gem_object_put(act);
out_put_bbe:
	i915_gem_object_put(bbe);
out_vm:
	i915_vm_put(vm);
out_unlock:
	mock_file_free(i915, file);
	return err;
}
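
/* Entry point for the GTT selftests that require real hardware. */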
int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ppgtt_shrink_boom),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
		SUBTEST(igt_cs_tlb),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));

	return i915_subtests(tests, i915);
}