/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"

#include "i915_random.h"
#include "i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "igt_flush_test.h"

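/*
 * Drain the deferred free worker so that each test phase starts from a
 * clean slate, with the memory of previously freed objects returned to
 * the system.
 */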
static void cleanup_freed_objects(struct drm_i915_private *i915)
{
        i915_gem_drain_freed_objects(i915);
}

static void fake_free_pages(struct drm_i915_gem_object *obj,
                            struct sg_table *pages)
{
        sg_free_table(pages);
        kfree(pages);
}

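/*
 * Build "backing storage" for the object without allocating any real
 * memory: each scatterlist entry covers up to 2GiB (BIT(31)) and points
 * at the same bogus pfn (PFN_BIAS), so only the GTT and page-table
 * paths are exercised. For example, a 5GiB object is described by three
 * entries of 2GiB + 2GiB + 1GiB. The object is marked
 * I915_MADV_DONTNEED so that the shrinker may discard it at any time.
 */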
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
        struct sg_table *pages;
        struct scatterlist *sg;
        unsigned int sg_page_sizes;
        typeof(obj->base.size) rem;

        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
                return -ENOMEM;

        rem = round_up(obj->base.size, BIT(31)) >> 31;
        if (sg_alloc_table(pages, rem, GFP)) {
                kfree(pages);
                return -ENOMEM;
        }

        sg_page_sizes = 0;
        rem = obj->base.size;
        for (sg = pages->sgl; sg; sg = sg_next(sg)) {
                unsigned long len = min_t(typeof(rem), rem, BIT(31));

                GEM_BUG_ON(!len);
                sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
                sg_dma_address(sg) = page_to_phys(sg_page(sg));
                sg_dma_len(sg) = len;
                sg_page_sizes |= len;

                rem -= len;
        }
        GEM_BUG_ON(rem);

        obj->mm.madv = I915_MADV_DONTNEED;

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
        fake_free_pages(obj, pages);
        obj->mm.dirty = false;
        obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = fake_get_pages,
        .put_pages = fake_put_pages,
};

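/*
 * Create a GEM object backed by the fake scatterlist above. Since no
 * real pages are allocated, the tests can create objects far larger
 * than available memory and still bind them into an address space.
 * The pin/unpin cycle below "preallocates" the sg table so that any
 * -ENOMEM is reported up front rather than midway through a test.
 */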
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
        struct drm_i915_gem_object *obj;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc();
        if (!obj)
                goto err;

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &fake_ops);

        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;

        /* Preallocate the "backing storage" */
        if (i915_gem_object_pin_pages(obj))
                goto err_obj;

        i915_gem_object_unpin_pages(obj);
        return obj;

err_obj:
        i915_gem_object_put(obj);
err:
        return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct i915_ppgtt *ppgtt;
        u64 size, last, limit;
        int err = 0;

        /* Allocate a ppgtt and try to fill the entire range */

        if (!HAS_PPGTT(dev_priv))
                return 0;

        ppgtt = __ppgtt_create(dev_priv);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        if (!ppgtt->vm.allocate_va_range)
                goto err_ppgtt_cleanup;

        /*
         * While we only allocate the page tables here and so we could
         * address a much larger GTT than we could actually fit into
         * RAM, a practical limit is the number of physical pages in the
         * system. This should ensure that we do not run into the oomkiller
         * during the test and take down the machine wilfully.
         */
        limit = totalram_pages() << PAGE_SHIFT;
        limit = min(ppgtt->vm.total, limit);

        /* Check we can allocate the entire range */
        for (size = 4096; size <= limit; size <<= 2) {
                err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
                                        size, ilog2(size));
                                err = 0; /* virtual space too large! */
                        }
                        goto err_ppgtt_cleanup;
                }

                cond_resched();

                ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
        }

        /* Check we can incrementally allocate the entire range */
        for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
                err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
                                                  last, size - last);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
                                        last, size - last, ilog2(size));
                                err = 0; /* virtual space too large! */
                        }
                        goto err_ppgtt_cleanup;
                }

                cond_resched();
        }

err_ppgtt_cleanup:
        i915_vm_put(&ppgtt->vm);
        return err;
}

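/*
 * Drive the low-level vm operations (allocate_va_range, insert_entries,
 * clear_range) directly, bypassing i915_vma_pin(). A zeroed mock vma is
 * used purely to describe where each fake object's pages should land
 * within the hole.
 */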
static int lowlevel_hole(struct drm_i915_private *i915,
                         struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        I915_RND_STATE(seed_prng);
        unsigned int size;
        struct i915_vma mock_vma;

        memset(&mock_vma, 0, sizeof(struct i915_vma));

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                I915_RND_SUBSTATE(prng, seed_prng);
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                u64 hole_size;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count)
                        return -ENOMEM;
                GEM_BUG_ON(!order);

                GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
                GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                GEM_BUG_ON(obj->base.size != BIT_ULL(size));

                if (i915_gem_object_pin_pages(obj)) {
                        i915_gem_object_put(obj);
                        kfree(order);
                        break;
                }

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
                        intel_wakeref_t wakeref;

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

                        if (igt_timeout(end_time,
                                        "%s timed out before %d/%d\n",
                                        __func__, n, count)) {
                                hole_end = hole_start; /* quit */
                                break;
                        }

                        if (vm->allocate_va_range &&
                            vm->allocate_va_range(vm, addr, BIT_ULL(size)))
                                break;

                        mock_vma.pages = obj->mm.pages;
                        mock_vma.node.size = BIT_ULL(size);
                        mock_vma.node.start = addr;

                        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                                vm->insert_entries(vm, &mock_vma,
                                                   I915_CACHE_NONE, 0);
                }
                count = n;

                i915_random_reorder(order, count, &prng);
                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
                        intel_wakeref_t wakeref;

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
                        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                                vm->clear_range(vm, addr, BIT_ULL(size));
                }

                i915_gem_object_unpin_pages(obj);
                i915_gem_object_put(obj);

                kfree(order);

                cleanup_freed_objects(i915);
        }

        return 0;
}

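/*
 * Unbind and release every object on the list; unbind errors are
 * deliberately ignored as this also runs on the failure paths.
 */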
static void close_object_list(struct list_head *objects,
                              struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj, *on;
        int ignored;

        list_for_each_entry_safe(obj, on, objects, st_link) {
                struct i915_vma *vma;

                vma = i915_vma_instance(obj, vm, NULL);
                if (!IS_ERR(vma))
                        ignored = i915_vma_unbind(vma);
                /* Only ppgtt vma may be closed before the object is freed */
                if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);

                list_del(&obj->st_link);
                i915_gem_object_put(obj);
        }
}

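/*
 * Pack objects of assorted sizes (stepped by primes) against both ends
 * of the hole, checking after each bind that every vma sits exactly at
 * its expected offset and that rebinding and unbinding leave nothing
 * misplaced.
 */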
static int fill_hole(struct drm_i915_private *i915,
                     struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        struct drm_i915_gem_object *obj;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
        const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
        unsigned long npages, prime, flags;
        struct i915_vma *vma;
        LIST_HEAD(objects);
        int err;

        /* Try binding many VMA working inwards from either edge */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(prime, 2, max_step) {
                for (npages = 1; npages <= max_pages; npages *= prime) {
                        const u64 full_size = npages << PAGE_SHIFT;
                        const struct {
                                const char *name;
                                u64 offset;
                                int step;
                        } phases[] = {
                                { "top-down", hole_end, -1, },
                                { "bottom-up", hole_start, 1, },
                                { }
                        }, *p;

                        obj = fake_dma_object(i915, full_size);
                        if (IS_ERR(obj))
                                break;

                        list_add(&obj->st_link, &objects);

                        /* Align differing sized objects against the edges, and
                         * check we don't walk off into the void when binding
                         * them into the GTT.
                         */
                        for (p = phases; p->name; p++) {
                                u64 offset;

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }
                        }

                        if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
                                        __func__, npages, prime)) {
                                err = -EINTR;
                                goto err;
                        }
                }

                close_object_list(&objects, vm);
                cleanup_freed_objects(i915);
        }

        return 0;

err:
        close_object_list(&objects, vm);
        return err;
}

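/*
 * Bind a single vma at each successive position within the hole,
 * verifying its placement after every pin and that unbind actually
 * releases the drm_mm node before moving on.
 */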
static int walk_hole(struct drm_i915_private *i915,
                     struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
        unsigned long flags;
        u64 size;

        /* Try binding a single VMA in different positions within the hole */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(size, 1, max_pages) {
                struct drm_i915_gem_object *obj;
                struct i915_vma *vma;
                u64 addr;
                int err = 0;

                obj = fake_dma_object(i915, size << PAGE_SHIFT);
                if (IS_ERR(obj))
                        break;

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_put;
                }

                for (addr = hole_start;
                     addr + obj->base.size < hole_end;
                     addr += obj->base.size) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
                                       __func__, addr, vma->size,
                                       hole_start, hole_end, err);
                                goto err_close;
                        }
                        i915_vma_unpin(vma);

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                err = -EINVAL;
                                goto err_close;
                        }

                        err = i915_vma_unbind(vma);
                        if (err) {
                                pr_err("%s unbind failed at %llx + %llx with err=%d\n",
                                       __func__, addr, vma->size, err);
                                goto err_close;
                        }

                        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

                        if (igt_timeout(end_time,
                                        "%s timed out at %llx\n",
                                        __func__, addr)) {
                                err = -EINTR;
                                goto err_close;
                        }
                }

err_close:
                if (!i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);
err_put:
                i915_gem_object_put(obj);
                if (err)
                        return err;

                cleanup_freed_objects(i915);
        }

        return 0;
}

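/*
 * Insert a pair of pages straddling every power-of-two (pot) boundary
 * within the hole, probing address handling at each level of the
 * page-table hierarchy.
 */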
static int pot_hole(struct drm_i915_private *i915,
                    struct i915_address_space *vm,
                    u64 hole_start, u64 hole_end,
                    unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        unsigned long flags;
        unsigned int pot;
        int err = 0;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        /* Insert a pair of pages across every pot boundary within the hole */
        for (pot = fls64(hole_end - 1) - 1;
             pot > ilog2(2 * I915_GTT_PAGE_SIZE);
             pot--) {
                u64 step = BIT_ULL(pot);
                u64 addr;

                for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr += step) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr,
                                       hole_start, hole_end,
                                       err);
                                goto err;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);
                }

                if (igt_timeout(end_time,
                                "%s timed out after %d/%d\n",
                                __func__, pot, fls64(hole_end - 1) - 1)) {
                        err = -EINTR;
                        goto err;
                }
        }

err:
        if (!i915_vma_is_ggtt(vma))
                i915_vma_close(vma);
err_obj:
        i915_gem_object_put(obj);
        return err;
}

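/*
 * Like lowlevel_hole, but through the full i915_vma_pin() API: bind
 * progressively larger objects at pseudo-random offsets throughout the
 * hole, checking placement each time.
 */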
static int drunk_hole(struct drm_i915_private *i915,
                      struct i915_address_space *vm,
                      u64 hole_start, u64 hole_end,
                      unsigned long end_time)
{
        I915_RND_STATE(prng);
        unsigned int size;
        unsigned long flags;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                struct i915_vma *vma;
                u64 hole_size;
                int err = -ENODEV;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count)
                        return -ENOMEM;
                GEM_BUG_ON(!order);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_obj;
                }

                GEM_BUG_ON(vma->size != BIT_ULL(size));

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr, BIT_ULL(size),
                                       hole_start, hole_end,
                                       err);
                                goto err;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, BIT_ULL(size));
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);

                        if (igt_timeout(end_time,
                                        "%s timed out after %d/%d\n",
                                        __func__, n, count)) {
                                err = -EINTR;
                                goto err;
                        }
                }

err:
                if (!i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);
err_obj:
                i915_gem_object_put(obj);
                kfree(order);
                if (err)
                        return err;

                cleanup_freed_objects(i915);
        }

        return 0;
}

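/*
 * Fill the hole with ever-larger objects while the caller's fault
 * injection (vm->fault_attr) makes the underlying page-table
 * allocations fail at intervals, checking that we unwind cleanly.
 */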
static int __shrink_hole(struct drm_i915_private *i915,
                         struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
        unsigned int order = 12;
        LIST_HEAD(objects);
        int err = 0;
        u64 addr;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (addr = hole_start; addr < hole_end; ) {
                struct i915_vma *vma;
                u64 size = BIT_ULL(order++);

                size = min(size, hole_end - addr);
                obj = fake_dma_object(i915, size);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        break;
                }

                GEM_BUG_ON(vma->size != size);

                err = i915_vma_pin(vma, 0, 0, addr | flags);
                if (err) {
                        pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                               __func__, addr, size, hole_start, hole_end, err);
                        break;
                }

                if (!drm_mm_node_allocated(&vma->node) ||
                    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                        pr_err("%s incorrect at %llx + %llx\n",
                               __func__, addr, size);
                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        err = -EINVAL;
                        break;
                }

                i915_vma_unpin(vma);
                addr += size;

                /*
                 * Since we are injecting allocation faults at random intervals,
                 * wait for this allocation to complete before we change the
                 * fault injection.
                 */
                err = i915_vma_sync(vma);
                if (err)
                        break;

                if (igt_timeout(end_time,
                                "%s timed out at offset %llx [%llx - %llx]\n",
                                __func__, addr, hole_start, hole_end)) {
                        err = -EINTR;
                        break;
                }
        }

        close_object_list(&objects, vm);
        cleanup_freed_objects(i915);
        return err;
}

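/*
 * Sweep the allocation-fault interval over the primes so that the
 * injected failures land at many different depths of the page-table
 * tree.
 */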
static int shrink_hole(struct drm_i915_private *i915,
                       struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned long prime;
        int err;

        vm->fault_attr.probability = 999;
        atomic_set(&vm->fault_attr.times, -1);

        for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
                vm->fault_attr.interval = prime;
                err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
                if (err)
                        break;
        }

        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

        return err;
}

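/*
 * Force the shrinker to run from inside alloc_pt/alloc_pd: a purgeable
 * object primes the page directory, then fault injection on the next
 * binding makes its page-table allocation recurse into the shrinker.
 */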
static int shrink_boom(struct drm_i915_private *i915,
                       struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned int sizes[] = { SZ_2M, SZ_1G };
        struct drm_i915_gem_object *purge;
        struct drm_i915_gem_object *explode;
        int err;
        int i;

        /*
         * Catch the case which shrink_hole seems to miss. The setup here
         * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
         * ensuring that all vma associated with the respective pd/pdp are
         * unpinned at the time.
         */

        for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
                unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
                unsigned int size = sizes[i];
                struct i915_vma *vma;

                purge = fake_dma_object(i915, size);
                if (IS_ERR(purge))
                        return PTR_ERR(purge);

                vma = i915_vma_instance(purge, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_purge;
                }

                err = i915_vma_pin(vma, 0, 0, flags);
                if (err)
                        goto err_purge;

                /* Should now be ripe for purging */
                i915_vma_unpin(vma);

                explode = fake_dma_object(i915, size);
                if (IS_ERR(explode)) {
                        err = PTR_ERR(explode);
                        goto err_purge;
                }

                vm->fault_attr.probability = 100;
                vm->fault_attr.interval = 1;
                atomic_set(&vm->fault_attr.times, -1);

                vma = i915_vma_instance(explode, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_explode;
                }

                err = i915_vma_pin(vma, 0, 0, flags | size);
                if (err)
                        goto err_explode;

                i915_vma_unpin(vma);

                i915_gem_object_put(purge);
                i915_gem_object_put(explode);

                memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
                cleanup_freed_objects(i915);
        }

        return 0;

err_explode:
        i915_gem_object_put(explode);
err_purge:
        i915_gem_object_put(purge);
        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
        return err;
}

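/*
 * Common harness for the ppGTT tests: create a full ppGTT behind a mock
 * file and run the given hole exerciser over its entire range.
 */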
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
                          int (*func)(struct drm_i915_private *i915,
                                      struct i915_address_space *vm,
                                      u64 hole_start, u64 hole_end,
                                      unsigned long end_time))
{
        struct drm_file *file;
        struct i915_ppgtt *ppgtt;
        IGT_TIMEOUT(end_time);
        int err;

        if (!HAS_FULL_PPGTT(dev_priv))
                return 0;

        file = mock_file(dev_priv);
        if (IS_ERR(file))
                return PTR_ERR(file);

        ppgtt = i915_ppgtt_create(dev_priv);
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_free;
        }
        GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
        GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));

        err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

        i915_vm_put(&ppgtt->vm);

out_free:
        mock_file_free(dev_priv, file);
        return err;
}

static int igt_ppgtt_fill(void *arg)
{
        return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
        return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
        return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
        return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
        return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
        return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
        return exercise_ppgtt(arg, shrink_boom);
}

static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
        struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
        struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

        if (a->start < b->start)
                return -1;
        else
                return 1;
}

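/*
 * Run the given exerciser over every hole currently in the GGTT. As the
 * test itself mutates the drm_mm, the hole list may be stale afterwards,
 * so we re-sort and restart the walk after each hole, skipping those
 * below the high-water mark kept in 'last'.
 */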
static int exercise_ggtt(struct drm_i915_private *i915,
                         int (*func)(struct drm_i915_private *i915,
                                     struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        struct i915_ggtt *ggtt = &i915->ggtt;
        u64 hole_start, hole_end, last = 0;
        struct drm_mm_node *node;
        IGT_TIMEOUT(end_time);
        int err = 0;

restart:
        list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
        drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
                if (hole_start < last)
                        continue;

                if (ggtt->vm.mm.color_adjust)
                        ggtt->vm.mm.color_adjust(node, 0,
                                                 &hole_start, &hole_end);
                if (hole_start >= hole_end)
                        continue;

                err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
                if (err)
                        break;

                /* As we have manipulated the drm_mm, the list may be corrupt */
                last = hole_end;
                goto restart;
        }

        return err;
}

static int igt_ggtt_fill(void *arg)
{
        return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
        return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
        return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
        return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
        return exercise_ggtt(arg, lowlevel_hole);
}

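/*
 * Check ggtt->vm.insert_page: map the same backing page at many GGTT
 * offsets, then, in random order, write a distinct dword through one
 * alias and read it back through another. Since every offset should
 * alias the single page, a mismatch means a PTE pointed elsewhere.
 */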
static int igt_ggtt_page(void *arg)
{
        const unsigned int count = PAGE_SIZE/sizeof(u32);
        I915_RND_STATE(prng);
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_i915_gem_object *obj;
        intel_wakeref_t wakeref;
        struct drm_mm_node tmp;
        unsigned int *order, n;
        int err;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto out_free;

        memset(&tmp, 0, sizeof(tmp));
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
                                          count * PAGE_SIZE, 0,
                                          I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
        if (err)
                goto out_unpin;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + n * PAGE_SIZE;

                ggtt->vm.insert_page(&ggtt->vm,
                                     i915_gem_object_get_dma_address(obj, 0),
                                     offset, I915_CACHE_NONE, 0);
        }

        order = i915_random_order(count, &prng);
        if (!order) {
                err = -ENOMEM;
                goto out_remove;
        }

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);
        }
        intel_gt_flush_ggtt_writes(ggtt->vm.gt);

        i915_random_reorder(order, count, &prng);
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;
                u32 val;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);

                if (val != n) {
                        pr_err("insert page failed: found %d, expected %d\n",
                               val, n);
                        err = -EINVAL;
                        break;
                }
        }

        kfree(order);
out_remove:
        ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        drm_mm_remove_node(&tmp);
out_unpin:
        i915_gem_object_unpin_pages(obj);
out_free:
        i915_gem_object_put(obj);
        return err;
}

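/*
 * Manually mark the vma as bound, mimicking what i915_vma_pin() would
 * do, since the tests below place nodes directly with
 * i915_gem_gtt_reserve()/insert() and still need eviction to see them.
 */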
static void track_vma_bind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        atomic_inc(&obj->bind_count); /* track for eviction later */
        __i915_gem_object_pin_pages(obj);

        GEM_BUG_ON(vma->pages);
        atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
        __i915_gem_object_pin_pages(obj);
        vma->pages = obj->mm.pages;

        mutex_lock(&vma->vm->mutex);
        list_add_tail(&vma->vm_link, &vma->vm->bound_list);
        mutex_unlock(&vma->vm->mutex);
}

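/*
 * Run the given exerciser against the address space of a mock context,
 * capping the range by available RAM as for the real ppGTT tests.
 */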
static int exercise_mock(struct drm_i915_private *i915,
                         int (*func)(struct drm_i915_private *i915,
                                     struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        const u64 limit = totalram_pages() << PAGE_SHIFT;
        struct i915_address_space *vm;
        struct i915_gem_context *ctx;
        IGT_TIMEOUT(end_time);
        int err;

        ctx = mock_context(i915, "mock");
        if (!ctx)
                return -ENOMEM;

        vm = i915_gem_context_get_vm_rcu(ctx);
        err = func(i915, vm, 0, min(vm->total, limit), end_time);
        i915_vm_put(vm);

        mock_context_close(ctx);
        return err;
}

static int igt_mock_fill(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, fill_hole);
}

static int igt_mock_walk(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, walk_hole);
}

static int igt_mock_pot(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, drunk_hole);
}

1294 static int igt_gtt_reserve(void *arg)
1295 {
1296         struct i915_ggtt *ggtt = arg;
1297         struct drm_i915_gem_object *obj, *on;
1298         I915_RND_STATE(prng);
1299         LIST_HEAD(objects);
1300         u64 total;
1301         int err = -ENODEV;
1302
1303         /* i915_gem_gtt_reserve() tries to reserve the precise range
1304          * for the node, and evicts if it has to. So our test checks that
1305          * it can give us the requsted space and prevent overlaps.
1306          */
1307
1308         /* Start by filling the GGTT */
1309         for (total = 0;
1310              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1311              total += 2 * I915_GTT_PAGE_SIZE) {
1312                 struct i915_vma *vma;
1313
1314                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1315                                                       2 * PAGE_SIZE);
1316                 if (IS_ERR(obj)) {
1317                         err = PTR_ERR(obj);
1318                         goto out;
1319                 }
1320
1321                 err = i915_gem_object_pin_pages(obj);
1322                 if (err) {
1323                         i915_gem_object_put(obj);
1324                         goto out;
1325                 }
1326
1327                 list_add(&obj->st_link, &objects);
1328
1329                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1330                 if (IS_ERR(vma)) {
1331                         err = PTR_ERR(vma);
1332                         goto out;
1333                 }
1334
1335                 mutex_lock(&ggtt->vm.mutex);
1336                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1337                                            obj->base.size,
1338                                            total,
1339                                            obj->cache_level,
1340                                            0);
1341                 mutex_unlock(&ggtt->vm.mutex);
1342                 if (err) {
1343                         pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1344                                total, ggtt->vm.total, err);
1345                         goto out;
1346                 }
1347                 track_vma_bind(vma);
1348
1349                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1350                 if (vma->node.start != total ||
1351                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1352                         pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1353                                vma->node.start, vma->node.size,
1354                                total, 2*I915_GTT_PAGE_SIZE);
1355                         err = -EINVAL;
1356                         goto out;
1357                 }
1358         }
1359
1360         /* Now we start forcing evictions */
1361         for (total = I915_GTT_PAGE_SIZE;
1362              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1363              total += 2 * I915_GTT_PAGE_SIZE) {
1364                 struct i915_vma *vma;
1365
1366                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1367                                                       2 * PAGE_SIZE);
1368                 if (IS_ERR(obj)) {
1369                         err = PTR_ERR(obj);
1370                         goto out;
1371                 }
1372
1373                 err = i915_gem_object_pin_pages(obj);
1374                 if (err) {
1375                         i915_gem_object_put(obj);
1376                         goto out;
1377                 }
1378
1379                 list_add(&obj->st_link, &objects);
1380
1381                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1382                 if (IS_ERR(vma)) {
1383                         err = PTR_ERR(vma);
1384                         goto out;
1385                 }
1386
1387                 mutex_lock(&ggtt->vm.mutex);
1388                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1389                                            obj->base.size,
1390                                            total,
1391                                            obj->cache_level,
1392                                            0);
1393                 mutex_unlock(&ggtt->vm.mutex);
1394                 if (err) {
1395                         pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1396                                total, ggtt->vm.total, err);
1397                         goto out;
1398                 }
1399                 track_vma_bind(vma);
1400
1401                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1402                 if (vma->node.start != total ||
1403                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1404                         pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1405                                vma->node.start, vma->node.size,
1406                                total, 2*I915_GTT_PAGE_SIZE);
1407                         err = -EINVAL;
1408                         goto out;
1409                 }
1410         }
1411
1412         /* And then rebind each node at a random offset, evicting overlaps */
1413         list_for_each_entry_safe(obj, on, &objects, st_link) {
1414                 struct i915_vma *vma;
1415                 u64 offset;
1416
1417                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1418                 if (IS_ERR(vma)) {
1419                         err = PTR_ERR(vma);
1420                         goto out;
1421                 }
1422
1423                 err = i915_vma_unbind(vma);
1424                 if (err) {
1425                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1426                         goto out;
1427                 }
1428
1429                 offset = igt_random_offset(&prng,
1430                                            0, ggtt->vm.total,
1431                                            2 * I915_GTT_PAGE_SIZE,
1432                                            I915_GTT_MIN_ALIGNMENT);
1433
1434                 mutex_lock(&ggtt->vm.mutex);
1435                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1436                                            obj->base.size,
1437                                            offset,
1438                                            obj->cache_level,
1439                                            0);
1440                 mutex_unlock(&ggtt->vm.mutex);
1441                 if (err) {
1442                         pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1443                                offset, ggtt->vm.total, err);
1444                         goto out;
1445                 }
1446                 track_vma_bind(vma);
1447
1448                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1449                 if (vma->node.start != offset ||
1450                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1451                         pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1452                                vma->node.start, vma->node.size,
1453                                offset, 2*I915_GTT_PAGE_SIZE);
1454                         err = -EINVAL;
1455                         goto out;
1456                 }
1457         }
1458
1459 out:
1460         list_for_each_entry_safe(obj, on, &objects, st_link) {
1461                 i915_gem_object_unpin_pages(obj);
1462                 i915_gem_object_put(obj);
1463         }
1464         return err;
1465 }
1466
1467 static int igt_gtt_insert(void *arg)
1468 {
1469         struct i915_ggtt *ggtt = arg;
1470         struct drm_i915_gem_object *obj, *on;
1471         struct drm_mm_node tmp = {};
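        /*
         * Insertion requests that can never be satisfied: larger than
         * the GTT, a range too small for the size, sizes that wrap the
         * range, and an alignment that steps over every slot in the
         * range; each must be rejected with -ENOSPC.
         */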
1472         const struct invalid_insert {
1473                 u64 size;
1474                 u64 alignment;
1475                 u64 start, end;
1476         } invalid_insert[] = {
1477                 {
1478                         ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1479                         0, ggtt->vm.total,
1480                 },
1481                 {
1482                         2*I915_GTT_PAGE_SIZE, 0,
1483                         0, I915_GTT_PAGE_SIZE,
1484                 },
1485                 {
1486                         -(u64)I915_GTT_PAGE_SIZE, 0,
1487                         0, 4*I915_GTT_PAGE_SIZE,
1488                 },
1489                 {
1490                         -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1491                         0, 4*I915_GTT_PAGE_SIZE,
1492                 },
1493                 {
1494                         I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1495                         I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1496                 },
1497                 {}
1498         }, *ii;
1499         LIST_HEAD(objects);
1500         u64 total;
1501         int err = -ENODEV;
1502
1503         /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1504          * for the node, evicting other nodes if required.
1505          */
1506
1507         /* Check a couple of obviously invalid requests */
1508         for (ii = invalid_insert; ii->size; ii++) {
1509                 mutex_lock(&ggtt->vm.mutex);
1510                 err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1511                                           ii->size, ii->alignment,
1512                                           I915_COLOR_UNEVICTABLE,
1513                                           ii->start, ii->end,
1514                                           0);
1515                 mutex_unlock(&ggtt->vm.mutex);
1516                 if (err != -ENOSPC) {
1517                         pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) returned err=%d, expected -ENOSPC\n",
1518                                ii->size, ii->alignment, ii->start, ii->end,
1519                                err);
1520                         return -EINVAL;
1521                 }
1522         }
1523
1524         /* Start by filling the GGTT */
1525         for (total = 0;
1526              total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1527              total += I915_GTT_PAGE_SIZE) {
1528                 struct i915_vma *vma;
1529
1530                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1531                                                       I915_GTT_PAGE_SIZE);
1532                 if (IS_ERR(obj)) {
1533                         err = PTR_ERR(obj);
1534                         goto out;
1535                 }
1536
1537                 err = i915_gem_object_pin_pages(obj);
1538                 if (err) {
1539                         i915_gem_object_put(obj);
1540                         goto out;
1541                 }
1542
1543                 list_add(&obj->st_link, &objects);
1544
1545                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1546                 if (IS_ERR(vma)) {
1547                         err = PTR_ERR(vma);
1548                         goto out;
1549                 }
1550
1551                 mutex_lock(&ggtt->vm.mutex);
1552                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1553                                           obj->base.size, 0, obj->cache_level,
1554                                           0, ggtt->vm.total,
1555                                           0);
1556                 mutex_unlock(&ggtt->vm.mutex);
1557                 if (err == -ENOSPC) {
1558                         /* maxed out the GGTT; the common cleanup below
1559                          * releases the already-listed object */
1560                         break;
1561                 }
1562                 if (err) {
1563                         pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1564                                total, ggtt->vm.total, err);
1565                         goto out;
1566                 }
1567                 track_vma_bind(vma);
1568                 __i915_vma_pin(vma);
1569
1570                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1571         }
1572
1573         list_for_each_entry(obj, &objects, st_link) {
1574                 struct i915_vma *vma;
1575
1576                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1577                 if (IS_ERR(vma)) {
1578                         err = PTR_ERR(vma);
1579                         goto out;
1580                 }
1581
1582                 if (!drm_mm_node_allocated(&vma->node)) {
1583                         pr_err("VMA was unexpectedly evicted!\n");
1584                         err = -EINVAL;
1585                         goto out;
1586                 }
1587
1588                 __i915_vma_unpin(vma);
1589         }
1590
1591         /* If we then reinsert, we should find the same hole */
1592         list_for_each_entry_safe(obj, on, &objects, st_link) {
1593                 struct i915_vma *vma;
1594                 u64 offset;
1595
1596                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1597                 if (IS_ERR(vma)) {
1598                         err = PTR_ERR(vma);
1599                         goto out;
1600                 }
1601
1602                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1603                 offset = vma->node.start;
1604
1605                 err = i915_vma_unbind(vma);
1606                 if (err) {
1607                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1608                         goto out;
1609                 }
1610
1611                 mutex_lock(&ggtt->vm.mutex);
1612                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1613                                           obj->base.size, 0, obj->cache_level,
1614                                           0, ggtt->vm.total,
1615                                           0);
1616                 mutex_unlock(&ggtt->vm.mutex);
1617                 if (err) {
1618                         pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1619                                offset, ggtt->vm.total, err);
1620                         goto out;
1621                 }
1622                 track_vma_bind(vma);
1623
1624                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1625                 if (vma->node.start != offset) {
1626                         pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1627                                offset, vma->node.start);
1628                         err = -EINVAL;
1629                         goto out;
1630                 }
1631         }
1632
1633         /* And then force evictions */
1634         for (total = 0;
1635              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1636              total += 2 * I915_GTT_PAGE_SIZE) {
1637                 struct i915_vma *vma;
1638
1639                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1640                                                       2 * I915_GTT_PAGE_SIZE);
1641                 if (IS_ERR(obj)) {
1642                         err = PTR_ERR(obj);
1643                         goto out;
1644                 }
1645
1646                 err = i915_gem_object_pin_pages(obj);
1647                 if (err) {
1648                         i915_gem_object_put(obj);
1649                         goto out;
1650                 }
1651
1652                 list_add(&obj->st_link, &objects);
1653
1654                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1655                 if (IS_ERR(vma)) {
1656                         err = PTR_ERR(vma);
1657                         goto out;
1658                 }
1659
1660                 mutex_lock(&ggtt->vm.mutex);
1661                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1662                                           obj->base.size, 0, obj->cache_level,
1663                                           0, ggtt->vm.total,
1664                                           0);
1665                 mutex_unlock(&ggtt->vm.mutex);
1666                 if (err) {
1667                         pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1668                                total, ggtt->vm.total, err);
1669                         goto out;
1670                 }
1671                 track_vma_bind(vma);
1672
1673                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1674         }
1675
1676 out:
1677         list_for_each_entry_safe(obj, on, &objects, st_link) {
1678                 i915_gem_object_unpin_pages(obj);
1679                 i915_gem_object_put(obj);
1680         }
1681         return err;
1682 }
1683
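/*
 * The mock selftests exercise the GTT range management (drm_mm) against
 * a fake GGTT on a mock device, without touching real hardware.
 */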
1684 int i915_gem_gtt_mock_selftests(void)
1685 {
1686         static const struct i915_subtest tests[] = {
1687                 SUBTEST(igt_mock_drunk),
1688                 SUBTEST(igt_mock_walk),
1689                 SUBTEST(igt_mock_pot),
1690                 SUBTEST(igt_mock_fill),
1691                 SUBTEST(igt_gtt_reserve),
1692                 SUBTEST(igt_gtt_insert),
1693         };
1694         struct drm_i915_private *i915;
1695         struct i915_ggtt *ggtt;
1696         int err;
1697
1698         i915 = mock_gem_device();
1699         if (!i915)
1700                 return -ENOMEM;
1701
1702         ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1703         if (!ggtt) {
1704                 err = -ENOMEM;
1705                 goto out_put;
1706         }
1707         mock_init_ggtt(i915, ggtt);
1708
1709         err = i915_subtests(tests, ggtt);
1710
1711         mock_device_flush(i915);
1712         i915_gem_drain_freed_objects(i915);
1713         mock_fini_ggtt(ggtt);
1714         kfree(ggtt);
1715 out_put:
1716         drm_dev_put(&i915->drm);
1717         return err;
1718 }
1719
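/*
 * Submit an empty request and wait briefly for it, as a barrier to
 * ensure everything previously queued on the context has executed;
 * returns -EIO on timeout.
 */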
1720 static int context_sync(struct intel_context *ce)
1721 {
1722         struct i915_request *rq;
1723         long timeout;
1724
1725         rq = intel_context_create_request(ce);
1726         if (IS_ERR(rq))
1727                 return PTR_ERR(rq);
1728
1729         i915_request_get(rq);
1730         i915_request_add(rq);
1731
1732         timeout = i915_request_wait(rq, 0, HZ / 5);
1733         i915_request_put(rq);
1734
1735         return timeout < 0 ? -EIO : 0;
1736 }
1737
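/*
 * Build and submit a request that jumps into the batch at addr; the
 * init breadcrumb (where available) lets a hang be told apart from a
 * batch that never started. Returns a referenced request on success.
 */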
1738 static struct i915_request *
1739 submit_batch(struct intel_context *ce, u64 addr)
1740 {
1741         struct i915_request *rq;
1742         int err;
1743
1744         rq = intel_context_create_request(ce);
1745         if (IS_ERR(rq))
1746                 return rq;
1747
1748         err = 0;
1749         if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1750                 err = rq->engine->emit_init_breadcrumb(rq);
1751         if (err == 0)
1752                 err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1753
1754         if (err == 0)
1755                 i915_request_get(rq);
1756         i915_request_add(rq);
1757
1758         return err ? ERR_PTR(err) : rq;
1759 }
1760
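/*
 * Dword 4 of each 64-byte batch slot sits between the store and the
 * MI_BATCH_BUFFER_START that loops the slot back on itself; end_spin()
 * releases a spinning batch by rewriting that dword to
 * MI_BATCH_BUFFER_END.
 */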
1761 static u32 *spinner(u32 *batch, int i)
1762 {
1763         return batch + i * 64 / sizeof(*batch) + 4;
1764 }
1765
1766 static void end_spin(u32 *batch, int i)
1767 {
1768         *spinner(batch, i) = MI_BATCH_BUFFER_END;
1769         wmb();
1770 }
1771
1772 static int igt_cs_tlb(void *arg)
1773 {
1774         const unsigned int count = PAGE_SIZE / 64;
1775         const unsigned int chunk_size = count * PAGE_SIZE;
1776         struct drm_i915_private *i915 = arg;
1777         struct drm_i915_gem_object *bbe, *act, *out;
1778         struct i915_gem_engines_iter it;
1779         struct i915_address_space *vm;
1780         struct i915_gem_context *ctx;
1781         struct intel_context *ce;
1782         struct drm_file *file;
1783         struct i915_vma *vma;
1784         I915_RND_STATE(prng);
1785         unsigned int i;
1786         u32 *result;
1787         u32 *batch;
1788         int err = 0;
1789
1790         /*
1791          * Our mission here is to fool the hardware into executing something
1792          * from scratch, as it will not have seen the batch move (due to a
1793          * missing TLB invalidate).
1794          */
1795
1796         file = mock_file(i915);
1797         if (IS_ERR(file))
1798                 return PTR_ERR(file);
1799
1800         ctx = live_context(i915, file);
1801         if (IS_ERR(ctx)) {
1802                 err = PTR_ERR(ctx);
1803                 goto out_unlock;
1804         }
1805
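        /* Rewriting PTEs underneath the GPU needs a private ppGTT; skip
         * if the context is confined to the global GTT.
         */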
1806         vm = i915_gem_context_get_vm_rcu(ctx);
1807         if (i915_is_ggtt(vm))
1808                 goto out_vm;
1809
1810         /* Create two pages; a dummy to prefill the TLB, and the intended target */
1811         bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1812         if (IS_ERR(bbe)) {
1813                 err = PTR_ERR(bbe);
1814                 goto out_vm;
1815         }
1816
1817         batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
1818         if (IS_ERR(batch)) {
1819                 err = PTR_ERR(batch);
1820                 goto out_put_bbe;
1821         }
1822         memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1823         i915_gem_object_flush_map(bbe);
1824         i915_gem_object_unpin_map(bbe);
1825
1826         act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1827         if (IS_ERR(act)) {
1828                 err = PTR_ERR(act);
1829                 goto out_put_bbe;
1830         }
1831
1832         /* Track the execution of each request by writing into a different slot */
1833         batch = i915_gem_object_pin_map(act, I915_MAP_WC);
1834         if (IS_ERR(batch)) {
1835                 err = PTR_ERR(batch);
1836                 goto out_put_act;
1837         }
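        /*
         * Each 64-byte slot is laid out as (gen8+):
         *
         *   cs[0] MI_STORE_DWORD_IMM_GEN4
         *   cs[1] lower_32_bits(result + i * sizeof(u32))
         *   cs[2] upper_32_bits(result + i * sizeof(u32))
         *   cs[3] i
         *   cs[4] MI_NOOP (flipped to MI_BATCH_BUFFER_END by end_spin())
         *   cs[5] MI_BATCH_BUFFER_START_GEN8
         *   cs[6..7] branch target, patched in just before submission
         *
         * so the slot stores its index into the result page and then
         * spins on itself until released.
         */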
1838         for (i = 0; i < count; i++) {
1839                 u32 *cs = batch + i * 64 / sizeof(*cs);
1840                 u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1841
1842                 GEM_BUG_ON(INTEL_GEN(i915) < 6);
1843                 cs[0] = MI_STORE_DWORD_IMM_GEN4;
1844                 if (INTEL_GEN(i915) >= 8) {
1845                         cs[1] = lower_32_bits(addr);
1846                         cs[2] = upper_32_bits(addr);
1847                         cs[3] = i;
1848                         cs[4] = MI_NOOP;
1849                         cs[5] = MI_BATCH_BUFFER_START_GEN8;
1850                 } else {
1851                         cs[1] = 0;
1852                         cs[2] = lower_32_bits(addr);
1853                         cs[3] = i;
1854                         cs[4] = MI_NOOP;
1855                         cs[5] = MI_BATCH_BUFFER_START;
1856                 }
1857         }
1858
1859         out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1860         if (IS_ERR(out)) {
1861                 err = PTR_ERR(out);
1862                 goto out_put_batch;
1863         }
1864         i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1865
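        /* Pin the result page at the very top of the address space, the
         * target of each slot's MI_STORE_DWORD_IMM above.
         */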
1866         vma = i915_vma_instance(out, vm, NULL);
1867         if (IS_ERR(vma)) {
1868                 err = PTR_ERR(vma);
1869                 goto out_put_batch;
1870         }
1871
1872         err = i915_vma_pin(vma, 0, 0,
1873                            PIN_USER |
1874                            PIN_OFFSET_FIXED |
1875                            (vm->total - PAGE_SIZE));
1876         if (err)
1877                 goto out_put_out;
1878         GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1879
1880         result = i915_gem_object_pin_map(out, I915_MAP_WB);
1881         if (IS_ERR(result)) {
1882                 err = PTR_ERR(result);
1883                 goto out_put_out;
1884         }
1885
1886         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1887                 IGT_TIMEOUT(end_time);
1888                 unsigned long pass = 0;
1889
1890                 if (!intel_engine_can_store_dword(ce->engine))
1891                         continue;
1892
1893                 while (!__igt_timeout(end_time, NULL)) {
1894                         struct i915_request *rq;
1895                         u64 offset;
1896
1897                         offset = igt_random_offset(&prng,
1898                                                    0, vm->total - PAGE_SIZE,
1899                                                    chunk_size, PAGE_SIZE);
1900
1901                         err = vm->allocate_va_range(vm, offset, chunk_size);
1902                         if (err)
1903                                 goto end;
1904
1905                         memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1906
1907                         vma = i915_vma_instance(bbe, vm, NULL);
1908                         if (IS_ERR(vma)) {
1909                                 err = PTR_ERR(vma);
1910                                 goto end;
1911                         }
1912
1913                         err = vma->ops->set_pages(vma);
1914                         if (err)
1915                                 goto end;
1916
1917                         /* Prime the TLB with the dummy pages, bound by hand */
1918                         for (i = 0; i < count; i++) {
1919                                 vma->node.start = offset + i * PAGE_SIZE;
1920                                 vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1921
1922                                 rq = submit_batch(ce, vma->node.start);
1923                                 if (IS_ERR(rq)) {
1924                                         err = PTR_ERR(rq);
1925                                         goto end;
1926                                 }
1927                                 i915_request_put(rq);
1928                         }
1929
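                        /* Drop our temporary vma->pages binding; the PTEs
                         * written above (and any TLB entries covering them)
                         * are left intact.
                         */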
1930                         vma->ops->clear_pages(vma);
1931
1932                         err = context_sync(ce);
1933                         if (err) {
1934                                 pr_err("%s: dummy setup timed out\n",
1935                                        ce->engine->name);
1936                                 goto end;
1937                         }
1938
1939                         vma = i915_vma_instance(act, vm, NULL);
1940                         if (IS_ERR(vma)) {
1941                                 err = PTR_ERR(vma);
1942                                 goto end;
1943                         }
1944
1945                         err = vma->ops->set_pages(vma);
1946                         if (err)
1947                                 goto end;
1948
1949                         /* Replace the mappings with the target batches, leaving the TLB stale */
1950                         for (i = 0; i < count; i++) {
1951                                 struct i915_request *rq;
1952                                 u32 *cs = batch + i * 64 / sizeof(*cs);
1953                                 u64 addr;
1954
1955                                 vma->node.start = offset + i * PAGE_SIZE;
1956                                 vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1957
1958                                 addr = vma->node.start + i * 64;
1959                                 cs[4] = MI_NOOP;
1960                                 cs[6] = lower_32_bits(addr);
1961                                 cs[7] = upper_32_bits(addr);
1962                                 wmb();
1963
1964                                 rq = submit_batch(ce, addr);
1965                                 if (IS_ERR(rq)) {
1966                                         err = PTR_ERR(rq);
1967                                         goto end;
1968                                 }
1969
1970                                 /* Wait until the context chain has started */
1971                                 if (i == 0) {
1972                                         while (READ_ONCE(result[i]) &&
1973                                                !i915_request_completed(rq))
1974                                                 cond_resched();
1975                                 } else {
1976                                         end_spin(batch, i - 1);
1977                                 }
1978
1979                                 i915_request_put(rq);
1980                         }
1981                         end_spin(batch, count - 1);
1982
1983                         vma->ops->clear_pages(vma);
1984
1985                         err = context_sync(ce);
1986                         if (err) {
1987                                 pr_err("%s: writes timed out\n",
1988                                        ce->engine->name);
1989                                 goto end;
1990                         }
1991
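                        /* A stale TLB entry means the GPU executed the old
                         * dummy page (an immediate MI_BATCH_BUFFER_END), so
                         * the write for that slot never happened.
                         */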
1992                         for (i = 0; i < count; i++) {
1993                                 if (result[i] != i) {
1994                                         pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
1995                                                ce->engine->name, pass,
1996                                                offset, i, result[i], i);
1997                                         err = -EINVAL;
1998                                         goto end;
1999                                 }
2000                         }
2001
2002                         vm->clear_range(vm, offset, chunk_size);
2003                         pass++;
2004                 }
2005         }
2006 end:
2007         if (igt_flush_test(i915))
2008                 err = -EIO;
2009         i915_gem_context_unlock_engines(ctx);
2010         i915_gem_object_unpin_map(out);
2011 out_put_out:
2012         i915_gem_object_put(out);
2013 out_put_batch:
2014         i915_gem_object_unpin_map(act);
2015 out_put_act:
2016         i915_gem_object_put(act);
2017 out_put_bbe:
2018         i915_gem_object_put(bbe);
2019 out_vm:
2020         i915_vm_put(vm);
2021 out_unlock:
2022         mock_file_free(i915, file);
2023         return err;
2024 }
2025
2026 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2027 {
2028         static const struct i915_subtest tests[] = {
2029                 SUBTEST(igt_ppgtt_alloc),
2030                 SUBTEST(igt_ppgtt_lowlevel),
2031                 SUBTEST(igt_ppgtt_drunk),
2032                 SUBTEST(igt_ppgtt_walk),
2033                 SUBTEST(igt_ppgtt_pot),
2034                 SUBTEST(igt_ppgtt_fill),
2035                 SUBTEST(igt_ppgtt_shrink),
2036                 SUBTEST(igt_ppgtt_shrink_boom),
2037                 SUBTEST(igt_ggtt_lowlevel),
2038                 SUBTEST(igt_ggtt_drunk),
2039                 SUBTEST(igt_ggtt_walk),
2040                 SUBTEST(igt_ggtt_pot),
2041                 SUBTEST(igt_ggtt_fill),
2042                 SUBTEST(igt_ggtt_page),
2043                 SUBTEST(igt_cs_tlb),
2044         };
2045
2046         GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2047
2048         return i915_subtests(tests, i915);
2049 }