drm/i915/gtt: split up i915_gem_gtt
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"

#include "i915_random.h"
#include "i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "igt_flush_test.h"

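/*
 * Objects are released asynchronously by a worker; drain that queue
 * between test phases so the backing pages are truly returned to the
 * system before the next allocation storm begins.
 */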
static void cleanup_freed_objects(struct drm_i915_private *i915)
{
        i915_gem_drain_freed_objects(i915);
}

static void fake_free_pages(struct drm_i915_gem_object *obj,
                            struct sg_table *pages)
{
        sg_free_table(pages);
        kfree(pages);
}

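/*
 * Build a scatterlist that only pretends to have backing storage: each
 * segment points at the bogus pfn PFN_BIAS and reuses its physical
 * address as the dma address. That lets the tests populate arbitrarily
 * large GTT ranges without touching real memory; the "pages" must never
 * be dereferenced.
 */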
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
        struct sg_table *pages;
        struct scatterlist *sg;
        unsigned int sg_page_sizes;
        typeof(obj->base.size) rem;

        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
                return -ENOMEM;

        rem = round_up(obj->base.size, BIT(31)) >> 31;
        if (sg_alloc_table(pages, rem, GFP)) {
                kfree(pages);
                return -ENOMEM;
        }

        sg_page_sizes = 0;
        rem = obj->base.size;
        for (sg = pages->sgl; sg; sg = sg_next(sg)) {
                unsigned long len = min_t(typeof(rem), rem, BIT(31));

                GEM_BUG_ON(!len);
                sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
                sg_dma_address(sg) = page_to_phys(sg_page(sg));
                sg_dma_len(sg) = len;
                sg_page_sizes |= len;

                rem -= len;
        }
        GEM_BUG_ON(rem);

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
        fake_free_pages(obj, pages);
        obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = fake_get_pages,
        .put_pages = fake_put_pages,
};

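/*
 * Create a GEM object backed by the fake scatterlist above, so objects
 * far larger than available memory can be bound into the GTT.
 */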
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
        static struct lock_class_key lock_class;
        struct drm_i915_gem_object *obj;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc();
        if (!obj)
                goto err;

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &fake_ops, &lock_class);

        i915_gem_object_set_volatile(obj);

        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;

        /* Preallocate the "backing storage" */
        if (i915_gem_object_pin_pages(obj))
                goto err_obj;

        i915_gem_object_unpin_pages(obj);
        return obj;

err_obj:
        i915_gem_object_put(obj);
err:
        return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct i915_ppgtt *ppgtt;
        u64 size, last, limit;
        int err = 0;

        /* Allocate a ppgtt and try to fill the entire range */

        if (!HAS_PPGTT(dev_priv))
                return 0;

        ppgtt = i915_ppgtt_create(&dev_priv->gt);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        if (!ppgtt->vm.allocate_va_range)
                goto err_ppgtt_cleanup;

        /*
         * While we only allocate the page tables here and so we could
         * address a much larger GTT than we could actually fit into
         * RAM, a practical limit is the number of physical pages in the
         * system. This should ensure that we do not run into the oomkiller
         * during the test and take down the machine wilfully.
         */
        limit = totalram_pages() << PAGE_SHIFT;
        limit = min(ppgtt->vm.total, limit);

        /* Check we can allocate the entire range */
        for (size = 4096; size <= limit; size <<= 2) {
                err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
                                        size, ilog2(size));
                                err = 0; /* virtual space too large! */
                        }
                        goto err_ppgtt_cleanup;
                }

                cond_resched();

                ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
        }

        /* Check we can incrementally allocate the entire range */
        for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
                err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
                                                  last, size - last);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
                                        last, size - last, ilog2(size));
                                err = 0; /* virtual space too large! */
                        }
                        goto err_ppgtt_cleanup;
                }

                cond_resched();
        }

err_ppgtt_cleanup:
        i915_vm_put(&ppgtt->vm);
        return err;
}

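/*
 * Drive the low-level page-table hooks directly: allocate the va range
 * and feed vm->insert_entries()/vm->clear_range() a hand-rolled mock
 * vma, bypassing the usual vma pin/bind machinery.
 */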
static int lowlevel_hole(struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        I915_RND_STATE(seed_prng);
        struct i915_vma *mock_vma;
        unsigned int size;

        mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
        if (!mock_vma)
                return -ENOMEM;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                I915_RND_SUBSTATE(prng, seed_prng);
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                u64 hole_size;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count) {
                        kfree(mock_vma);
                        return -ENOMEM;
                }
                GEM_BUG_ON(!order);

                GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
                GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(vm->i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                GEM_BUG_ON(obj->base.size != BIT_ULL(size));

                if (i915_gem_object_pin_pages(obj)) {
                        i915_gem_object_put(obj);
                        kfree(order);
                        break;
                }

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
                        intel_wakeref_t wakeref;

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

                        if (igt_timeout(end_time,
                                        "%s timed out before %d/%d\n",
                                        __func__, n, count)) {
                                hole_end = hole_start; /* quit */
                                break;
                        }

                        if (vm->allocate_va_range &&
                            vm->allocate_va_range(vm, addr, BIT_ULL(size)))
                                break;

                        mock_vma->pages = obj->mm.pages;
                        mock_vma->node.size = BIT_ULL(size);
                        mock_vma->node.start = addr;

                        with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
                                vm->insert_entries(vm, mock_vma,
                                                   I915_CACHE_NONE, 0);
                }
                count = n;

                i915_random_reorder(order, count, &prng);
                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
                        intel_wakeref_t wakeref;

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
                        with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
                                vm->clear_range(vm, addr, BIT_ULL(size));
                }

                i915_gem_object_unpin_pages(obj);
                i915_gem_object_put(obj);

                kfree(order);

                cleanup_freed_objects(vm->i915);
        }

        kfree(mock_vma);
        return 0;
}

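/* Unbind, close and release every object accumulated on a test's list. */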
static void close_object_list(struct list_head *objects,
                              struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj, *on;
        int ignored;

        list_for_each_entry_safe(obj, on, objects, st_link) {
                struct i915_vma *vma;

                vma = i915_vma_instance(obj, vm, NULL);
                if (!IS_ERR(vma))
                        ignored = i915_vma_unbind(vma);
                /* Only ppgtt vma may be closed before the object is freed */
                if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);

                list_del(&obj->st_link);
                i915_gem_object_put(obj);
        }
}

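/*
 * Pack the hole with objects of mixed sizes in both top-down and
 * bottom-up phases: pin each vma at its expected offset, verify nothing
 * has moved on a second walk, then unbind, repeating the walks forwards
 * and in reverse.
 */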
static int fill_hole(struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        struct drm_i915_gem_object *obj;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
        const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
        unsigned long npages, prime, flags;
        struct i915_vma *vma;
        LIST_HEAD(objects);
        int err;

        /* Try binding many VMA working inwards from either edge */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(prime, 2, max_step) {
                for (npages = 1; npages <= max_pages; npages *= prime) {
                        const u64 full_size = npages << PAGE_SHIFT;
                        const struct {
                                const char *name;
                                u64 offset;
                                int step;
                        } phases[] = {
                                { "top-down", hole_end, -1, },
                                { "bottom-up", hole_start, 1, },
                                { }
                        }, *p;

                        obj = fake_dma_object(vm->i915, full_size);
                        if (IS_ERR(obj))
                                break;

                        list_add(&obj->st_link, &objects);

                        /* Align differing sized objects against the edges, and
                         * check we don't walk off into the void when binding
                         * them into the GTT.
                         */
                        for (p = phases; p->name; p++) {
                                u64 offset;

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }
                        }

                        if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
                                        __func__, npages, prime)) {
                                err = -EINTR;
                                goto err;
                        }
                }

                close_object_list(&objects, vm);
                cleanup_freed_objects(vm->i915);
        }

        return 0;

err:
        close_object_list(&objects, vm);
        return err;
}

static int walk_hole(struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
        unsigned long flags;
        u64 size;

        /* Try binding a single VMA in different positions within the hole */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(size, 1, max_pages) {
                struct drm_i915_gem_object *obj;
                struct i915_vma *vma;
                u64 addr;
                int err = 0;

                obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
                if (IS_ERR(obj))
                        break;

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_put;
                }

                for (addr = hole_start;
                     addr + obj->base.size < hole_end;
                     addr += obj->base.size) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
                                       __func__, addr, vma->size,
                                       hole_start, hole_end, err);
                                goto err_close;
                        }
                        i915_vma_unpin(vma);

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                err = -EINVAL;
                                goto err_close;
                        }

                        err = i915_vma_unbind(vma);
                        if (err) {
                                pr_err("%s unbind failed at %llx + %llx with err=%d\n",
                                       __func__, addr, vma->size, err);
                                goto err_close;
                        }

                        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

                        if (igt_timeout(end_time,
                                        "%s timed out at %llx\n",
                                        __func__, addr)) {
                                err = -EINTR;
                                goto err_close;
                        }
                }

err_close:
                if (!i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);
err_put:
                i915_gem_object_put(obj);
                if (err)
                        return err;

                cleanup_freed_objects(vm->i915);
        }

        return 0;
}

static int pot_hole(struct i915_address_space *vm,
                    u64 hole_start, u64 hole_end,
                    unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        unsigned long flags;
        unsigned int pot;
        int err = 0;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        /* Insert a pair of pages across every pot boundary within the hole */
        for (pot = fls64(hole_end - 1) - 1;
             pot > ilog2(2 * I915_GTT_PAGE_SIZE);
             pot--) {
                u64 step = BIT_ULL(pot);
                u64 addr;

                for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr += step) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr,
                                       hole_start, hole_end,
                                       err);
                                goto err;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);
                }

                if (igt_timeout(end_time,
                                "%s timed out after %d/%d\n",
                                __func__, pot, fls64(hole_end - 1) - 1)) {
                        err = -EINTR;
                        goto err;
                }
        }

err:
        if (!i915_vma_is_ggtt(vma))
                i915_vma_close(vma);
err_obj:
        i915_gem_object_put(obj);
        return err;
}

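/*
 * Like walk_hole, but visit the size-aligned slots of the hole in a
 * pseudo-random order to stress non-linear insertion and removal.
 */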
static int drunk_hole(struct i915_address_space *vm,
                      u64 hole_start, u64 hole_end,
                      unsigned long end_time)
{
        I915_RND_STATE(prng);
        unsigned int size;
        unsigned long flags;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                struct i915_vma *vma;
                u64 hole_size;
                int err = -ENODEV;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count)
                        return -ENOMEM;
                GEM_BUG_ON(!order);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(vm->i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_obj;
                }

                GEM_BUG_ON(vma->size != BIT_ULL(size));

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr, BIT_ULL(size),
                                       hole_start, hole_end,
                                       err);
                                goto err;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, BIT_ULL(size));
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);

                        if (igt_timeout(end_time,
                                        "%s timed out after %d/%d\n",
                                        __func__, n, count)) {
                                err = -EINTR;
                                goto err;
                        }
                }

err:
                if (!i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);
err_obj:
                i915_gem_object_put(obj);
                kfree(order);
                if (err)
                        return err;

                cleanup_freed_objects(vm->i915);
        }

        return 0;
}

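/*
 * Fill the hole with progressively larger objects while the callers
 * below inject allocation faults into the vm, exercising the error
 * unwind paths of the page-table allocators.
 */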
static int __shrink_hole(struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
        unsigned int order = 12;
        LIST_HEAD(objects);
        int err = 0;
        u64 addr;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (addr = hole_start; addr < hole_end; ) {
                struct i915_vma *vma;
                u64 size = BIT_ULL(order++);

                size = min(size, hole_end - addr);
                obj = fake_dma_object(vm->i915, size);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        break;
                }

                GEM_BUG_ON(vma->size != size);

                err = i915_vma_pin(vma, 0, 0, addr | flags);
                if (err) {
                        pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                               __func__, addr, size, hole_start, hole_end, err);
                        break;
                }

                if (!drm_mm_node_allocated(&vma->node) ||
                    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                        pr_err("%s incorrect at %llx + %llx\n",
                               __func__, addr, size);
                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        err = -EINVAL;
                        break;
                }

                i915_vma_unpin(vma);
                addr += size;

                /*
                 * Since we are injecting allocation faults at random intervals,
                 * wait for this allocation to complete before we change the
                 * fault injection.
                 */
                err = i915_vma_sync(vma);
                if (err)
                        break;

                if (igt_timeout(end_time,
                                "%s timed out at offset %llx [%llx - %llx]\n",
                                __func__, addr, hole_start, hole_end)) {
                        err = -EINTR;
                        break;
                }
        }

        close_object_list(&objects, vm);
        cleanup_freed_objects(vm->i915);
        return err;
}

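/* Sweep the fault-injection interval across the primes and refill. */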
static int shrink_hole(struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned long prime;
        int err;

        vm->fault_attr.probability = 999;
        atomic_set(&vm->fault_attr.times, -1);

        for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
                vm->fault_attr.interval = prime;
                err = __shrink_hole(vm, hole_start, hole_end, end_time);
                if (err)
                        break;
        }

        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

        return err;
}

static int shrink_boom(struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned int sizes[] = { SZ_2M, SZ_1G };
        struct drm_i915_gem_object *purge;
        struct drm_i915_gem_object *explode;
        int err;
        int i;

        /*
         * Catch the case which shrink_hole seems to miss. The setup here
         * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
         * ensuring that all vma associated with the respective pd/pdp are
         * unpinned at the time.
         */

        for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
                unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
                unsigned int size = sizes[i];
                struct i915_vma *vma;

                purge = fake_dma_object(vm->i915, size);
                if (IS_ERR(purge))
                        return PTR_ERR(purge);

                vma = i915_vma_instance(purge, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_purge;
                }

                err = i915_vma_pin(vma, 0, 0, flags);
                if (err)
                        goto err_purge;

                /* Should now be ripe for purging */
                i915_vma_unpin(vma);

                explode = fake_dma_object(vm->i915, size);
                if (IS_ERR(explode)) {
                        err = PTR_ERR(explode);
                        goto err_purge;
                }

                vm->fault_attr.probability = 100;
                vm->fault_attr.interval = 1;
                atomic_set(&vm->fault_attr.times, -1);

                vma = i915_vma_instance(explode, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_explode;
                }

                err = i915_vma_pin(vma, 0, 0, flags | size);
                if (err)
                        goto err_explode;

                i915_vma_unpin(vma);

                i915_gem_object_put(purge);
                i915_gem_object_put(explode);

                memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
                cleanup_freed_objects(vm->i915);
        }

        return 0;

err_explode:
        i915_gem_object_put(explode);
err_purge:
        i915_gem_object_put(purge);
        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
        return err;
}

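/*
 * Instantiate a fresh full ppgtt and hand its entire address range
 * [0, vm.total) to the given hole-filling test.
 */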
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
                          int (*func)(struct i915_address_space *vm,
                                      u64 hole_start, u64 hole_end,
                                      unsigned long end_time))
{
        struct i915_ppgtt *ppgtt;
        IGT_TIMEOUT(end_time);
        struct file *file;
        int err;

        if (!HAS_FULL_PPGTT(dev_priv))
                return 0;

        file = mock_file(dev_priv);
        if (IS_ERR(file))
                return PTR_ERR(file);

        ppgtt = i915_ppgtt_create(&dev_priv->gt);
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_free;
        }
        GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
        GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));

        err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);

        i915_vm_put(&ppgtt->vm);

out_free:
        fput(file);
        return err;
}

static int igt_ppgtt_fill(void *arg)
{
        return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
        return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
        return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
        return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
        return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
        return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
        return exercise_ppgtt(arg, shrink_boom);
}

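/* list_sort() comparator: order the drm_mm holes by ascending start. */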
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
        struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
        struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

        if (a->start < b->start)
                return -1;
        else
                return 1;
}

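/*
 * Run the test over every hole in the live GGTT. The test itself
 * manipulates the drm_mm, so after each hole we re-sort and restart the
 * walk, skipping the ranges already visited.
 */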
static int exercise_ggtt(struct drm_i915_private *i915,
                         int (*func)(struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        struct i915_ggtt *ggtt = &i915->ggtt;
        u64 hole_start, hole_end, last = 0;
        struct drm_mm_node *node;
        IGT_TIMEOUT(end_time);
        int err = 0;

restart:
        list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
        drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
                if (hole_start < last)
                        continue;

                if (ggtt->vm.mm.color_adjust)
                        ggtt->vm.mm.color_adjust(node, 0,
                                                 &hole_start, &hole_end);
                if (hole_start >= hole_end)
                        continue;

                err = func(&ggtt->vm, hole_start, hole_end, end_time);
                if (err)
                        break;

                /* As we have manipulated the drm_mm, the list may be corrupt */
                last = hole_end;
                goto restart;
        }

        return err;
}

static int igt_ggtt_fill(void *arg)
{
        return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
        return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
        return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
        return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
        return exercise_ggtt(arg, lowlevel_hole);
}

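/*
 * Alias a single physical page at many GGTT offsets via
 * vm->insert_page(), then write and read back distinct dwords through
 * the mappable aperture in random order to check that every PTE was
 * set up correctly.
 */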
static int igt_ggtt_page(void *arg)
{
        const unsigned int count = PAGE_SIZE/sizeof(u32);
        I915_RND_STATE(prng);
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_i915_gem_object *obj;
        intel_wakeref_t wakeref;
        struct drm_mm_node tmp;
        unsigned int *order, n;
        int err;

        if (!i915_ggtt_has_aperture(ggtt))
                return 0;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto out_free;

        memset(&tmp, 0, sizeof(tmp));
        mutex_lock(&ggtt->vm.mutex);
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
                                          count * PAGE_SIZE, 0,
                                          I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
        mutex_unlock(&ggtt->vm.mutex);
        if (err)
                goto out_unpin;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + n * PAGE_SIZE;

                ggtt->vm.insert_page(&ggtt->vm,
                                     i915_gem_object_get_dma_address(obj, 0),
                                     offset, I915_CACHE_NONE, 0);
        }

        order = i915_random_order(count, &prng);
        if (!order) {
                err = -ENOMEM;
                goto out_remove;
        }

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);
        }
        intel_gt_flush_ggtt_writes(ggtt->vm.gt);

        i915_random_reorder(order, count, &prng);
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;
                u32 val;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);

                if (val != n) {
                        pr_err("insert page failed: found %d, expected %d\n",
                               val, n);
                        err = -EINVAL;
                        break;
                }
        }

        kfree(order);
out_remove:
        ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        mutex_lock(&ggtt->vm.mutex);
        drm_mm_remove_node(&tmp);
        mutex_unlock(&ggtt->vm.mutex);
out_unpin:
        i915_gem_object_unpin_pages(obj);
out_free:
        i915_gem_object_put(obj);
        return err;
}

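/*
 * Fake just enough of the binding state (pages, bind count, bound list)
 * for nodes we placed directly with i915_gem_gtt_reserve() and friends,
 * so that eviction and unbind treat them as properly bound vma.
 */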
static void track_vma_bind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        atomic_inc(&obj->bind_count); /* track for eviction later */
        __i915_gem_object_pin_pages(obj);

        GEM_BUG_ON(vma->pages);
        atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
        __i915_gem_object_pin_pages(obj);
        vma->pages = obj->mm.pages;

        mutex_lock(&vma->vm->mutex);
        list_add_tail(&vma->vm_link, &vma->vm->bound_list);
        mutex_unlock(&vma->vm->mutex);
}

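/*
 * Run the test over the ppgtt of a mock context, clamping the range to
 * the total amount of physical memory in the system.
 */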
static int exercise_mock(struct drm_i915_private *i915,
                         int (*func)(struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        const u64 limit = totalram_pages() << PAGE_SHIFT;
        struct i915_address_space *vm;
        struct i915_gem_context *ctx;
        IGT_TIMEOUT(end_time);
        int err;

        ctx = mock_context(i915, "mock");
        if (!ctx)
                return -ENOMEM;

        vm = i915_gem_context_get_vm_rcu(ctx);
        err = func(vm, 0, min(vm->total, limit), end_time);
        i915_vm_put(vm);

        mock_context_close(ctx);
        return err;
}

static int igt_mock_fill(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, fill_hole);
}

static int igt_mock_walk(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, walk_hole);
}

static int igt_mock_pot(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, drunk_hole);
}

1296 static int igt_gtt_reserve(void *arg)
1297 {
1298         struct i915_ggtt *ggtt = arg;
1299         struct drm_i915_gem_object *obj, *on;
1300         I915_RND_STATE(prng);
1301         LIST_HEAD(objects);
1302         u64 total;
1303         int err = -ENODEV;
1304
1305         /* i915_gem_gtt_reserve() tries to reserve the precise range
1306          * for the node, and evicts if it has to. So our test checks that
1307          * it can give us the requsted space and prevent overlaps.
1308          */
1309
1310         /* Start by filling the GGTT */
1311         for (total = 0;
1312              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1313              total += 2 * I915_GTT_PAGE_SIZE) {
1314                 struct i915_vma *vma;
1315
1316                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1317                                                       2 * PAGE_SIZE);
1318                 if (IS_ERR(obj)) {
1319                         err = PTR_ERR(obj);
1320                         goto out;
1321                 }
1322
1323                 err = i915_gem_object_pin_pages(obj);
1324                 if (err) {
1325                         i915_gem_object_put(obj);
1326                         goto out;
1327                 }
1328
1329                 list_add(&obj->st_link, &objects);
1330
1331                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1332                 if (IS_ERR(vma)) {
1333                         err = PTR_ERR(vma);
1334                         goto out;
1335                 }
1336
1337                 mutex_lock(&ggtt->vm.mutex);
1338                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1339                                            obj->base.size,
1340                                            total,
1341                                            obj->cache_level,
1342                                            0);
1343                 mutex_unlock(&ggtt->vm.mutex);
1344                 if (err) {
1345                         pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1346                                total, ggtt->vm.total, err);
1347                         goto out;
1348                 }
1349                 track_vma_bind(vma);
1350
1351                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1352                 if (vma->node.start != total ||
1353                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1354                         pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1355                                vma->node.start, vma->node.size,
1356                                total, 2*I915_GTT_PAGE_SIZE);
1357                         err = -EINVAL;
1358                         goto out;
1359                 }
1360         }
1361
1362         /* Now we start forcing evictions */
1363         for (total = I915_GTT_PAGE_SIZE;
1364              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1365              total += 2 * I915_GTT_PAGE_SIZE) {
1366                 struct i915_vma *vma;
1367
1368                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1369                                                       2 * PAGE_SIZE);
1370                 if (IS_ERR(obj)) {
1371                         err = PTR_ERR(obj);
1372                         goto out;
1373                 }
1374
1375                 err = i915_gem_object_pin_pages(obj);
1376                 if (err) {
1377                         i915_gem_object_put(obj);
1378                         goto out;
1379                 }
1380
1381                 list_add(&obj->st_link, &objects);
1382
1383                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1384                 if (IS_ERR(vma)) {
1385                         err = PTR_ERR(vma);
1386                         goto out;
1387                 }
1388
1389                 mutex_lock(&ggtt->vm.mutex);
1390                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1391                                            obj->base.size,
1392                                            total,
1393                                            obj->cache_level,
1394                                            0);
1395                 mutex_unlock(&ggtt->vm.mutex);
1396                 if (err) {
1397                         pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1398                                total, ggtt->vm.total, err);
1399                         goto out;
1400                 }
1401                 track_vma_bind(vma);
1402
1403                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1404                 if (vma->node.start != total ||
1405                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1406                         pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1407                                vma->node.start, vma->node.size,
1408                                total, 2*I915_GTT_PAGE_SIZE);
1409                         err = -EINVAL;
1410                         goto out;
1411                 }
1412         }
1413
1414         /* And then try at random */
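        /*
         * Unbind each node and re-reserve it at a randomly chosen,
         * min-alignment offset; eviction must first clear whichever
         * nodes already overlap the chosen range.
         */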
1415         list_for_each_entry_safe(obj, on, &objects, st_link) {
1416                 struct i915_vma *vma;
1417                 u64 offset;
1418
1419                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1420                 if (IS_ERR(vma)) {
1421                         err = PTR_ERR(vma);
1422                         goto out;
1423                 }
1424
1425                 err = i915_vma_unbind(vma);
1426                 if (err) {
1427                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1428                         goto out;
1429                 }
1430
1431                 offset = igt_random_offset(&prng,
1432                                            0, ggtt->vm.total,
1433                                            2 * I915_GTT_PAGE_SIZE,
1434                                            I915_GTT_MIN_ALIGNMENT);
1435
1436                 mutex_lock(&ggtt->vm.mutex);
1437                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1438                                            obj->base.size,
1439                                            offset,
1440                                            obj->cache_level,
1441                                            0);
1442                 mutex_unlock(&ggtt->vm.mutex);
1443                 if (err) {
1444                         pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1445                                offset, ggtt->vm.total, err);
1446                         goto out;
1447                 }
1448                 track_vma_bind(vma);
1449
1450                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1451                 if (vma->node.start != offset ||
1452                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1453                         pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1454                                vma->node.start, vma->node.size,
1455                                offset, 2*I915_GTT_PAGE_SIZE);
1456                         err = -EINVAL;
1457                         goto out;
1458                 }
1459         }
1460
1461 out:
1462         list_for_each_entry_safe(obj, on, &objects, st_link) {
1463                 i915_gem_object_unpin_pages(obj);
1464                 i915_gem_object_put(obj);
1465         }
1466         return err;
1467 }
1468
1469 static int igt_gtt_insert(void *arg)
1470 {
1471         struct i915_ggtt *ggtt = arg;
1472         struct drm_i915_gem_object *obj, *on;
1473         struct drm_mm_node tmp = {};
1474         const struct invalid_insert {
1475                 u64 size;
1476                 u64 alignment;
1477                 u64 start, end;
1478         } invalid_insert[] = {
1479                 {
1480                         ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1481                         0, ggtt->vm.total,
1482                 },
1483                 {
1484                         2*I915_GTT_PAGE_SIZE, 0,
1485                         0, I915_GTT_PAGE_SIZE,
1486                 },
1487                 {
1488                         -(u64)I915_GTT_PAGE_SIZE, 0,
1489                         0, 4*I915_GTT_PAGE_SIZE,
1490                 },
1491                 {
1492                         -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1493                         0, 4*I915_GTT_PAGE_SIZE,
1494                 },
1495                 {
1496                         I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1497                         I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1498                 },
1499                 {}
1500         }, *ii;
1501         LIST_HEAD(objects);
1502         u64 total;
1503         int err = -ENODEV;
1504
1505         /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1506          * for the node, evicting if required.
1507          */
1508
1509         /* Check a couple of obviously invalid requests */
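        /*
         * Each invalid_insert[] entry is unsatisfiable by construction:
         * larger than the GTT, larger than the requested range, wrapping
         * past the end of the address space, or with no suitably aligned
         * offset inside [start, end]. All must fail with -ENOSPC.
         */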
1510         for (ii = invalid_insert; ii->size; ii++) {
1511                 mutex_lock(&ggtt->vm.mutex);
1512                 err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1513                                           ii->size, ii->alignment,
1514                                           I915_COLOR_UNEVICTABLE,
1515                                           ii->start, ii->end,
1516                                           0);
1517                 mutex_unlock(&ggtt->vm.mutex);
1518                 if (err != -ENOSPC) {
1519                         pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) was not rejected with -ENOSPC (err=%d)\n",
1520                                ii->size, ii->alignment, ii->start, ii->end,
1521                                err);
1522                         return -EINVAL;
1523                 }
1524         }
1525
1526         /* Start by filling the GGTT */
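        /*
         * With no alignment constraint and the whole GGTT as the range,
         * i915_gem_gtt_insert() should pack single-page nodes until the
         * address space is exhausted. Each vma is pinned so that nothing
         * can be evicted behind our back before we check below.
         */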
1527         for (total = 0;
1528              total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1529              total += I915_GTT_PAGE_SIZE) {
1530                 struct i915_vma *vma;
1531
1532                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1533                                                       I915_GTT_PAGE_SIZE);
1534                 if (IS_ERR(obj)) {
1535                         err = PTR_ERR(obj);
1536                         goto out;
1537                 }
1538
1539                 err = i915_gem_object_pin_pages(obj);
1540                 if (err) {
1541                         i915_gem_object_put(obj);
1542                         goto out;
1543                 }
1544
1545                 list_add(&obj->st_link, &objects);
1546
1547                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1548                 if (IS_ERR(vma)) {
1549                         err = PTR_ERR(vma);
1550                         goto out;
1551                 }
1552
1553                 mutex_lock(&ggtt->vm.mutex);
1554                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1555                                           obj->base.size, 0, obj->cache_level,
1556                                           0, ggtt->vm.total,
1557                                           0);
1558                 mutex_unlock(&ggtt->vm.mutex);
1559                 if (err == -ENOSPC) {
1560                         /* maxed out the GGTT space */
1561                         i915_gem_object_put(obj);
1562                         break;
1563                 }
1564                 if (err) {
1565                         pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1566                                total, ggtt->vm.total, err);
1567                         goto out;
1568                 }
1569                 track_vma_bind(vma);
1570                 __i915_vma_pin(vma);
1571
1572                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1573         }
1574
1575         list_for_each_entry(obj, &objects, st_link) {
1576                 struct i915_vma *vma;
1577
1578                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1579                 if (IS_ERR(vma)) {
1580                         err = PTR_ERR(vma);
1581                         goto out;
1582                 }
1583
1584                 if (!drm_mm_node_allocated(&vma->node)) {
1585                         pr_err("VMA was unexpectedly evicted!\n");
1586                         err = -EINVAL;
1587                         goto out;
1588                 }
1589
1590                 __i915_vma_unpin(vma);
1591         }
1592
1593         /* If we then reinsert, we should find the same hole */
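        /*
         * The GGTT is full, so after unbinding a node its old slot is
         * the only free hole; reinsertion must return the node to
         * exactly the offset we recorded.
         */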
1594         list_for_each_entry_safe(obj, on, &objects, st_link) {
1595                 struct i915_vma *vma;
1596                 u64 offset;
1597
1598                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1599                 if (IS_ERR(vma)) {
1600                         err = PTR_ERR(vma);
1601                         goto out;
1602                 }
1603
1604                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1605                 offset = vma->node.start;
1606
1607                 err = i915_vma_unbind(vma);
1608                 if (err) {
1609                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1610                         goto out;
1611                 }
1612
1613                 mutex_lock(&ggtt->vm.mutex);
1614                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1615                                           obj->base.size, 0, obj->cache_level,
1616                                           0, ggtt->vm.total,
1617                                           0);
1618                 mutex_unlock(&ggtt->vm.mutex);
1619                 if (err) {
1620                         pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1621                                offset, ggtt->vm.total, err);
1622                         goto out;
1623                 }
1624                 track_vma_bind(vma);
1625
1626                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1627                 if (vma->node.start != offset) {
1628                         pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1629                                offset, vma->node.start);
1630                         err = -EINVAL;
1631                         goto out;
1632                 }
1633         }
1634
1635         /* And then force evictions */
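        /*
         * All nodes are now unpinned, so each 2-page insert into the
         * full GGTT must make room by evicting previously inserted
         * nodes.
         */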
1636         for (total = 0;
1637              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1638              total += 2 * I915_GTT_PAGE_SIZE) {
1639                 struct i915_vma *vma;
1640
1641                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1642                                                       2 * I915_GTT_PAGE_SIZE);
1643                 if (IS_ERR(obj)) {
1644                         err = PTR_ERR(obj);
1645                         goto out;
1646                 }
1647
1648                 err = i915_gem_object_pin_pages(obj);
1649                 if (err) {
1650                         i915_gem_object_put(obj);
1651                         goto out;
1652                 }
1653
1654                 list_add(&obj->st_link, &objects);
1655
1656                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1657                 if (IS_ERR(vma)) {
1658                         err = PTR_ERR(vma);
1659                         goto out;
1660                 }
1661
1662                 mutex_lock(&ggtt->vm.mutex);
1663                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1664                                           obj->base.size, 0, obj->cache_level,
1665                                           0, ggtt->vm.total,
1666                                           0);
1667                 mutex_unlock(&ggtt->vm.mutex);
1668                 if (err) {
1669                         pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1670                                total, ggtt->vm.total, err);
1671                         goto out;
1672                 }
1673                 track_vma_bind(vma);
1674
1675                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1676         }
1677
1678 out:
1679         list_for_each_entry_safe(obj, on, &objects, st_link) {
1680                 i915_gem_object_unpin_pages(obj);
1681                 i915_gem_object_put(obj);
1682         }
1683         return err;
1684 }
1685
1686 int i915_gem_gtt_mock_selftests(void)
1687 {
1688         static const struct i915_subtest tests[] = {
1689                 SUBTEST(igt_mock_drunk),
1690                 SUBTEST(igt_mock_walk),
1691                 SUBTEST(igt_mock_pot),
1692                 SUBTEST(igt_mock_fill),
1693                 SUBTEST(igt_gtt_reserve),
1694                 SUBTEST(igt_gtt_insert),
1695         };
1696         struct drm_i915_private *i915;
1697         struct i915_ggtt *ggtt;
1698         int err;
1699
1700         i915 = mock_gem_device();
1701         if (!i915)
1702                 return -ENOMEM;
1703
1704         ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1705         if (!ggtt) {
1706                 err = -ENOMEM;
1707                 goto out_put;
1708         }
1709         mock_init_ggtt(i915, ggtt);
1710
1711         err = i915_subtests(tests, ggtt);
1712
1713         mock_device_flush(i915);
1714         i915_gem_drain_freed_objects(i915);
1715         mock_fini_ggtt(ggtt);
1716         kfree(ggtt);
1717 out_put:
1718         drm_dev_put(&i915->drm);
1719         return err;
1720 }
1721
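/* Submit an empty request on @ce and wait (up to HZ/5) for it to complete. */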
1722 static int context_sync(struct intel_context *ce)
1723 {
1724         struct i915_request *rq;
1725         long timeout;
1726
1727         rq = intel_context_create_request(ce);
1728         if (IS_ERR(rq))
1729                 return PTR_ERR(rq);
1730
1731         i915_request_get(rq);
1732         i915_request_add(rq);
1733
1734         timeout = i915_request_wait(rq, 0, HZ / 5);
1735         i915_request_put(rq);
1736
1737         return timeout < 0 ? -EIO : 0;
1738 }
1739
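/*
 * Submit a request that jumps to the batch at GTT address @addr. Where
 * supported, the initial breadcrumb marks the request as started, so a
 * subsequent hang can be attributed to the batch rather than to setup.
 */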
1740 static struct i915_request *
1741 submit_batch(struct intel_context *ce, u64 addr)
1742 {
1743         struct i915_request *rq;
1744         int err;
1745
1746         rq = intel_context_create_request(ce);
1747         if (IS_ERR(rq))
1748                 return rq;
1749
1750         err = 0;
1751         if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1752                 err = rq->engine->emit_init_breadcrumb(rq);
1753         if (err == 0)
1754                 err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1755
1756         if (err == 0)
1757                 i915_request_get(rq);
1758         i915_request_add(rq);
1759
1760         return err ? ERR_PTR(err) : rq;
1761 }
1762
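/*
 * Each 64-byte slot in the batch page doubles as a spinner: cs[4] starts
 * life as MI_NOOP, so execution falls through to the MI_BATCH_BUFFER_START
 * at cs[5], which jumps back to the start of the slot. end_spin() rewrites
 * cs[4] to MI_BATCH_BUFFER_END to let the batch terminate.
 */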
1763 static u32 *spinner(u32 *batch, int i)
1764 {
1765         return batch + i * 64 / sizeof(*batch) + 4;
1766 }
1767
1768 static void end_spin(u32 *batch, int i)
1769 {
1770         *spinner(batch, i) = MI_BATCH_BUFFER_END;
1771         wmb();
1772 }
1773
1774 static int igt_cs_tlb(void *arg)
1775 {
1776         const unsigned int count = PAGE_SIZE / 64;
1777         const unsigned int chunk_size = count * PAGE_SIZE;
1778         struct drm_i915_private *i915 = arg;
1779         struct drm_i915_gem_object *bbe, *act, *out;
1780         struct i915_gem_engines_iter it;
1781         struct i915_address_space *vm;
1782         struct i915_gem_context *ctx;
1783         struct intel_context *ce;
1784         struct i915_vma *vma;
1785         I915_RND_STATE(prng);
1786         struct file *file;
1787         unsigned int i;
1788         u32 *result;
1789         u32 *batch;
1790         int err = 0;
1791
1792         /*
1793          * Our mission here is to fool the hardware into executing from a
1794          * stale address: if it has not seen the batch move (because the
1795          * TLB invalidate is missing), the writes will be lost.
1796          */
1797
1798         file = mock_file(i915);
1799         if (IS_ERR(file))
1800                 return PTR_ERR(file);
1801
1802         ctx = live_context(i915, file);
1803         if (IS_ERR(ctx)) {
1804                 err = PTR_ERR(ctx);
1805                 goto out_unlock;
1806         }
1807
1808         vm = i915_gem_context_get_vm_rcu(ctx);
1809         if (i915_is_ggtt(vm))
1810                 goto out_vm;
1811
1812         /* Create two pages: the dummy with which we prefill the TLB, and the intended target */
1813         bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1814         if (IS_ERR(bbe)) {
1815                 err = PTR_ERR(bbe);
1816                 goto out_vm;
1817         }
1818
1819         batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
1820         if (IS_ERR(batch)) {
1821                 err = PTR_ERR(batch);
1822                 goto out_put_bbe;
1823         }
1824         memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1825         i915_gem_object_flush_map(bbe);
1826         i915_gem_object_unpin_map(bbe);
1827
1828         act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1829         if (IS_ERR(act)) {
1830                 err = PTR_ERR(act);
1831                 goto out_put_bbe;
1832         }
1833
1834         /* Track the execution of each request by writing into a different slot */
1835         batch = i915_gem_object_pin_map(act, I915_MAP_WC);
1836         if (IS_ERR(batch)) {
1837                 err = PTR_ERR(batch);
1838                 goto out_put_act;
1839         }
1840         for (i = 0; i < count; i++) {
1841                 u32 *cs = batch + i * 64 / sizeof(*cs);
1842                 u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1843
1844                 GEM_BUG_ON(INTEL_GEN(i915) < 6);
1845                 cs[0] = MI_STORE_DWORD_IMM_GEN4;
1846                 if (INTEL_GEN(i915) >= 8) {
1847                         cs[1] = lower_32_bits(addr);
1848                         cs[2] = upper_32_bits(addr);
1849                         cs[3] = i;
1850                         cs[4] = MI_NOOP;
1851                         cs[5] = MI_BATCH_BUFFER_START_GEN8;
1852                 } else {
1853                         cs[1] = 0;
1854                         cs[2] = lower_32_bits(addr);
1855                         cs[3] = i;
1856                         cs[4] = MI_NOOP;
1857                         cs[5] = MI_BATCH_BUFFER_START;
1858                 }
1859         }
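        /*
         * Resulting 64-byte slot layout (gen8+ shown):
         *   cs[0]    MI_STORE_DWORD_IMM_GEN4
         *   cs[1..2] address of result[i] at the top of the address space
         *   cs[3]    the slot index i
         *   cs[4]    MI_NOOP, flipped to MI_BATCH_BUFFER_END by end_spin()
         *   cs[5]    MI_BATCH_BUFFER_START_GEN8
         *   cs[6..7] branch target, patched each pass once the slot's
         *            GTT address is known
         */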
1860
1861         out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1862         if (IS_ERR(out)) {
1863                 err = PTR_ERR(out);
1864                 goto out_put_batch;
1865         }
1866         i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
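        /*
         * Pin the result page at the very top of the address space,
         * vm->total - PAGE_SIZE, to match the store-dword addresses
         * baked into the batch above.
         */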
1867
1868         vma = i915_vma_instance(out, vm, NULL);
1869         if (IS_ERR(vma)) {
1870                 err = PTR_ERR(vma);
1871                 goto out_put_batch;
1872         }
1873
1874         err = i915_vma_pin(vma, 0, 0,
1875                            PIN_USER |
1876                            PIN_OFFSET_FIXED |
1877                            (vm->total - PAGE_SIZE));
1878         if (err)
1879                 goto out_put_out;
1880         GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1881
1882         result = i915_gem_object_pin_map(out, I915_MAP_WB);
1883         if (IS_ERR(result)) {
1884                 err = PTR_ERR(result);
1885                 goto out_put_out;
1886         }
1887
1888         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1889                 IGT_TIMEOUT(end_time);
1890                 unsigned long pass = 0;
1891
1892                 if (!intel_engine_can_store_dword(ce->engine))
1893                         continue;
1894
1895                 while (!__igt_timeout(end_time, NULL)) {
1896                         struct i915_request *rq;
1897                         u64 offset;
1898
1899                         offset = igt_random_offset(&prng,
1900                                                    0, vm->total - PAGE_SIZE,
1901                                                    chunk_size, PAGE_SIZE);
1902
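                        /*
                         * Preallocate the page tables for the chunk up
                         * front, so that the rebinds below only need to
                         * rewrite PTEs via vm->insert_entries().
                         */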
1903                         err = vm->allocate_va_range(vm, offset, chunk_size);
1904                         if (err)
1905                                 goto end;
1906
1907                         memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1908
1909                         vma = i915_vma_instance(bbe, vm, NULL);
1910                         if (IS_ERR(vma)) {
1911                                 err = PTR_ERR(vma);
1912                                 goto end;
1913                         }
1914
1915                         err = vma->ops->set_pages(vma);
1916                         if (err)
1917                                 goto end;
1918
1919                         /* Prime the TLB with the dummy pages */
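                        /*
                         * Point each of the chunk's addresses at the same
                         * dummy page (all MI_BATCH_BUFFER_END) and execute
                         * from every one of them, loading those
                         * translations into the TLB.
                         */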
1920                         for (i = 0; i < count; i++) {
1921                                 vma->node.start = offset + i * PAGE_SIZE;
1922                                 vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1923
1924                                 rq = submit_batch(ce, vma->node.start);
1925                                 if (IS_ERR(rq)) {
1926                                         err = PTR_ERR(rq);
1927                                         goto end;
1928                                 }
1929                                 i915_request_put(rq);
1930                         }
1931
1932                         vma->ops->clear_pages(vma);
1933
1934                         err = context_sync(ce);
1935                         if (err) {
1936                                 pr_err("%s: dummy setup timed out\n",
1937                                        ce->engine->name);
1938                                 goto end;
1939                         }
1940
1941                         vma = i915_vma_instance(act, vm, NULL);
1942                         if (IS_ERR(vma)) {
1943                                 err = PTR_ERR(vma);
1944                                 goto end;
1945                         }
1946
1947                         err = vma->ops->set_pages(vma);
1948                         if (err)
1949                                 goto end;
1950
1951                         /* Replace the TLB with target batches */
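                        /*
                         * Rebind each address to the real batch page. If
                         * the TLB was invalidated on rebind, execution
                         * reaches the store-dword; if not, the stale dummy
                         * page ends the batch immediately and the write to
                         * result[i] is lost.
                         */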
1952                         for (i = 0; i < count; i++) {
1953                                 struct i915_request *rq;
1954                                 u32 *cs = batch + i * 64 / sizeof(*cs);
1955                                 u64 addr;
1956
1957                                 vma->node.start = offset + i * PAGE_SIZE;
1958                                 vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1959
1960                                 addr = vma->node.start + i * 64;
1961                                 cs[4] = MI_NOOP;
1962                                 cs[6] = lower_32_bits(addr);
1963                                 cs[7] = upper_32_bits(addr);
1964                                 wmb();
1965
1966                                 rq = submit_batch(ce, addr);
1967                                 if (IS_ERR(rq)) {
1968                                         err = PTR_ERR(rq);
1969                                         goto end;
1970                                 }
1971
1972                                 /* Wait until the context chain has started */
1973                                 if (i == 0) {
1974                                         while (READ_ONCE(result[i]) &&
1975                                                !i915_request_completed(rq))
1976                                                 cond_resched();
1977                                 } else {
1978                                         end_spin(batch, i - 1);
1979                                 }
1980
1981                                 i915_request_put(rq);
1982                         }
1983                         end_spin(batch, count - 1);
1984
1985                         vma->ops->clear_pages(vma);
1986
1987                         err = context_sync(ce);
1988                         if (err) {
1989                                 pr_err("%s: writes timed out\n",
1990                                        ce->engine->name);
1991                                 goto end;
1992                         }
1993
1994                         for (i = 0; i < count; i++) {
1995                                 if (result[i] != i) {
1996                                         pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
1997                                                ce->engine->name, pass,
1998                                                offset, i, result[i], i);
1999                                         err = -EINVAL;
2000                                         goto end;
2001                                 }
2002                         }
2003
2004                         vm->clear_range(vm, offset, chunk_size);
2005                         pass++;
2006                 }
2007         }
2008 end:
2009         if (igt_flush_test(i915))
2010                 err = -EIO;
2011         i915_gem_context_unlock_engines(ctx);
2012         i915_gem_object_unpin_map(out);
2013 out_put_out:
2014         i915_gem_object_put(out);
2015 out_put_batch:
2016         i915_gem_object_unpin_map(act);
2017 out_put_act:
2018         i915_gem_object_put(act);
2019 out_put_bbe:
2020         i915_gem_object_put(bbe);
2021 out_vm:
2022         i915_vm_put(vm);
2023 out_unlock:
2024         fput(file);
2025         return err;
2026 }
2027
2028 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2029 {
2030         static const struct i915_subtest tests[] = {
2031                 SUBTEST(igt_ppgtt_alloc),
2032                 SUBTEST(igt_ppgtt_lowlevel),
2033                 SUBTEST(igt_ppgtt_drunk),
2034                 SUBTEST(igt_ppgtt_walk),
2035                 SUBTEST(igt_ppgtt_pot),
2036                 SUBTEST(igt_ppgtt_fill),
2037                 SUBTEST(igt_ppgtt_shrink),
2038                 SUBTEST(igt_ppgtt_shrink_boom),
2039                 SUBTEST(igt_ggtt_lowlevel),
2040                 SUBTEST(igt_ggtt_drunk),
2041                 SUBTEST(igt_ggtt_walk),
2042                 SUBTEST(igt_ggtt_pot),
2043                 SUBTEST(igt_ggtt_fill),
2044                 SUBTEST(igt_ggtt_page),
2045                 SUBTEST(igt_cs_tlb),
2046         };
2047
2048         GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2049
2050         return i915_subtests(tests, i915);
2051 }