drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6
7 #include <linux/prime_numbers.h>
8
9 #include "gt/intel_engine_pm.h"
10 #include "gt/intel_gpu_commands.h"
11 #include "gt/intel_gt.h"
12 #include "gt/intel_gt_pm.h"
13 #include "gem/i915_gem_region.h"
14 #include "huge_gem_object.h"
15 #include "i915_selftest.h"
16 #include "selftests/i915_random.h"
17 #include "selftests/igt_flush_test.h"
18 #include "selftests/igt_mmap.h"
19
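/*
 * Tile geometry used by the partial-mmap checks below: width and stride are
 * in bytes, height is in rows, and size is log2 of the tile size in bytes
 * (see setup_tile_size()). tiling and swizzle take the I915_TILING_* and
 * I915_BIT_6_SWIZZLE_* values respectively.
 */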
20 struct tile {
21         unsigned int width;
22         unsigned int height;
23         unsigned int stride;
24         unsigned int size;
25         unsigned int tiling;
26         unsigned int swizzle;
27 };
28
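/*
 * Extract address bit 'bit' and move it down to bit 6, ready to be XORed
 * into a tiled offset. This mirrors the hardware's bit-6 swizzling, which
 * folds higher address bits into bit 6 of the tiled address.
 */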
29 static u64 swizzle_bit(unsigned int bit, u64 offset)
30 {
31         return (offset & BIT_ULL(bit)) >> (bit - 6);
32 }
33
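/*
 * Convert a linear byte offset into the object into the byte offset the
 * tiling hardware is expected to use, then apply bit-6 swizzling. The
 * linear offset is first split into (x, y) within the surface using the
 * stride, then into a tile index plus an (x, y) within that tile.
 *
 * Worked example (illustrative numbers only; X-tiling with 512x8 tiles,
 * i.e. tile->size == 12, and a 2048 byte stride): v = 5000 gives y = 2,
 * x = 904; the tile row base is 0, so v = 2 * 512 = 1024, plus
 * (904 / 512) << 12 = 4096, plus 904 % 512 = 392, i.e. 5512 -- row 2,
 * byte 392 of the second tile in the row.
 */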
34 static u64 tiled_offset(const struct tile *tile, u64 v)
35 {
36         u64 x, y;
37
38         if (tile->tiling == I915_TILING_NONE)
39                 return v;
40
41         y = div64_u64_rem(v, tile->stride, &x);
42         v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
43
44         if (tile->tiling == I915_TILING_X) {
45                 v += y * tile->width;
46                 v += div64_u64_rem(x, tile->width, &x) << tile->size;
47                 v += x;
48         } else if (tile->width == 128) {
49                 const unsigned int ytile_span = 16;
50                 const unsigned int ytile_height = 512;
51
52                 v += y * ytile_span;
53                 v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
54                 v += x;
55         } else {
56                 const unsigned int ytile_span = 32;
57                 const unsigned int ytile_height = 256;
58
59                 v += y * ytile_span;
60                 v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
61                 v += x;
62         }
63
64         switch (tile->swizzle) {
65         case I915_BIT_6_SWIZZLE_9:
66                 v ^= swizzle_bit(9, v);
67                 break;
68         case I915_BIT_6_SWIZZLE_9_10:
69                 v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
70                 break;
71         case I915_BIT_6_SWIZZLE_9_11:
72                 v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
73                 break;
74         case I915_BIT_6_SWIZZLE_9_10_11:
75                 v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
76                 break;
77         }
78
79         return v;
80 }
81
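/*
 * Pick one random page of the object, pin a partial GGTT view containing
 * it, write the page index through the GTT iomap, then verify via a CPU
 * read (kmap + clflush) that the write landed at the address predicted by
 * tiled_offset(). A mismatch means the GTT view and our manual swizzle
 * calculation disagree.
 */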
82 static int check_partial_mapping(struct drm_i915_gem_object *obj,
83                                  const struct tile *tile,
84                                  struct rnd_state *prng)
85 {
86         const unsigned long npages = obj->base.size / PAGE_SIZE;
87         struct i915_ggtt_view view;
88         struct i915_vma *vma;
89         unsigned long page;
90         u32 __iomem *io;
91         struct page *p;
92         unsigned int n;
93         u64 offset;
94         u32 *cpu;
95         int err;
96
97         err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
98         if (err) {
99                 pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
100                        tile->tiling, tile->stride, err);
101                 return err;
102         }
103
104         GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
105         GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
106
107         i915_gem_object_lock(obj, NULL);
108         err = i915_gem_object_set_to_gtt_domain(obj, true);
109         i915_gem_object_unlock(obj);
110         if (err) {
111                 pr_err("Failed to flush to GTT write domain; err=%d\n", err);
112                 return err;
113         }
114
115         page = i915_prandom_u32_max_state(npages, prng);
116         view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
117
118         vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
119         if (IS_ERR(vma)) {
120                 pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
121                        page, (int)PTR_ERR(vma));
122                 return PTR_ERR(vma);
123         }
124
125         n = page - view.partial.offset;
126         GEM_BUG_ON(n >= view.partial.size);
127
128         io = i915_vma_pin_iomap(vma);
129         i915_vma_unpin(vma);
130         if (IS_ERR(io)) {
131                 pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
132                        page, (int)PTR_ERR(io));
133                 err = PTR_ERR(io);
134                 goto out;
135         }
136
137         iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
138         i915_vma_unpin_iomap(vma);
139
140         offset = tiled_offset(tile, page << PAGE_SHIFT);
141         if (offset >= obj->base.size)
142                 goto out;
143
144         intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
145
146         p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
147         cpu = kmap(p) + offset_in_page(offset);
148         drm_clflush_virt_range(cpu, sizeof(*cpu));
149         if (*cpu != (u32)page) {
150                 pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
151                        page, n,
152                        view.partial.offset,
153                        view.partial.size,
154                        vma->size >> PAGE_SHIFT,
155                        tile->tiling ? tile_row_pages(obj) : 0,
156                        vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
157                        offset >> PAGE_SHIFT,
158                        (unsigned int)offset_in_page(offset),
159                        offset,
160                        (u32)page, *cpu);
161                 err = -EINVAL;
162         }
163         *cpu = 0;
164         drm_clflush_virt_range(cpu, sizeof(*cpu));
165         kunmap(p);
166
167 out:
168         __i915_vma_put(vma);
169         return err;
170 }
171
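/*
 * Exhaustive variant of check_partial_mapping(): walk every prime-numbered
 * page of the object (rather than a single random one) until the timeout
 * expires, verifying each write through a partial GGTT view as above.
 */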
172 static int check_partial_mappings(struct drm_i915_gem_object *obj,
173                                   const struct tile *tile,
174                                   unsigned long end_time)
175 {
176         const unsigned int nreal = obj->scratch / PAGE_SIZE;
177         const unsigned long npages = obj->base.size / PAGE_SIZE;
178         struct i915_vma *vma;
179         unsigned long page;
180         int err;
181
182         err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
183         if (err) {
184                 pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
185                        tile->tiling, tile->stride, err);
186                 return err;
187         }
188
189         GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
190         GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
191
192         i915_gem_object_lock(obj, NULL);
193         err = i915_gem_object_set_to_gtt_domain(obj, true);
194         i915_gem_object_unlock(obj);
195         if (err) {
196                 pr_err("Failed to flush to GTT write domain; err=%d\n", err);
197                 return err;
198         }
199
200         for_each_prime_number_from(page, 1, npages) {
201                 struct i915_ggtt_view view =
202                         compute_partial_view(obj, page, MIN_CHUNK_PAGES);
203                 u32 __iomem *io;
204                 struct page *p;
205                 unsigned int n;
206                 u64 offset;
207                 u32 *cpu;
208
209                 GEM_BUG_ON(view.partial.size > nreal);
210                 cond_resched();
211
212                 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
213                 if (IS_ERR(vma)) {
214                         pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
215                                page, (int)PTR_ERR(vma));
216                         return PTR_ERR(vma);
217                 }
218
219                 n = page - view.partial.offset;
220                 GEM_BUG_ON(n >= view.partial.size);
221
222                 io = i915_vma_pin_iomap(vma);
223                 i915_vma_unpin(vma);
224                 if (IS_ERR(io)) {
225                         pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
226                                page, (int)PTR_ERR(io));
227                         return PTR_ERR(io);
228                 }
229
230                 iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
231                 i915_vma_unpin_iomap(vma);
232
233                 offset = tiled_offset(tile, page << PAGE_SHIFT);
234                 if (offset >= obj->base.size)
235                         continue;
236
237                 intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
238
239                 p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
240                 cpu = kmap(p) + offset_in_page(offset);
241                 drm_clflush_virt_range(cpu, sizeof(*cpu));
242                 if (*cpu != (u32)page) {
243                         pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
244                                page, n,
245                                view.partial.offset,
246                                view.partial.size,
247                                vma->size >> PAGE_SHIFT,
248                                tile->tiling ? tile_row_pages(obj) : 0,
249                                vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
250                                offset >> PAGE_SHIFT,
251                                (unsigned int)offset_in_page(offset),
252                                offset,
253                                (u32)page, *cpu);
254                         err = -EINVAL;
255                 }
256                 *cpu = 0;
257                 drm_clflush_virt_range(cpu, sizeof(*cpu));
258                 kunmap(p);
259                 if (err)
260                         return err;
261
262                 __i915_vma_put(vma);
263
264                 if (igt_timeout(end_time,
265                                 "%s: timed out after tiling=%d stride=%d\n",
266                                 __func__, tile->tiling, tile->stride))
267                         return -EINTR;
268         }
269
270         return 0;
271 }
272
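/*
 * Fill in the tile geometry for the chosen tiling mode on this platform
 * (gen2: 128x16, 128-byte Y-tiling: 128x32, otherwise 512x8) and return
 * the maximum fence pitch in units of tile widths.
 */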
273 static unsigned int
274 setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
275 {
276         if (GRAPHICS_VER(i915) <= 2) {
277                 tile->height = 16;
278                 tile->width = 128;
279                 tile->size = 11;
280         } else if (tile->tiling == I915_TILING_Y &&
281                    HAS_128_BYTE_Y_TILING(i915)) {
282                 tile->height = 32;
283                 tile->width = 128;
284                 tile->size = 12;
285         } else {
286                 tile->height = 8;
287                 tile->width = 512;
288                 tile->size = 12;
289         }
290
291         if (GRAPHICS_VER(i915) < 4)
292                 return 8192 / tile->width;
293         else if (GRAPHICS_VER(i915) < 7)
294                 return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
295         else
296                 return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
297 }
298
299 static int igt_partial_tiling(void *arg)
300 {
301         const unsigned int nreal = 1 << 12; /* largest tile row x2 */
302         struct drm_i915_private *i915 = arg;
303         struct drm_i915_gem_object *obj;
304         intel_wakeref_t wakeref;
305         int tiling;
306         int err;
307
308         if (!i915_ggtt_has_aperture(&i915->ggtt))
309                 return 0;
310
311         /* We want to check the page mapping and fencing of a large object
312          * mmapped through the GTT. The object we create is larger than can
313          * possibly be mmapped as a whole, and so we must use partial GGTT vma.
314          * We then check that a write through each partial GGTT vma ends up
315          * in the right set of pages within the object, and with the expected
316          * tiling, which we verify by manual swizzling.
317          */
318
319         obj = huge_gem_object(i915,
320                               nreal << PAGE_SHIFT,
321                               (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
322         if (IS_ERR(obj))
323                 return PTR_ERR(obj);
324
325         err = i915_gem_object_pin_pages_unlocked(obj);
326         if (err) {
327                 pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
328                        nreal, obj->base.size / PAGE_SIZE, err);
329                 goto out;
330         }
331
332         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
333
334         if (1) {
335                 IGT_TIMEOUT(end);
336                 struct tile tile;
337
338                 tile.height = 1;
339                 tile.width = 1;
340                 tile.size = 0;
341                 tile.stride = 0;
342                 tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
343                 tile.tiling = I915_TILING_NONE;
344
345                 err = check_partial_mappings(obj, &tile, end);
346                 if (err && err != -EINTR)
347                         goto out_unlock;
348         }
349
350         for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
351                 IGT_TIMEOUT(end);
352                 unsigned int max_pitch;
353                 unsigned int pitch;
354                 struct tile tile;
355
356                 if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
357                         /*
358                          * The swizzling pattern is actually unknown as it
359                          * varies based on physical address of each page.
360                          * See i915_gem_detect_bit_6_swizzle().
361                          */
362                         break;
363
364                 tile.tiling = tiling;
365                 switch (tiling) {
366                 case I915_TILING_X:
367                         tile.swizzle = i915->ggtt.bit_6_swizzle_x;
368                         break;
369                 case I915_TILING_Y:
370                         tile.swizzle = i915->ggtt.bit_6_swizzle_y;
371                         break;
372                 }
373
374                 GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
375                 if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
376                     tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
377                         continue;
378
379                 max_pitch = setup_tile_size(&tile, i915);
380
381                 for (pitch = max_pitch; pitch; pitch >>= 1) {
382                         tile.stride = tile.width * pitch;
383                         err = check_partial_mappings(obj, &tile, end);
384                         if (err == -EINTR)
385                                 goto next_tiling;
386                         if (err)
387                                 goto out_unlock;
388
389                         if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
390                                 tile.stride = tile.width * (pitch - 1);
391                                 err = check_partial_mappings(obj, &tile, end);
392                                 if (err == -EINTR)
393                                         goto next_tiling;
394                                 if (err)
395                                         goto out_unlock;
396                         }
397
398                         if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
399                                 tile.stride = tile.width * (pitch + 1);
400                                 err = check_partial_mappings(obj, &tile, end);
401                                 if (err == -EINTR)
402                                         goto next_tiling;
403                                 if (err)
404                                         goto out_unlock;
405                         }
406                 }
407
408                 if (GRAPHICS_VER(i915) >= 4) {
409                         for_each_prime_number(pitch, max_pitch) {
410                                 tile.stride = tile.width * pitch;
411                                 err = check_partial_mappings(obj, &tile, end);
412                                 if (err == -EINTR)
413                                         goto next_tiling;
414                                 if (err)
415                                         goto out_unlock;
416                         }
417                 }
418
419 next_tiling: ;
420         }
421
422 out_unlock:
423         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
424         i915_gem_object_unpin_pages(obj);
425 out:
426         i915_gem_object_put(obj);
427         return err;
428 }
429
430 static int igt_smoke_tiling(void *arg)
431 {
432         const unsigned int nreal = 1 << 12; /* largest tile row x2 */
433         struct drm_i915_private *i915 = arg;
434         struct drm_i915_gem_object *obj;
435         intel_wakeref_t wakeref;
436         I915_RND_STATE(prng);
437         unsigned long count;
438         IGT_TIMEOUT(end);
439         int err;
440
441         if (!i915_ggtt_has_aperture(&i915->ggtt))
442                 return 0;
443
444         /*
445          * igt_partial_tiling() does an exhaustive check of partial tiling
446          * chunking, but will undoubtedly run out of time. Here, we do a
447          * randomised search and hope over many runs of 1s with different
448          * seeds we will do a thorough check.
449          *
450          * Remember to look at the st_seed if we see a flip-flop in BAT!
451          */
452
453         if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
454                 return 0;
455
456         obj = huge_gem_object(i915,
457                               nreal << PAGE_SHIFT,
458                               (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
459         if (IS_ERR(obj))
460                 return PTR_ERR(obj);
461
462         err = i915_gem_object_pin_pages_unlocked(obj);
463         if (err) {
464                 pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
465                        nreal, obj->base.size / PAGE_SIZE, err);
466                 goto out;
467         }
468
469         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
470
471         count = 0;
472         do {
473                 struct tile tile;
474
475                 tile.tiling =
476                         i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
477                 switch (tile.tiling) {
478                 case I915_TILING_NONE:
479                         tile.height = 1;
480                         tile.width = 1;
481                         tile.size = 0;
482                         tile.stride = 0;
483                         tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
484                         break;
485
486                 case I915_TILING_X:
487                         tile.swizzle = i915->ggtt.bit_6_swizzle_x;
488                         break;
489                 case I915_TILING_Y:
490                         tile.swizzle = i915->ggtt.bit_6_swizzle_y;
491                         break;
492                 }
493
494                 if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
495                     tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
496                         continue;
497
498                 if (tile.tiling != I915_TILING_NONE) {
499                         unsigned int max_pitch = setup_tile_size(&tile, i915);
500
501                         tile.stride =
502                                 i915_prandom_u32_max_state(max_pitch, &prng);
503                         tile.stride = (1 + tile.stride) * tile.width;
504                         if (GRAPHICS_VER(i915) < 4)
505                                 tile.stride = rounddown_pow_of_two(tile.stride);
506                 }
507
508                 err = check_partial_mapping(obj, &tile, &prng);
509                 if (err)
510                         break;
511
512                 count++;
513         } while (!__igt_timeout(end, NULL));
514
515         pr_info("%s: Completed %lu trials\n", __func__, count);
516
517         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
518         i915_gem_object_unpin_pages(obj);
519 out:
520         i915_gem_object_put(obj);
521         return err;
522 }
523
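/*
 * Mark the object as being written by an otherwise empty kernel request on
 * every uabi engine, then drop our reference so the object stays alive
 * only through those active requests. igt_mmap_offset_exhaustion() uses
 * this to create busy objects that die while still active and must be
 * reaped.
 */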
524 static int make_obj_busy(struct drm_i915_gem_object *obj)
525 {
526         struct drm_i915_private *i915 = to_i915(obj->base.dev);
527         struct intel_engine_cs *engine;
528
529         for_each_uabi_engine(engine, i915) {
530                 struct i915_request *rq;
531                 struct i915_vma *vma;
532                 struct i915_gem_ww_ctx ww;
533                 int err;
534
535                 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
536                 if (IS_ERR(vma))
537                         return PTR_ERR(vma);
538
539                 i915_gem_ww_ctx_init(&ww, false);
540 retry:
541                 err = i915_gem_object_lock(obj, &ww);
542                 if (!err)
543                         err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
544                 if (err)
545                         goto err;
546
547                 rq = intel_engine_create_kernel_request(engine);
548                 if (IS_ERR(rq)) {
549                         err = PTR_ERR(rq);
550                         goto err_unpin;
551                 }
552
553                 err = i915_request_await_object(rq, vma->obj, true);
554                 if (err == 0)
555                         err = i915_vma_move_to_active(vma, rq,
556                                                       EXEC_OBJECT_WRITE);
557
558                 i915_request_add(rq);
559 err_unpin:
560                 i915_vma_unpin(vma);
561 err:
562                 if (err == -EDEADLK) {
563                         err = i915_gem_ww_ctx_backoff(&ww);
564                         if (!err)
565                                 goto retry;
566                 }
567                 i915_gem_ww_ctx_fini(&ww);
568                 if (err)
569                         return err;
570         }
571
572         i915_gem_object_put(obj); /* leave it only alive via its active ref */
573         return 0;
574 }
575
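/*
 * Create a throwaway internal object of the given size and check that
 * reserving a GTT mmap offset for it returns the expected error code.
 */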
576 static bool assert_mmap_offset(struct drm_i915_private *i915,
577                                unsigned long size,
578                                int expected)
579 {
580         struct drm_i915_gem_object *obj;
581         u64 offset;
582         int ret;
583
584         obj = i915_gem_object_create_internal(i915, size);
585         if (IS_ERR(obj))
586                 return expected && expected == PTR_ERR(obj);
587
588         ret = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
589         i915_gem_object_put(obj);
590
591         return ret == expected;
592 }
593
594 static void disable_retire_worker(struct drm_i915_private *i915)
595 {
596         i915_gem_driver_unregister__shrinker(i915);
597         intel_gt_pm_get(&i915->gt);
598         cancel_delayed_work_sync(&i915->gt.requests.retire_work);
599 }
600
601 static void restore_retire_worker(struct drm_i915_private *i915)
602 {
603         igt_flush_test(i915);
604         intel_gt_pm_put(&i915->gt);
605         i915_gem_driver_register__shrinker(i915);
606 }
607
608 static void mmap_offset_lock(struct drm_i915_private *i915)
609         __acquires(&i915->drm.vma_offset_manager->vm_lock)
610 {
611         write_lock(&i915->drm.vma_offset_manager->vm_lock);
612 }
613
614 static void mmap_offset_unlock(struct drm_i915_private *i915)
615         __releases(&i915->drm.vma_offset_manager->vm_lock)
616 {
617         write_unlock(&i915->drm.vma_offset_manager->vm_lock);
618 }
619
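/*
 * Shrink the mmap offset space down to a single page by reserving all but
 * one page of every hole in the vma_offset_manager, then check that a one
 * page object still gets an offset, that a two page object fails with
 * -ENOSPC, and that once the hole is occupied no further offsets can be
 * assigned. Finally, churn busy objects that are freed while still active,
 * which we expect the reaper to clean up behind us.
 */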
620 static int igt_mmap_offset_exhaustion(void *arg)
621 {
622         struct drm_i915_private *i915 = arg;
623         struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
624         struct drm_i915_gem_object *obj;
625         struct drm_mm_node *hole, *next;
626         int loop, err = 0;
627         u64 offset;
628
629         /* Disable background reaper */
630         disable_retire_worker(i915);
631         GEM_BUG_ON(!i915->gt.awake);
632         intel_gt_retire_requests(&i915->gt);
633         i915_gem_drain_freed_objects(i915);
634
635         /* Trim the device mmap space to only a page */
636         mmap_offset_lock(i915);
637         loop = 1; /* PAGE_SIZE units */
638         list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
639                 struct drm_mm_node *resv;
640
641                 resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
642                 if (!resv) {
643                         err = -ENOMEM;
644                         goto out_park;
645                 }
646
647                 resv->start = drm_mm_hole_node_start(hole) + loop;
648                 resv->size = hole->hole_size - loop;
649                 resv->color = -1ul;
650                 loop = 0;
651
652                 if (!resv->size) {
653                         kfree(resv);
654                         continue;
655                 }
656
657                 pr_debug("Reserving hole [%llx + %llx]\n",
658                          resv->start, resv->size);
659
660                 err = drm_mm_reserve_node(mm, resv);
661                 if (err) {
662                         pr_err("Failed to trim VMA manager, err=%d\n", err);
663                         kfree(resv);
664                         goto out_park;
665                 }
666         }
667         GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
668         mmap_offset_unlock(i915);
669
670         /* Just fits! */
671         if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
672                 pr_err("Unable to insert object into single page hole\n");
673                 err = -EINVAL;
674                 goto out;
675         }
676
677         /* Too large */
678         if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
679                 pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
680                 err = -EINVAL;
681                 goto out;
682         }
683
684         /* Fill the hole, further allocation attempts should then fail */
685         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
686         if (IS_ERR(obj)) {
687                 err = PTR_ERR(obj);
688                 pr_err("Unable to create object for reclaimed hole\n");
689                 goto out;
690         }
691
692         err = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
693         if (err) {
694                 pr_err("Unable to insert object into reclaimed hole\n");
695                 goto err_obj;
696         }
697
698         if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
699                 pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
700                 err = -EINVAL;
701                 goto err_obj;
702         }
703
704         i915_gem_object_put(obj);
705
706         /* Now fill with busy dead objects that we expect to reap */
707         for (loop = 0; loop < 3; loop++) {
708                 if (intel_gt_is_wedged(&i915->gt))
709                         break;
710
711                 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
712                 if (IS_ERR(obj)) {
713                         err = PTR_ERR(obj);
714                         goto out;
715                 }
716
717                 err = make_obj_busy(obj);
718                 if (err) {
719                         pr_err("[loop %d] Failed to busy the object\n", loop);
720                         goto err_obj;
721                 }
722         }
723
724 out:
725         mmap_offset_lock(i915);
726 out_park:
727         drm_mm_for_each_node_safe(hole, next, mm) {
728                 if (hole->color != -1ul)
729                         continue;
730
731                 drm_mm_remove_node(hole);
732                 kfree(hole);
733         }
734         mmap_offset_unlock(i915);
735         restore_retire_worker(i915);
736         return err;
737 err_obj:
738         i915_gem_object_put(obj);
739         goto out;
740 }
741
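/*
 * gtt_set()/wc_set() fill the whole object with POISON_INUSE through a
 * GGTT iomap or a WC kernel map; gtt_check()/wc_check() then verify that
 * the POISON_FREE pattern written back through the userspace mmap reached
 * the backing store.
 */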
742 static int gtt_set(struct drm_i915_gem_object *obj)
743 {
744         struct i915_vma *vma;
745         void __iomem *map;
746         int err = 0;
747
748         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
749         if (IS_ERR(vma))
750                 return PTR_ERR(vma);
751
752         intel_gt_pm_get(vma->vm->gt);
753         map = i915_vma_pin_iomap(vma);
754         i915_vma_unpin(vma);
755         if (IS_ERR(map)) {
756                 err = PTR_ERR(map);
757                 goto out;
758         }
759
760         memset_io(map, POISON_INUSE, obj->base.size);
761         i915_vma_unpin_iomap(vma);
762
763 out:
764         intel_gt_pm_put(vma->vm->gt);
765         return err;
766 }
767
768 static int gtt_check(struct drm_i915_gem_object *obj)
769 {
770         struct i915_vma *vma;
771         void __iomem *map;
772         int err = 0;
773
774         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
775         if (IS_ERR(vma))
776                 return PTR_ERR(vma);
777
778         intel_gt_pm_get(vma->vm->gt);
779         map = i915_vma_pin_iomap(vma);
780         i915_vma_unpin(vma);
781         if (IS_ERR(map)) {
782                 err = PTR_ERR(map);
783                 goto out;
784         }
785
786         if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
787                 pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
788                        obj->mm.region->name);
789                 err = -EINVAL;
790         }
791         i915_vma_unpin_iomap(vma);
792
793 out:
794         intel_gt_pm_put(vma->vm->gt);
795         return err;
796 }
797
798 static int wc_set(struct drm_i915_gem_object *obj)
799 {
800         void *vaddr;
801
802         vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
803         if (IS_ERR(vaddr))
804                 return PTR_ERR(vaddr);
805
806         memset(vaddr, POISON_INUSE, obj->base.size);
807         i915_gem_object_flush_map(obj);
808         i915_gem_object_unpin_map(obj);
809
810         return 0;
811 }
812
813 static int wc_check(struct drm_i915_gem_object *obj)
814 {
815         void *vaddr;
816         int err = 0;
817
818         vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
819         if (IS_ERR(vaddr))
820                 return PTR_ERR(vaddr);
821
822         if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
823                 pr_err("%s: Write via mmap did not land in backing store (WC)\n",
824                        obj->mm.region->name);
825                 err = -EINVAL;
826         }
827         i915_gem_object_unpin_map(obj);
828
829         return err;
830 }
831
832 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
833 {
834         bool no_map;
835
836         if (type == I915_MMAP_TYPE_GTT &&
837             !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
838                 return false;
839
840         i915_gem_object_lock(obj, NULL);
841         no_map = (type != I915_MMAP_TYPE_GTT &&
842                   !i915_gem_object_has_struct_page(obj) &&
843                   !i915_gem_object_has_iomem(obj));
844         i915_gem_object_unlock(obj);
845
846         return !no_map;
847 }
848
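/*
 * Record the allowed placements on the object. For a single placement we
 * point at the persistent entry in i915->mm.regions rather than the
 * caller's array, presumably so that a short-lived pointer (such as the
 * &mr used by the callers below) does not need to outlive this call.
 */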
849 static void object_set_placements(struct drm_i915_gem_object *obj,
850                                   struct intel_memory_region **placements,
851                                   unsigned int n_placements)
852 {
853         GEM_BUG_ON(!n_placements);
854
855         if (n_placements == 1) {
856                 struct drm_i915_private *i915 = to_i915(obj->base.dev);
857                 struct intel_memory_region *mr = placements[0];
858
859                 obj->mm.placements = &i915->mm.regions[mr->id];
860                 obj->mm.n_placements = 1;
861         } else {
862                 obj->mm.placements = placements;
863                 obj->mm.n_placements = n_placements;
864         }
865 }
866
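/*
 * expand32() replicates a byte pattern into all four bytes of a u32.
 * __igt_mmap() fills the object with POISON_INUSE via a kernel mapping,
 * mmaps it with the requested mmap type, checks that userspace reads see
 * the poison, rewrites every dword with POISON_FREE through the mmap and
 * then verifies the writes landed in the backing store.
 */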
867 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
868 static int __igt_mmap(struct drm_i915_private *i915,
869                       struct drm_i915_gem_object *obj,
870                       enum i915_mmap_type type)
871 {
872         struct vm_area_struct *area;
873         unsigned long addr;
874         int err, i;
875         u64 offset;
876
877         if (!can_mmap(obj, type))
878                 return 0;
879
880         err = wc_set(obj);
881         if (err == -ENXIO)
882                 err = gtt_set(obj);
883         if (err)
884                 return err;
885
886         err = __assign_mmap_offset(obj, type, &offset, NULL);
887         if (err)
888                 return err;
889
890         addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
891         if (IS_ERR_VALUE(addr))
892                 return addr;
893
894         pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
895
896         area = vma_lookup(current->mm, addr);
897         if (!area) {
898                 pr_err("%s: Did not create a vm_area_struct for the mmap\n",
899                        obj->mm.region->name);
900                 err = -EINVAL;
901                 goto out_unmap;
902         }
903
904         for (i = 0; i < obj->base.size / sizeof(u32); i++) {
905                 u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
906                 u32 x;
907
908                 if (get_user(x, ux)) {
909                         pr_err("%s: Unable to read from mmap, offset:%zd\n",
910                                obj->mm.region->name, i * sizeof(x));
911                         err = -EFAULT;
912                         goto out_unmap;
913                 }
914
915                 if (x != expand32(POISON_INUSE)) {
916                         pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
917                                obj->mm.region->name,
918                                i * sizeof(x), x, expand32(POISON_INUSE));
919                         err = -EINVAL;
920                         goto out_unmap;
921                 }
922
923                 x = expand32(POISON_FREE);
924                 if (put_user(x, ux)) {
925                         pr_err("%s: Unable to write to mmap, offset:%zd\n",
926                                obj->mm.region->name, i * sizeof(x));
927                         err = -EFAULT;
928                         goto out_unmap;
929                 }
930         }
931
932         if (type == I915_MMAP_TYPE_GTT)
933                 intel_gt_flush_ggtt_writes(&i915->gt);
934
935         err = wc_check(obj);
936         if (err == -ENXIO)
937                 err = gtt_check(obj);
938 out_unmap:
939         vm_munmap(addr, obj->base.size);
940         return err;
941 }
942
943 static int igt_mmap(void *arg)
944 {
945         struct drm_i915_private *i915 = arg;
946         struct intel_memory_region *mr;
947         enum intel_region_id id;
948
949         for_each_memory_region(mr, i915, id) {
950                 unsigned long sizes[] = {
951                         PAGE_SIZE,
952                         mr->min_page_size,
953                         SZ_4M,
954                 };
955                 int i;
956
957                 for (i = 0; i < ARRAY_SIZE(sizes); i++) {
958                         struct drm_i915_gem_object *obj;
959                         int err;
960
961                         obj = i915_gem_object_create_region(mr, sizes[i], 0, I915_BO_ALLOC_USER);
962                         if (obj == ERR_PTR(-ENODEV))
963                                 continue;
964
965                         if (IS_ERR(obj))
966                                 return PTR_ERR(obj);
967
968                         object_set_placements(obj, &mr, 1);
969
970                         err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
971                         if (err == 0)
972                                 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
973
974                         i915_gem_object_put(obj);
975                         if (err)
976                                 return err;
977                 }
978         }
979
980         return 0;
981 }
982
983 static const char *repr_mmap_type(enum i915_mmap_type type)
984 {
985         switch (type) {
986         case I915_MMAP_TYPE_GTT: return "gtt";
987         case I915_MMAP_TYPE_WB: return "wb";
988         case I915_MMAP_TYPE_WC: return "wc";
989         case I915_MMAP_TYPE_UC: return "uc";
990         default: return "unknown";
991         }
992 }
993
994 static bool can_access(struct drm_i915_gem_object *obj)
995 {
996         bool access;
997
998         i915_gem_object_lock(obj, NULL);
999         access = i915_gem_object_has_struct_page(obj) ||
1000                 i915_gem_object_has_iomem(obj);
1001         i915_gem_object_unlock(obj);
1002
1003         return access;
1004 }
1005
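/*
 * Check that ptrace-style access via access_process_vm() and direct user
 * access through the mmap hit the same memory: write A with __put_user(),
 * read it back with access_process_vm(), write B with access_process_vm()
 * and FOLL_WRITE, then read it back with __get_user().
 */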
1006 static int __igt_mmap_access(struct drm_i915_private *i915,
1007                              struct drm_i915_gem_object *obj,
1008                              enum i915_mmap_type type)
1009 {
1010         unsigned long __user *ptr;
1011         unsigned long A, B;
1012         unsigned long x, y;
1013         unsigned long addr;
1014         int err;
1015         u64 offset;
1016
1017         memset(&A, 0xAA, sizeof(A));
1018         memset(&B, 0xBB, sizeof(B));
1019
1020         if (!can_mmap(obj, type) || !can_access(obj))
1021                 return 0;
1022
1023         err = __assign_mmap_offset(obj, type, &offset, NULL);
1024         if (err)
1025                 return err;
1026
1027         addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1028         if (IS_ERR_VALUE(addr))
1029                 return addr;
1030         ptr = (unsigned long __user *)addr;
1031
1032         err = __put_user(A, ptr);
1033         if (err) {
1034                 pr_err("%s(%s): failed to write into user mmap\n",
1035                        obj->mm.region->name, repr_mmap_type(type));
1036                 goto out_unmap;
1037         }
1038
1039         intel_gt_flush_ggtt_writes(&i915->gt);
1040
1041         err = access_process_vm(current, addr, &x, sizeof(x), 0);
1042         if (err != sizeof(x)) {
1043                 pr_err("%s(%s): access_process_vm() read failed\n",
1044                        obj->mm.region->name, repr_mmap_type(type));
1045                 goto out_unmap;
1046         }
1047
1048         err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
1049         if (err != sizeof(B)) {
1050                 pr_err("%s(%s): access_process_vm() write failed\n",
1051                        obj->mm.region->name, repr_mmap_type(type));
1052                 goto out_unmap;
1053         }
1054
1055         intel_gt_flush_ggtt_writes(&i915->gt);
1056
1057         err = __get_user(y, ptr);
1058         if (err) {
1059                 pr_err("%s(%s): failed to read from user mmap\n",
1060                        obj->mm.region->name, repr_mmap_type(type));
1061                 goto out_unmap;
1062         }
1063
1064         if (x != A || y != B) {
1065                 pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
1066                        obj->mm.region->name, repr_mmap_type(type),
1067                        x, y);
1068                 err = -EINVAL;
1069                 goto out_unmap;
1070         }
1071
1072 out_unmap:
1073         vm_munmap(addr, obj->base.size);
1074         return err;
1075 }
1076
1077 static int igt_mmap_access(void *arg)
1078 {
1079         struct drm_i915_private *i915 = arg;
1080         struct intel_memory_region *mr;
1081         enum intel_region_id id;
1082
1083         for_each_memory_region(mr, i915, id) {
1084                 struct drm_i915_gem_object *obj;
1085                 int err;
1086
1087                 obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
1088                 if (obj == ERR_PTR(-ENODEV))
1089                         continue;
1090
1091                 if (IS_ERR(obj))
1092                         return PTR_ERR(obj);
1093
1094                 object_set_placements(obj, &mr, 1);
1095
1096                 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
1097                 if (err == 0)
1098                         err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
1099                 if (err == 0)
1100                         err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
1101                 if (err == 0)
1102                         err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
1103
1104                 i915_gem_object_put(obj);
1105                 if (err)
1106                         return err;
1107         }
1108
1109         return 0;
1110 }
1111
1112 static int __igt_mmap_gpu(struct drm_i915_private *i915,
1113                           struct drm_i915_gem_object *obj,
1114                           enum i915_mmap_type type)
1115 {
1116         struct intel_engine_cs *engine;
1117         unsigned long addr;
1118         u32 __user *ux;
1119         u32 bbe;
1120         int err;
1121         u64 offset;
1122
1123         /*
1124          * Verify that the mmap access into the backing store aligns with
1125          * that of the GPU, i.e. that mmap is indeed writing into the same
1126          * page as being read by the GPU.
1127          */
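        /*
         * The check works by writing MI_BATCH_BUFFER_END into the first
         * dword via the mmap and then submitting the object as a batch on
         * every engine: if the write did not reach the pages the GPU reads,
         * the GPU executes the stale poison instead of an immediate batch
         * terminator, the request is expected to time out and the GT is
         * wedged.
         */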
1128
1129         if (!can_mmap(obj, type))
1130                 return 0;
1131
1132         err = wc_set(obj);
1133         if (err == -ENXIO)
1134                 err = gtt_set(obj);
1135         if (err)
1136                 return err;
1137
1138         err = __assign_mmap_offset(obj, type, &offset, NULL);
1139         if (err)
1140                 return err;
1141
1142         addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1143         if (IS_ERR_VALUE(addr))
1144                 return addr;
1145
1146         ux = u64_to_user_ptr((u64)addr);
1147         bbe = MI_BATCH_BUFFER_END;
1148         if (put_user(bbe, ux)) {
1149                 pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
1150                 err = -EFAULT;
1151                 goto out_unmap;
1152         }
1153
1154         if (type == I915_MMAP_TYPE_GTT)
1155                 intel_gt_flush_ggtt_writes(&i915->gt);
1156
1157         for_each_uabi_engine(engine, i915) {
1158                 struct i915_request *rq;
1159                 struct i915_vma *vma;
1160                 struct i915_gem_ww_ctx ww;
1161
1162                 vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
1163                 if (IS_ERR(vma)) {
1164                         err = PTR_ERR(vma);
1165                         goto out_unmap;
1166                 }
1167
1168                 i915_gem_ww_ctx_init(&ww, false);
1169 retry:
1170                 err = i915_gem_object_lock(obj, &ww);
1171                 if (!err)
1172                         err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
1173                 if (err)
1174                         goto out_ww;
1175
1176                 rq = i915_request_create(engine->kernel_context);
1177                 if (IS_ERR(rq)) {
1178                         err = PTR_ERR(rq);
1179                         goto out_unpin;
1180                 }
1181
1182                 err = i915_request_await_object(rq, vma->obj, false);
1183                 if (err == 0)
1184                         err = i915_vma_move_to_active(vma, rq, 0);
1185
1186                 err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
1187                 i915_request_get(rq);
1188                 i915_request_add(rq);
1189
1190                 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1191                         struct drm_printer p =
1192                                 drm_info_printer(engine->i915->drm.dev);
1193
1194                         pr_err("%s(%s, %s): Failed to execute batch\n",
1195                                __func__, engine->name, obj->mm.region->name);
1196                         intel_engine_dump(engine, &p,
1197                                           "%s\n", engine->name);
1198
1199                         intel_gt_set_wedged(engine->gt);
1200                         err = -EIO;
1201                 }
1202                 i915_request_put(rq);
1203
1204 out_unpin:
1205                 i915_vma_unpin(vma);
1206 out_ww:
1207                 if (err == -EDEADLK) {
1208                         err = i915_gem_ww_ctx_backoff(&ww);
1209                         if (!err)
1210                                 goto retry;
1211                 }
1212                 i915_gem_ww_ctx_fini(&ww);
1213                 if (err)
1214                         goto out_unmap;
1215         }
1216
1217 out_unmap:
1218         vm_munmap(addr, obj->base.size);
1219         return err;
1220 }
1221
1222 static int igt_mmap_gpu(void *arg)
1223 {
1224         struct drm_i915_private *i915 = arg;
1225         struct intel_memory_region *mr;
1226         enum intel_region_id id;
1227
1228         for_each_memory_region(mr, i915, id) {
1229                 struct drm_i915_gem_object *obj;
1230                 int err;
1231
1232                 obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
1233                 if (obj == ERR_PTR(-ENODEV))
1234                         continue;
1235
1236                 if (IS_ERR(obj))
1237                         return PTR_ERR(obj);
1238
1239                 object_set_placements(obj, &mr, 1);
1240
1241                 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
1242                 if (err == 0)
1243                         err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
1244
1245                 i915_gem_object_put(obj);
1246                 if (err)
1247                         return err;
1248         }
1249
1250         return 0;
1251 }
1252
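/*
 * Helpers for apply_to_page_range(): after prefaulting, every PTE in the
 * mmap'd range must be present; after the mmap has been revoked, every
 * PTE must have been cleared again.
 */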
1253 static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
1254 {
1255         if (!pte_present(*pte) || pte_none(*pte)) {
1256                 pr_err("missing PTE:%lx\n",
1257                        (addr - (unsigned long)data) >> PAGE_SHIFT);
1258                 return -EINVAL;
1259         }
1260
1261         return 0;
1262 }
1263
1264 static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
1265 {
1266         if (pte_present(*pte) && !pte_none(*pte)) {
1267                 pr_err("present PTE:%lx; expected to be revoked\n",
1268                        (addr - (unsigned long)data) >> PAGE_SHIFT);
1269                 return -EINVAL;
1270         }
1271
1272         return 0;
1273 }
1274
1275 static int check_present(unsigned long addr, unsigned long len)
1276 {
1277         return apply_to_page_range(current->mm, addr, len,
1278                                    check_present_pte, (void *)addr);
1279 }
1280
1281 static int check_absent(unsigned long addr, unsigned long len)
1282 {
1283         return apply_to_page_range(current->mm, addr, len,
1284                                    check_absent_pte, (void *)addr);
1285 }
1286
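/* Touch one byte in every page of the user range so all PTEs are populated. */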
1287 static int prefault_range(u64 start, u64 len)
1288 {
1289         const char __user *addr, *end;
1290         char __maybe_unused c;
1291         int err;
1292
1293         addr = u64_to_user_ptr(start);
1294         end = addr + len;
1295
1296         for (; addr < end; addr += PAGE_SIZE) {
1297                 err = __get_user(c, addr);
1298                 if (err)
1299                         return err;
1300         }
1301
1302         return __get_user(c, end - 1);
1303 }
1304
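/*
 * Prefault a fresh mmap, check its PTEs are present, then unbind the
 * object (and drop its pages for non-GTT mmaps) and check the PTEs have
 * been revoked. TTM-backed objects are the exception: they keep the
 * mapping intact by design, so for those we expect the PTEs to remain.
 */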
1305 static int __igt_mmap_revoke(struct drm_i915_private *i915,
1306                              struct drm_i915_gem_object *obj,
1307                              enum i915_mmap_type type)
1308 {
1309         unsigned long addr;
1310         int err;
1311         u64 offset;
1312
1313         if (!can_mmap(obj, type))
1314                 return 0;
1315
1316         err = __assign_mmap_offset(obj, type, &offset, NULL);
1317         if (err)
1318                 return err;
1319
1320         addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1321         if (IS_ERR_VALUE(addr))
1322                 return addr;
1323
1324         err = prefault_range(addr, obj->base.size);
1325         if (err)
1326                 goto out_unmap;
1327
1328         err = check_present(addr, obj->base.size);
1329         if (err) {
1330                 pr_err("%s: was not present\n", obj->mm.region->name);
1331                 goto out_unmap;
1332         }
1333
1334         /*
1335          * After unbinding the object from the GGTT, its address may be reused
1336          * for other objects. Ergo we have to revoke the previous mmap PTE
1337          * access as it no longer points to the same object.
1338          */
1339         err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
1340         if (err) {
1341                 pr_err("Failed to unbind object!\n");
1342                 goto out_unmap;
1343         }
1344
1345         if (type != I915_MMAP_TYPE_GTT) {
1346                 i915_gem_object_lock(obj, NULL);
1347                 __i915_gem_object_put_pages(obj);
1348                 i915_gem_object_unlock(obj);
1349                 if (i915_gem_object_has_pages(obj)) {
1350                         pr_err("Failed to put-pages object!\n");
1351                         err = -EINVAL;
1352                         goto out_unmap;
1353                 }
1354         }
1355
1356         if (!obj->ops->mmap_ops) {
1357                 err = check_absent(addr, obj->base.size);
1358                 if (err) {
1359                         pr_err("%s: was not absent\n", obj->mm.region->name);
1360                         goto out_unmap;
1361                 }
1362         } else {
1363                 /* ttm allows access to evicted regions by design */
1364
1365                 err = check_present(addr, obj->base.size);
1366                 if (err) {
1367                         pr_err("%s: was not present\n", obj->mm.region->name);
1368                         goto out_unmap;
1369                 }
1370         }
1371
1372 out_unmap:
1373         vm_munmap(addr, obj->base.size);
1374         return err;
1375 }
1376
1377 static int igt_mmap_revoke(void *arg)
1378 {
1379         struct drm_i915_private *i915 = arg;
1380         struct intel_memory_region *mr;
1381         enum intel_region_id id;
1382
1383         for_each_memory_region(mr, i915, id) {
1384                 struct drm_i915_gem_object *obj;
1385                 int err;
1386
1387                 obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
1388                 if (obj == ERR_PTR(-ENODEV))
1389                         continue;
1390
1391                 if (IS_ERR(obj))
1392                         return PTR_ERR(obj);
1393
1394                 object_set_placements(obj, &mr, 1);
1395
1396                 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
1397                 if (err == 0)
1398                         err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
1399
1400                 i915_gem_object_put(obj);
1401                 if (err)
1402                         return err;
1403         }
1404
1405         return 0;
1406 }
1407
1408 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
1409 {
1410         static const struct i915_subtest tests[] = {
1411                 SUBTEST(igt_partial_tiling),
1412                 SUBTEST(igt_smoke_tiling),
1413                 SUBTEST(igt_mmap_offset_exhaustion),
1414                 SUBTEST(igt_mmap),
1415                 SUBTEST(igt_mmap_access),
1416                 SUBTEST(igt_mmap_revoke),
1417                 SUBTEST(igt_mmap_gpu),
1418         };
1419
1420         return i915_subtests(tests, i915);
1421 }