// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prime_numbers.h>
#include <linux/sort.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_object_blt.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_memcpy.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"

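/*
 * Unpin, unlink and free every object on @objects, then drain the freed
 * object worker so we don't pollute the memory region between tests.
 */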
static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		i915_gem_object_lock(obj, NULL);
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

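/*
 * Fill the region with objects sized in prime numbers of pages, and check
 * that allocation only fails once the remaining space is too small for the
 * requested object.
 */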
static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size;
	resource_size_t rem;
	unsigned long max_pages;
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	page_size = mem->mm.chunk_size;
	max_pages = div64_u64(total, page_size);
	rem = total;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	if (err == -ENOMEM)
		err = 0;
	if (err == -ENXIO) {
		if (page_num * page_size <= rem) {
			pr_err("%s failed, space still left in region\n",
			       __func__);
			err = -EINVAL;
		} else {
			err = 0;
		}
	}

	close_objects(mem, &objects);

	return err;
}

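/*
 * Create an object in @mem, pin its pages and track it on @objects for
 * later cleanup via close_objects().
 */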
static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size,
		  unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto put;

	list_add(&obj->st_link, objects);
	return obj;

put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

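/* Undo igt_object_create() for a single object. */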
static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	i915_gem_object_unlock(obj);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}

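/*
 * Check that the object's backing store occupies one contiguous run of dma
 * addresses, i.e. each sg entry starts where the previous one ended.
 */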
static bool is_contiguous(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	dma_addr_t addr = -1;

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		if (addr != -1 && sg_dma_address(sg) != addr)
			return false;

		addr = sg_dma_address(sg) + sg_dma_len(sg);
	}

	return true;
}

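/*
 * Reserve a bunch of randomly sized ranges across the region, then check
 * that all of the remaining space can still be allocated.
 */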
static int igt_mock_reserve(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t avail = resource_size(&mem->region);
	struct drm_i915_gem_object *obj;
	const u32 chunk_size = SZ_32M;
	u32 i, offset, count, *order;
	u64 allocated, cur_avail;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	int err = 0;

	if (!list_empty(&mem->reserved)) {
		pr_err("%s region reserved list is not empty\n", __func__);
		return -EINVAL;
	}

	count = avail / chunk_size;
	order = i915_random_order(count, &prng);
	if (!order)
		return 0;

	/* Reserve a bunch of ranges within the region */
	for (i = 0; i < count; ++i) {
		u64 start = order[i] * chunk_size;
		u64 size = i915_prandom_u32_max_state(chunk_size, &prng);

		/* Allow for some really big holes */
		if (!size)
			continue;

		size = round_up(size, PAGE_SIZE);
		offset = igt_random_offset(&prng, 0, chunk_size, size,
					   PAGE_SIZE);

		err = intel_memory_region_reserve(mem, start + offset, size);
		if (err) {
			pr_err("%s failed to reserve range\n", __func__);
			goto out_close;
		}

		/* XXX: maybe sanity check the block range here? */
		avail -= size;
	}

	/* Try to see if we can allocate from the remaining space */
	allocated = 0;
	cur_avail = avail;
	do {
		u32 size = i915_prandom_u32_max_state(cur_avail, &prng);

		size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
		obj = igt_object_create(mem, &objects, size, 0);
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -ENXIO)
				break;

			err = PTR_ERR(obj);
			goto out_close;
		}
		cur_avail -= size;
		allocated += size;
	} while (1);

	if (allocated != avail) {
		pr_err("%s mismatch between allocation and free space\n",
		       __func__);
		err = -EINVAL;
	}

out_close:
	kfree(order);
	close_objects(mem, &objects);
	i915_buddy_free_list(&mem->mm, &mem->reserved);
	return err;
}

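/*
 * Exercise I915_BO_ALLOC_CONTIGUOUS: minimum, maximum and randomly sized
 * objects must each be backed by one contiguous range, and once the region
 * has been fragmented, contiguous allocations larger than the biggest
 * remaining block must fail even though enough free space exists in total.
 */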
static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

	/* Min size */
	obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s min object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Max size */
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s max object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Internal fragmentation should not bleed into the object size */
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (!is_contiguous(obj)) {
		pr_err("%s object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */

	target = SZ_64K;
	n_objects = div64_u64(total, target);

	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

	/* Make sure we can still allocate all the fragmented space */
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */

	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= mem->mm.chunk_size);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}

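/*
 * Check allocations from a region whose size is not a power-of-two, where
 * mm.max_order only covers the largest power-of-two that fits.
 */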
static int igt_mock_splintered_region(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	unsigned int expected_order;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

	/*
	 * Sanity check that we can still allocate everything even if
	 * mm.max_order != mm.size, i.e. our starting address space size is
	 * not a power-of-two.
	 */

	size = (SZ_4G - 1) & PAGE_MASK;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (mem->mm.size != size) {
		pr_err("%s size mismatch(%llu != %llu)\n",
		       __func__, mem->mm.size, size);
		err = -EINVAL;
		goto out_put;
	}

	expected_order = get_order(rounddown_pow_of_two(size));
	if (mem->mm.max_order != expected_order) {
		pr_err("%s order mismatch(%u != %u)\n",
		       __func__, mem->mm.max_order, expected_order);
		err = -EINVAL;
		goto out_put;
	}

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_close;
	}

	close_objects(mem, &objects);

	/*
	 * While we should be able to allocate everything without any flag
	 * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
	 * actually limited to the largest power-of-two for the region size,
	 * i.e. max_order, due to the inner workings of the buddy allocator.
	 * So make sure that does indeed hold true.
	 */

	obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
	if (!IS_ERR(obj)) {
		pr_err("%s too large contiguous allocation was not rejected\n",
		       __func__);
		err = -EINVAL;
		goto out_close;
	}

	obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj)) {
		pr_err("%s largest possible contiguous allocation failed\n",
		       __func__);
		err = PTR_ERR(obj);
		goto out_close;
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_put(mem);
	return err;
}

#ifndef SZ_8G
#define SZ_8G BIT_ULL(33)
#endif

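/*
 * Allocate an object big enough to be backed by buddy blocks larger than
 * the maximum dma segment size, and check that the resulting scatterlist
 * is still split into entries no bigger than that limit.
 */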
static int igt_mock_max_segment(void *arg)
{
	const unsigned int max_segment = i915_sg_segment_size();
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	struct i915_buddy_block *block;
	struct scatterlist *sg;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

	/*
	 * While we may create very large contiguous blocks, we may need
	 * to break those down for consumption elsewhere. In particular,
	 * dma-mapping of scatterlist elements has an implicit limit of
	 * UINT_MAX on each element.
	 */

	size = SZ_8G;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_put;
	}

	size = 0;
	list_for_each_entry(block, &obj->mm.blocks, link) {
		if (i915_buddy_block_size(&mem->mm, block) > size)
			size = i915_buddy_block_size(&mem->mm, block);
	}
	if (size < max_segment) {
		pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
		       __func__, max_segment, size);
		err = -EINVAL;
		goto out_close;
	}

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		if (sg->length > max_segment) {
			pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
			       __func__, sg->length, max_segment);
			err = -EINVAL;
			goto out_close;
		}
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_put(mem);
	return err;
}

static int igt_gpu_write_dw(struct intel_context *ce,
			    struct i915_vma *vma,
			    u32 dword,
			    u32 value)
{
	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
			       vma->size >> PAGE_SHIFT, value);
}

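/* Read back dword @dword in each page via a WC mapping and compare against @val. */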
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

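/*
 * Hammer the object with dword writes from a random sequence of engines,
 * verifying each write from the CPU.
 */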
static int igt_gpu_write(struct i915_gem_context *ctx,
			 struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int count;
	struct i915_vma *vma;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		vm = ce->vm;
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_free;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_free;

	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_write_dw(ce, vma, dword, rng);
		if (err)
			break;

		err = igt_cpu_check(obj, dword, rng);
		if (err)
			break;
	} while (!__igt_timeout(end_time, NULL));
	i915_gem_context_unlock_engines(ctx);

out_free:
	kfree(order);

	if (err == -ENOMEM)
		err = 0;

	return err;
}

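/* Smoke test that we can create and pin a minimally sized LMEM object. */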
static int igt_lmem_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

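/* GPU-write a randomly sized LMEM object from a live context. */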
static int igt_lmem_write_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct file *file;
	I915_RND_STATE(prng);
	u32 sz;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

	obj = i915_gem_object_create_lmem(i915, sz, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_file;
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	err = igt_gpu_write(ctx, obj);
	if (err)
		pr_err("igt_gpu_write failed(%d)\n", err);

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
out_file:
	fput(file);
	return err;
}

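/* Pick one of the uabi engines of @class at random. */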
static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
		    unsigned int class,
		    struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for (engine = intel_engine_lookup_user(i915, class, 0);
	     engine && engine->uabi_class == class;
	     engine = rb_entry_safe(rb_next(&engine->uabi_node),
				    typeof(*engine), uabi_node))
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	return intel_engine_lookup_user(i915, class, count);
}

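/*
 * Fill an LMEM object from the GPU, then scribble over it through a WC
 * mapping using random sizes and alignments, sampling one dword per write
 * to verify the data landed.
 */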
static int igt_lmem_write_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 bytes[] = {
		0, /* rng placeholder */
		sizeof(u32),
		sizeof(u64),
		64, /* cl */
		PAGE_SIZE,
		PAGE_SIZE - sizeof(u32),
		PAGE_SIZE - sizeof(u64),
		PAGE_SIZE - 64,
	};
	struct intel_engine_cs *engine;
	u32 *vaddr;
	u32 sz;
	u32 i;
	int *order;
	int count;
	int err;

	engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
	if (!engine)
		return 0;

	pr_info("%s: using %s\n", __func__, engine->name);

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
	sz = max_t(u32, 2 * PAGE_SIZE, sz);

	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	/* Put the pages into a known state -- from the gpu for added fun */
	intel_engine_pm_get(engine);
	err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
	intel_engine_pm_put(engine);
	if (err)
		goto out_unpin;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_unpin;

	count = ARRAY_SIZE(bytes);
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_unpin;
	}

	/* A random multiple of u32, picked between [64, PAGE_SIZE - 64] */
	bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
	GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));

	i = 0;
	do {
		u32 offset;
		u32 align;
		u32 dword;
		u32 size;
		u32 val;

		size = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

		offset = igt_random_offset(&prng, 0, obj->base.size,
					   size, align);

		val = prandom_u32_state(&prng);
		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
			 size / sizeof(u32));

		/*
		 * Sample random dw -- don't waste precious time reading every
		 * single dw.
		 */
		dword = igt_random_offset(&prng, offset,
					  offset + size,
					  sizeof(u32), sizeof(u32));
		dword /= sizeof(u32);
		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
			       size, align, offset);
			err = -EINVAL;
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

out_unpin:
	i915_gem_object_unpin_map(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static const char *repr_type(u32 type)
{
	switch (type) {
	case I915_MAP_WB:
		return "WB";
	case I915_MAP_WC:
		return "WC";
	}

	return "";
}

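/*
 * Create and map an object for the memcpy benchmarks, remapping expected
 * failures (an object too big for stolen memory, or a region we cannot
 * cpu-map) onto -ENODEV so the combination is skipped rather than failed.
 */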
static struct drm_i915_gem_object *
create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
			  void **out_addr)
{
	struct drm_i915_gem_object *obj;
	void *addr;

	obj = i915_gem_object_create_region(mr, size, 0);
	if (IS_ERR(obj)) {
		if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
			return ERR_PTR(-ENODEV);
		return obj;
	}

	addr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(addr)) {
		i915_gem_object_put(obj);
		if (PTR_ERR(addr) == -ENXIO)
			return ERR_PTR(-ENODEV);
		return addr;
	}

	*out_addr = addr;
	return obj;
}

static int wrap_ktime_compare(const void *A, const void *B)
{
	const ktime_t *a = A, *b = B;

	return ktime_compare(*a, *b);
}

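/*
 * The memcpy flavours under test: plain memcpy(), a simple long-at-a-time
 * copy, and i915_memcpy_from_wc(), which uses non-temporal reads where
 * supported.
 */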
static void igt_memcpy_long(void *dst, const void *src, size_t size)
{
	unsigned long *tmp = dst;
	const unsigned long *s = src;

	size = size / sizeof(unsigned long);
	while (size--)
		*tmp++ = *s++;
}

static inline void igt_memcpy(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);
}

static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
{
	i915_memcpy_from_wc(dst, src, size);
}

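/*
 * Time each memcpy flavour over several passes between mappings of @src_mr
 * and @dst_mr, then report the bandwidth based on a weighted average of the
 * middle passes.
 */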
static int _perf_memcpy(struct intel_memory_region *src_mr,
			struct intel_memory_region *dst_mr,
			u64 size, u32 src_type, u32 dst_type)
{
	struct drm_i915_private *i915 = src_mr->i915;
	const struct {
		const char *name;
		void (*copy)(void *dst, const void *src, size_t size);
		bool skip;
	} tests[] = {
		{
			"memcpy",
			igt_memcpy,
		},
		{
			"memcpy_long",
			igt_memcpy_long,
		},
		{
			"memcpy_from_wc",
			igt_memcpy_from_wc,
			!i915_has_memcpy_from_wc(),
		},
	};
	struct drm_i915_gem_object *src, *dst;
	void *src_addr, *dst_addr;
	int ret = 0;
	int i;

	src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
	if (IS_ERR(src)) {
		ret = PTR_ERR(src);
		goto out;
	}

	dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
	if (IS_ERR(dst)) {
		ret = PTR_ERR(dst);
		goto out_unpin_src;
	}

	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
		ktime_t t[5];
		int pass;

		if (tests[i].skip)
			continue;

		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
			ktime_t t0, t1;

			t0 = ktime_get();

			tests[i].copy(dst_addr, src_addr, size);

			t1 = ktime_get();
			t[pass] = ktime_sub(t1, t0);
		}

		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
		if (t[0] <= 0) {
			/* ignore the impossible to protect our sanity */
			pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
				 __func__,
				 src_mr->name, repr_type(src_type),
				 dst_mr->name, repr_type(dst_type),
				 tests[i].name, size >> 10,
				 t[0], t[4]);
			continue;
		}

		pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
			__func__,
			src_mr->name, repr_type(src_type),
			dst_mr->name, repr_type(dst_type),
			tests[i].name, size >> 10,
			div64_u64(mul_u32_u32(4 * size,
					      1000 * 1000 * 1000),
				  t[1] + 2 * t[2] + t[3]) >> 20);

		cond_resched();
	}

	i915_gem_object_unpin_map(dst);
	i915_gem_object_put(dst);
out_unpin_src:
	i915_gem_object_unpin_map(src);
	i915_gem_object_put(src);

	i915_gem_drain_freed_objects(i915);
out:
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int perf_memcpy(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static const u32 types[] = {
		I915_MAP_WB,
		I915_MAP_WC,
	};
	static const u32 sizes[] = {
		SZ_4K,
		SZ_64K,
		SZ_4M,
	};
	struct intel_memory_region *src_mr, *dst_mr;
	int src_id, dst_id;
	int i, j, k;
	int ret;

	for_each_memory_region(src_mr, i915, src_id) {
		for_each_memory_region(dst_mr, i915, dst_id) {
			for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
				for (j = 0; j < ARRAY_SIZE(types); ++j) {
					for (k = 0; k < ARRAY_SIZE(types); ++k) {
						ret = _perf_memcpy(src_mr,
								   dst_mr,
								   sizes[i],
								   types[j],
								   types[k]);
						if (ret)
							return ret;
					}
				}
			}
		}
	}

	return 0;
}

int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_reserve),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
		SUBTEST(igt_mock_splintered_region),
		SUBTEST(igt_mock_max_segment),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_put(mem);
out_unref:
	mock_destroy_device(i915);
	return err;
}

int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
		SUBTEST(igt_lmem_write_cpu),
		SUBTEST(igt_lmem_write_gpu),
	};

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}

int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_memcpy),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}