Merge tag 'ceph-for-5.12-rc1' of git://github.com/ceph/ceph-client
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / selftests / intel_memory_region.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include <linux/prime_numbers.h>
7 #include <linux/sort.h>
8
9 #include "../i915_selftest.h"
10
11 #include "mock_drm.h"
12 #include "mock_gem_device.h"
13 #include "mock_region.h"
14
15 #include "gem/i915_gem_context.h"
16 #include "gem/i915_gem_lmem.h"
17 #include "gem/i915_gem_region.h"
18 #include "gem/i915_gem_object_blt.h"
19 #include "gem/selftests/igt_gem_utils.h"
20 #include "gem/selftests/mock_context.h"
21 #include "gt/intel_engine_user.h"
22 #include "gt/intel_gt.h"
23 #include "i915_memcpy.h"
24 #include "selftests/igt_flush_test.h"
25 #include "selftests/i915_random.h"
26
/*
 * Release every object on @objects: drop the pinned pages (if any), return
 * the backing pages to the region, unlink and free the object. Finally
 * drain the freed-object worker so the region really is empty again.
 */
static void close_objects(struct intel_memory_region *mem,
                          struct list_head *objects)
{
        struct drm_i915_private *i915 = mem->i915;
        struct drm_i915_gem_object *obj, *on;

        list_for_each_entry_safe(obj, on, objects, st_link) {
                if (i915_gem_object_has_pinned_pages(obj))
                        i915_gem_object_unpin_pages(obj);
                /* No polluting the memory region between tests */
                __i915_gem_object_put_pages(obj);
                list_del(&obj->st_link);
                i915_gem_object_put(obj);
        }

        cond_resched();

        /* Object freeing is deferred to a worker; wait for it to finish. */
        i915_gem_drain_freed_objects(i915);
}
46
/*
 * Fill the mock region with objects of increasing prime page counts and
 * check that allocation only ever fails once there genuinely is not enough
 * space left in the region.
 */
static int igt_mock_fill(void *arg)
{
        struct intel_memory_region *mem = arg;
        resource_size_t total = resource_size(&mem->region);
        resource_size_t page_size;
        resource_size_t rem;
        unsigned long max_pages;
        unsigned long page_num;
        LIST_HEAD(objects);
        int err = 0;

        page_size = mem->mm.chunk_size;
        max_pages = div64_u64(total, page_size);
        rem = total;

        for_each_prime_number_from(page_num, 1, max_pages) {
                resource_size_t size = page_num * page_size;
                struct drm_i915_gem_object *obj;

                obj = i915_gem_object_create_region(mem, size, 0);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
                }

                err = i915_gem_object_pin_pages(obj);
                if (err) {
                        i915_gem_object_put(obj);
                        break;
                }

                /* Keep the object (and its pages) alive until cleanup. */
                list_add(&obj->st_link, &objects);
                rem -= size;
        }

        /* Plain memory exhaustion of the host is not a test failure. */
        if (err == -ENOMEM)
                err = 0;
        /*
         * -ENXIO means the region itself is exhausted; that is only
         * acceptable if the failed request was larger than the space
         * actually remaining.
         */
        if (err == -ENXIO) {
                if (page_num * page_size <= rem) {
                        pr_err("%s failed, space still left in region\n",
                               __func__);
                        err = -EINVAL;
                } else {
                        err = 0;
                }
        }

        close_objects(mem, &objects);

        return err;
}
98
/*
 * Create an object of @size in @mem, pin its backing pages and track it on
 * @objects so close_objects() can reclaim it later. Returns the object or
 * an ERR_PTR on failure (nothing is left tracked on failure).
 */
static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
                  struct list_head *objects,
                  u64 size,
                  unsigned int flags)
{
        struct drm_i915_gem_object *obj;
        int err;

        obj = i915_gem_object_create_region(mem, size, flags);
        if (IS_ERR(obj))
                return obj;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto put;

        list_add(&obj->st_link, objects);
        return obj;

put:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}
123
/* Undo igt_object_create(): unpin, return pages to the region, untrack, free. */
static void igt_object_release(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
        /* Release the pages immediately so the region space can be reused. */
        __i915_gem_object_put_pages(obj);
        list_del(&obj->st_link);
        i915_gem_object_put(obj);
}
131
132 static bool is_contiguous(struct drm_i915_gem_object *obj)
133 {
134         struct scatterlist *sg;
135         dma_addr_t addr = -1;
136
137         for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
138                 if (addr != -1 && sg_dma_address(sg) != addr)
139                         return false;
140
141                 addr = sg_dma_address(sg) + sg_dma_len(sg);
142         }
143
144         return true;
145 }
146
/*
 * Exercise I915_BO_ALLOC_CONTIGUOUS against the mock region: min/max sized
 * allocations, exact-size allocations, and behaviour under deliberate
 * fragmentation where plenty of space remains but no large contiguous block.
 */
static int igt_mock_contiguous(void *arg)
{
        struct intel_memory_region *mem = arg;
        struct drm_i915_gem_object *obj;
        unsigned long n_objects;
        LIST_HEAD(objects);
        LIST_HEAD(holes);
        I915_RND_STATE(prng);
        resource_size_t total;
        resource_size_t min;
        u64 target;
        int err = 0;

        total = resource_size(&mem->region);

        /* Min size */
        obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
                                I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (!is_contiguous(obj)) {
                pr_err("%s min object spans disjoint sg entries\n", __func__);
                err = -EINVAL;
                goto err_close_objects;
        }

        igt_object_release(obj);

        /* Max size */
        obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (!is_contiguous(obj)) {
                pr_err("%s max object spans disjoint sg entries\n", __func__);
                err = -EINVAL;
                goto err_close_objects;
        }

        igt_object_release(obj);

        /* Internal fragmentation should not bleed into the object size */
        target = i915_prandom_u64_state(&prng);
        div64_u64_rem(target, total, &target);
        target = round_up(target, PAGE_SIZE);
        target = max_t(u64, PAGE_SIZE, target);

        obj = igt_object_create(mem, &objects, target,
                                I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (obj->base.size != target) {
                pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
                       obj->base.size, target);
                err = -EINVAL;
                goto err_close_objects;
        }

        if (!is_contiguous(obj)) {
                pr_err("%s object spans disjoint sg entries\n", __func__);
                err = -EINVAL;
                goto err_close_objects;
        }

        igt_object_release(obj);

        /*
         * Try to fragment the address space, such that half of it is free, but
         * the max contiguous block size is SZ_64K.
         */

        target = SZ_64K;
        n_objects = div64_u64(total, target);

        /* Alternate allocations between the kept list and the hole list. */
        while (n_objects--) {
                struct list_head *list;

                if (n_objects % 2)
                        list = &holes;
                else
                        list = &objects;

                obj = igt_object_create(mem, list, target,
                                        I915_BO_ALLOC_CONTIGUOUS);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto err_close_objects;
                }
        }

        /* Freeing the holes leaves half the region free, in 64K pieces. */
        close_objects(mem, &holes);

        min = target;
        target = total >> 1;

        /* Make sure we can still allocate all the fragmented space */
        obj = igt_object_create(mem, &objects, target, 0);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto err_close_objects;
        }

        igt_object_release(obj);

        /*
         * Even though we have enough free space, we don't have a big enough
         * contiguous block. Make sure that holds true.
         */

        do {
                bool should_fail = target > min;

                obj = igt_object_create(mem, &objects, target,
                                        I915_BO_ALLOC_CONTIGUOUS);
                if (should_fail != IS_ERR(obj)) {
                        pr_err("%s target allocation(%llx) mismatch\n",
                               __func__, target);
                        err = -EINVAL;
                        goto err_close_objects;
                }

                target >>= 1;
        } while (target >= mem->mm.chunk_size);

err_close_objects:
        list_splice_tail(&holes, &objects);
        close_objects(mem, &objects);
        return err;
}
278
/*
 * Sanity check a region whose size is not a power-of-two (mm.max_order
 * covers less than mm.size): everything must still be allocatable, but a
 * contiguous allocation is capped at the largest power-of-two block.
 */
static int igt_mock_splintered_region(void *arg)
{
        struct intel_memory_region *mem = arg;
        struct drm_i915_private *i915 = mem->i915;
        struct drm_i915_gem_object *obj;
        unsigned int expected_order;
        LIST_HEAD(objects);
        u64 size;
        int err = 0;

        /*
         * Sanity check we can still allocate everything even if the
         * mm.max_order != mm.size. i.e our starting address space size is not a
         * power-of-two.
         */

        /* One page short of 4G — deliberately not a power-of-two. */
        size = (SZ_4G - 1) & PAGE_MASK;
        mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
        if (IS_ERR(mem))
                return PTR_ERR(mem);

        if (mem->mm.size != size) {
                pr_err("%s size mismatch(%llu != %llu)\n",
                       __func__, mem->mm.size, size);
                err = -EINVAL;
                goto out_put;
        }

        expected_order = get_order(rounddown_pow_of_two(size));
        if (mem->mm.max_order != expected_order) {
                pr_err("%s order mismatch(%u != %u)\n",
                       __func__, mem->mm.max_order, expected_order);
                err = -EINVAL;
                goto out_put;
        }

        /* The full (non-contiguous) size must still be allocatable. */
        obj = igt_object_create(mem, &objects, size, 0);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out_close;
        }

        close_objects(mem, &objects);

        /*
         * While we should be able allocate everything without any flag
         * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
         * actually limited to the largest power-of-two for the region size i.e
         * max_order, due to the inner workings of the buddy allocator. So make
         * sure that does indeed hold true.
         */

        obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
        if (!IS_ERR(obj)) {
                pr_err("%s too large contiguous allocation was not rejected\n",
                       __func__);
                err = -EINVAL;
                goto out_close;
        }

        obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
                                I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj)) {
                pr_err("%s largest possible contiguous allocation failed\n",
                       __func__);
                err = PTR_ERR(obj);
                goto out_close;
        }

out_close:
        close_objects(mem, &objects);
out_put:
        intel_memory_region_put(mem);
        return err;
}
354
355 #ifndef SZ_8G
356 #define SZ_8G BIT_ULL(33)
357 #endif
358
359 static int igt_mock_max_segment(void *arg)
360 {
361         const unsigned int max_segment = i915_sg_segment_size();
362         struct intel_memory_region *mem = arg;
363         struct drm_i915_private *i915 = mem->i915;
364         struct drm_i915_gem_object *obj;
365         struct i915_buddy_block *block;
366         struct scatterlist *sg;
367         LIST_HEAD(objects);
368         u64 size;
369         int err = 0;
370
371         /*
372          * While we may create very large contiguous blocks, we may need
373          * to break those down for consumption elsewhere. In particular,
374          * dma-mapping with scatterlist elements have an implicit limit of
375          * UINT_MAX on each element.
376          */
377
378         size = SZ_8G;
379         mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
380         if (IS_ERR(mem))
381                 return PTR_ERR(mem);
382
383         obj = igt_object_create(mem, &objects, size, 0);
384         if (IS_ERR(obj)) {
385                 err = PTR_ERR(obj);
386                 goto out_put;
387         }
388
389         size = 0;
390         list_for_each_entry(block, &obj->mm.blocks, link) {
391                 if (i915_buddy_block_size(&mem->mm, block) > size)
392                         size = i915_buddy_block_size(&mem->mm, block);
393         }
394         if (size < max_segment) {
395                 pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
396                        __func__, max_segment, size);
397                 err = -EINVAL;
398                 goto out_close;
399         }
400
401         for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
402                 if (sg->length > max_segment) {
403                         pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
404                                __func__, sg->length, max_segment);
405                         err = -EINVAL;
406                         goto out_close;
407                 }
408         }
409
410 out_close:
411         close_objects(mem, &objects);
412 out_put:
413         intel_memory_region_put(mem);
414         return err;
415 }
416
417 static int igt_gpu_write_dw(struct intel_context *ce,
418                             struct i915_vma *vma,
419                             u32 dword,
420                             u32 value)
421 {
422         return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
423                                vma->size >> PAGE_SHIFT, value);
424 }
425
426 static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
427 {
428         unsigned long n = obj->base.size >> PAGE_SHIFT;
429         u32 *ptr;
430         int err;
431
432         err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
433         if (err)
434                 return err;
435
436         ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
437         if (IS_ERR(ptr))
438                 return PTR_ERR(ptr);
439
440         ptr += dword;
441         while (n--) {
442                 if (*ptr != val) {
443                         pr_err("base[%u]=%08x, val=%08x\n",
444                                dword, *ptr, val);
445                         err = -EINVAL;
446                         break;
447                 }
448
449                 ptr += PAGE_SIZE / sizeof(*ptr);
450         }
451
452         i915_gem_object_unpin_map(obj);
453         return err;
454 }
455
/*
 * Repeatedly pick a random engine from @ctx, have the GPU write a random
 * dword into every page of @obj, and verify the write from the CPU, until
 * the selftest timeout expires.
 */
static int igt_gpu_write(struct i915_gem_context *ctx,
                         struct drm_i915_gem_object *obj)
{
        struct i915_gem_engines *engines;
        struct i915_gem_engines_iter it;
        struct i915_address_space *vm;
        struct intel_context *ce;
        I915_RND_STATE(prng);
        IGT_TIMEOUT(end_time);
        unsigned int count;
        struct i915_vma *vma;
        int *order;
        int i, n;
        int err = 0;

        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        /*
         * count = all engines in the context, n = those able to store a
         * dword; vm is taken from the last capable engine seen.
         */
        n = 0;
        count = 0;
        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                count++;
                if (!intel_engine_can_store_dword(ce->engine))
                        continue;

                vm = ce->vm;
                n++;
        }
        i915_gem_context_unlock_engines(ctx);
        if (!n)
                return 0; /* nothing can perform the write — trivially pass */

        /* Pre-generate a random engine visitation order. */
        order = i915_random_order(count * count, &prng);
        if (!order)
                return -ENOMEM;

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto out_free;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto out_free;

        i = 0;
        engines = i915_gem_context_lock_engines(ctx);
        do {
                u32 rng = prandom_u32_state(&prng);
                /* Random dword index within a page. */
                u32 dword = offset_in_page(rng) / 4;

                ce = engines->engines[order[i] % engines->num_engines];
                i = (i + 1) % (count * count);
                /* Skip holes and engines that cannot store a dword. */
                if (!ce || !intel_engine_can_store_dword(ce->engine))
                        continue;

                err = igt_gpu_write_dw(ce, vma, dword, rng);
                if (err)
                        break;

                err = igt_cpu_check(obj, dword, rng);
                if (err)
                        break;
        } while (!__igt_timeout(end_time, NULL));
        i915_gem_context_unlock_engines(ctx);

out_free:
        kfree(order);

        /* Running out of memory is treated as a skip, not a failure. */
        if (err == -ENOMEM)
                err = 0;

        return err;
}
530
531 static int igt_lmem_create(void *arg)
532 {
533         struct drm_i915_private *i915 = arg;
534         struct drm_i915_gem_object *obj;
535         int err = 0;
536
537         obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
538         if (IS_ERR(obj))
539                 return PTR_ERR(obj);
540
541         err = i915_gem_object_pin_pages(obj);
542         if (err)
543                 goto out_put;
544
545         i915_gem_object_unpin_pages(obj);
546 out_put:
547         i915_gem_object_put(obj);
548
549         return err;
550 }
551
/*
 * Create a randomly-sized LMEM object and exercise GPU writes to it via a
 * live context (igt_gpu_write).
 */
static int igt_lmem_write_gpu(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        struct i915_gem_context *ctx;
        struct file *file;
        I915_RND_STATE(prng);
        u32 sz;
        int err;

        file = mock_file(i915);
        if (IS_ERR(file))
                return PTR_ERR(file);

        ctx = live_context(i915, file);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto out_file;
        }

        /* Random page-aligned size below SZ_32M. */
        sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

        obj = i915_gem_object_create_lmem(i915, sz, 0);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out_file;
        }

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto out_put;

        err = igt_gpu_write(ctx, obj);
        if (err)
                pr_err("igt_gpu_write failed(%d)\n", err);

        i915_gem_object_unpin_pages(obj);
out_put:
        i915_gem_object_put(obj);
out_file:
        fput(file);
        return err;
}
595
/*
 * Pick a uniformly random user-visible engine of @class. Returns NULL when
 * the device exposes no engines of that class (count == 0 makes the final
 * lookup miss).
 */
static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
                    unsigned int class,
                    struct rnd_state *prng)
{
        struct intel_engine_cs *engine;
        unsigned int count;

        /* Walk the uabi rb-tree to count the engines of this class. */
        count = 0;
        for (engine = intel_engine_lookup_user(i915, class, 0);
             engine && engine->uabi_class == class;
             engine = rb_entry_safe(rb_next(&engine->uabi_node),
                                    typeof(*engine), uabi_node))
                count++;

        /* Then re-use count as a random instance index in [0, count). */
        count = i915_prandom_u32_max_state(count, prng);
        return intel_engine_lookup_user(i915, class, count);
}
614
/*
 * Stress CPU (WC) writes into LMEM: fill a randomly-sized contiguous object
 * from the GPU, then repeatedly memset32 random sub-ranges with random
 * sizes/alignments and spot-check a random dword of each write.
 */
static int igt_lmem_write_cpu(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        I915_RND_STATE(prng);
        IGT_TIMEOUT(end_time);
        /* Interesting write sizes/alignments around dword, cacheline, page. */
        u32 bytes[] = {
                0, /* rng placeholder */
                sizeof(u32),
                sizeof(u64),
                64, /* cl */
                PAGE_SIZE,
                PAGE_SIZE - sizeof(u32),
                PAGE_SIZE - sizeof(u64),
                PAGE_SIZE - 64,
        };
        struct intel_engine_cs *engine;
        u32 *vaddr;
        u32 sz;
        u32 i;
        int *order;
        int count;
        int err;

        /* Need a copy engine for the initial GPU fill; skip if none. */
        engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
        if (!engine)
                return 0;

        pr_info("%s: using %s\n", __func__, engine->name);

        /* Random page-aligned size in [2 pages, SZ_32M). */
        sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
        sz = max_t(u32, 2 * PAGE_SIZE, sz);

        obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        /* Put the pages into a known state -- from the gpu for added fun */
        intel_engine_pm_get(engine);
        err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
        intel_engine_pm_put(engine);
        if (err)
                goto out_unpin;

        i915_gem_object_lock(obj, NULL);
        err = i915_gem_object_set_to_wc_domain(obj, true);
        i915_gem_object_unlock(obj);
        if (err)
                goto out_unpin;

        count = ARRAY_SIZE(bytes);
        order = i915_random_order(count * count, &prng);
        if (!order) {
                err = -ENOMEM;
                goto out_unpin;
        }

        /* A random multiple of u32, picked between [64, PAGE_SIZE - 64] */
        bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
        GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));

        i = 0;
        do {
                u32 offset;
                u32 align;
                u32 dword;
                u32 size;
                u32 val;

                /* Draw a (size, align) pair from the shuffled table. */
                size = bytes[order[i] % count];
                i = (i + 1) % (count * count);

                align = bytes[order[i] % count];
                i = (i + 1) % (count * count);

                align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

                offset = igt_random_offset(&prng, 0, obj->base.size,
                                           size, align);

                val = prandom_u32_state(&prng);
                memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
                         size / sizeof(u32));

                /*
                 * Sample random dw -- don't waste precious time reading every
                 * single dw.
                 */
                dword = igt_random_offset(&prng, offset,
                                          offset + size,
                                          sizeof(u32), sizeof(u32));
                dword /= sizeof(u32);
                if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
                        pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
                               __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
                               size, align, offset);
                        err = -EINVAL;
                        break;
                }
        } while (!__igt_timeout(end_time, NULL));

out_unpin:
        i915_gem_object_unpin_map(obj);
out_put:
        i915_gem_object_put(obj);

        return err;
}
729
730 static const char *repr_type(u32 type)
731 {
732         switch (type) {
733         case I915_MAP_WB:
734                 return "WB";
735         case I915_MAP_WC:
736                 return "WC";
737         }
738
739         return "";
740 }
741
/*
 * Create an object of @size in @mr and pin a CPU mapping of kind @type,
 * storing the address in *@out_addr. Conditions that merely mean "not
 * supported on this region" (-ENOSPC from stolen memory, -ENXIO from an
 * unmappable region) are translated to -ENODEV so callers can skip.
 */
static struct drm_i915_gem_object *
create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
                          void **out_addr)
{
        struct drm_i915_gem_object *obj;
        void *addr;

        obj = i915_gem_object_create_region(mr, size, 0);
        if (IS_ERR(obj)) {
                if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
                        return ERR_PTR(-ENODEV);
                return obj;
        }

        addr = i915_gem_object_pin_map(obj, type);
        if (IS_ERR(addr)) {
                i915_gem_object_put(obj);
                if (PTR_ERR(addr) == -ENXIO)
                        return ERR_PTR(-ENODEV);
                return addr;
        }

        *out_addr = addr;
        return obj;
}
767
768 static int wrap_ktime_compare(const void *A, const void *B)
769 {
770         const ktime_t *a = A, *b = B;
771
772         return ktime_compare(*a, *b);
773 }
774
/*
 * Copy @size bytes word-at-a-time. @size is assumed to be a multiple of
 * sizeof(unsigned long); any remainder bytes are not copied.
 */
static void igt_memcpy_long(void *dst, const void *src, size_t size)
{
        unsigned long *d = dst;
        const unsigned long *s = src;
        size_t n = size / sizeof(unsigned long);

        while (n--)
                *d++ = *s++;
}
784
/* Baseline copy via the library memcpy(), for comparison in _perf_memcpy(). */
static inline void igt_memcpy(void *dst, const void *src, size_t size)
{
        memcpy(dst, src, size);
}
789
/*
 * Copy via i915's WC-optimised memcpy; only meaningful when
 * i915_has_memcpy_from_wc() reports support (callers skip otherwise).
 */
static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
{
        i915_memcpy_from_wc(dst, src, size);
}
794
/*
 * Time several copy routines moving @size bytes from an object in @src_mr
 * (mapped as @src_type) to one in @dst_mr (mapped as @dst_type) and log
 * the throughput. -ENODEV (unsupported region/mapping combo) is a skip.
 */
static int _perf_memcpy(struct intel_memory_region *src_mr,
                        struct intel_memory_region *dst_mr,
                        u64 size, u32 src_type, u32 dst_type)
{
        struct drm_i915_private *i915 = src_mr->i915;
        const struct {
                const char *name;
                void (*copy)(void *dst, const void *src, size_t size);
                bool skip; /* true when the routine is unavailable here */
        } tests[] = {
                {
                        "memcpy",
                        igt_memcpy,
                },
                {
                        "memcpy_long",
                        igt_memcpy_long,
                },
                {
                        "memcpy_from_wc",
                        igt_memcpy_from_wc,
                        !i915_has_memcpy_from_wc(),
                },
        };
        struct drm_i915_gem_object *src, *dst;
        void *src_addr, *dst_addr;
        int ret = 0;
        int i;

        src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
        if (IS_ERR(src)) {
                ret = PTR_ERR(src);
                goto out;
        }

        dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
        if (IS_ERR(dst)) {
                ret = PTR_ERR(dst);
                goto out_unpin_src;
        }

        for (i = 0; i < ARRAY_SIZE(tests); ++i) {
                ktime_t t[5];
                int pass;

                if (tests[i].skip)
                        continue;

                /* Time five passes of the full-size copy. */
                for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
                        ktime_t t0, t1;

                        t0 = ktime_get();

                        tests[i].copy(dst_addr, src_addr, size);

                        t1 = ktime_get();
                        t[pass] = ktime_sub(t1, t0);
                }

                sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
                if (t[0] <= 0) {
                        /* ignore the impossible to protect our sanity */
                        pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
                                 __func__,
                                 src_mr->name, repr_type(src_type),
                                 dst_mr->name, repr_type(dst_type),
                                 tests[i].name, size >> 10,
                                 t[0], t[4]);
                        continue;
                }

                /*
                 * Throughput from the middle three samples (min and max
                 * discarded), weighting the median double:
                 * 4 * size bytes over t[1] + 2*t[2] + t[3] ns, in MiB/s.
                 */
                pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
                        __func__,
                        src_mr->name, repr_type(src_type),
                        dst_mr->name, repr_type(dst_type),
                        tests[i].name, size >> 10,
                        div64_u64(mul_u32_u32(4 * size,
                                              1000 * 1000 * 1000),
                                  t[1] + 2 * t[2] + t[3]) >> 20);

                cond_resched();
        }

        i915_gem_object_unpin_map(dst);
        i915_gem_object_put(dst);
out_unpin_src:
        i915_gem_object_unpin_map(src);
        i915_gem_object_put(src);

        i915_gem_drain_freed_objects(i915);
out:
        /* Unsupported combination — report success so the suite continues. */
        if (ret == -ENODEV)
                ret = 0;

        return ret;
}
891
892 static int perf_memcpy(void *arg)
893 {
894         struct drm_i915_private *i915 = arg;
895         static const u32 types[] = {
896                 I915_MAP_WB,
897                 I915_MAP_WC,
898         };
899         static const u32 sizes[] = {
900                 SZ_4K,
901                 SZ_64K,
902                 SZ_4M,
903         };
904         struct intel_memory_region *src_mr, *dst_mr;
905         int src_id, dst_id;
906         int i, j, k;
907         int ret;
908
909         for_each_memory_region(src_mr, i915, src_id) {
910                 for_each_memory_region(dst_mr, i915, dst_id) {
911                         for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
912                                 for (j = 0; j < ARRAY_SIZE(types); ++j) {
913                                         for (k = 0; k < ARRAY_SIZE(types); ++k) {
914                                                 ret = _perf_memcpy(src_mr,
915                                                                    dst_mr,
916                                                                    sizes[i],
917                                                                    types[j],
918                                                                    types[k]);
919                                                 if (ret)
920                                                         return ret;
921                                         }
922                                 }
923                         }
924                 }
925         }
926
927         return 0;
928 }
929
/*
 * Entry point for the mock (no hardware) memory region selftests: run them
 * against a 2G mock region backed by a mock device.
 */
int intel_memory_region_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_mock_fill),
                SUBTEST(igt_mock_contiguous),
                SUBTEST(igt_mock_splintered_region),
                SUBTEST(igt_mock_max_segment),
        };
        struct intel_memory_region *mem;
        struct drm_i915_private *i915;
        int err;

        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;

        mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
        if (IS_ERR(mem)) {
                pr_err("failed to create memory region\n");
                err = PTR_ERR(mem);
                goto out_unref;
        }

        /* The region is passed as the subtest argument (void *arg). */
        err = i915_subtests(tests, mem);

        intel_memory_region_put(mem);
out_unref:
        mock_destroy_device(i915);
        return err;
}
960
/*
 * Entry point for the live LMEM selftests; skipped (reported as success)
 * when the device has no local memory or the GPU is wedged.
 */
int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_lmem_create),
                SUBTEST(igt_lmem_write_cpu),
                SUBTEST(igt_lmem_write_gpu),
        };

        if (!HAS_LMEM(i915)) {
                pr_info("device lacks LMEM support, skipping\n");
                return 0;
        }

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return i915_live_subtests(tests, i915);
}
979
/*
 * Entry point for the memcpy throughput measurements; skipped when the GPU
 * is wedged.
 */
int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(perf_memcpy),
        };

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return i915_live_subtests(tests, i915);
}