// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prime_numbers.h>
#include <linux/sort.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_object_blt.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_memcpy.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"

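/*
 * Selftests for struct intel_memory_region: the mock tests exercise the
 * buddy allocator backing a region, while the live and perf tests poke at
 * real LMEM through the CPU and GPU paths. Entry points are at the bottom
 * of the file.
 */
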
static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

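/*
 * Allocate prime-numbered multiples of the minimum chunk size until the
 * region is exhausted, then check that the final failure was due to a
 * genuine lack of space rather than premature exhaustion.
 */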
static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size;
	resource_size_t rem;
	unsigned long max_pages;
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	page_size = mem->mm.chunk_size;
	max_pages = div64_u64(total, page_size);
	rem = total;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	if (err == -ENOMEM)
		err = 0;
	if (err == -ENXIO) {
		if (page_num * page_size <= rem) {
			pr_err("%s failed, space still left in region\n",
			       __func__);
			err = -EINVAL;
		} else {
			err = 0;
		}
	}

	close_objects(mem, &objects);
	return err;
}

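/*
 * Helper for the tests below: allocate and pin a region object, tracking it
 * on @objects so close_objects() can clean up on the way out.
 */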
static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size,
		  unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto put;

	list_add(&obj->st_link, objects);
	return obj;

put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}

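/*
 * An object is dma-contiguous if every sg entry begins exactly where the
 * previous one ended.
 */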
static bool is_contiguous(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	dma_addr_t addr = -1;

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		if (addr != -1 && sg_dma_address(sg) != addr)
			return false;

		addr = sg_dma_address(sg) + sg_dma_len(sg);
	}

	return true;
}

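/*
 * Exercise I915_BO_ALLOC_CONTIGUOUS at the minimum, maximum and a random
 * size, then deliberately fragment the region to check that contiguous
 * allocations fail once no large-enough block remains, even though plenty
 * of space is still free.
 */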
static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

	/* Min size */
	obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s min object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Max size */
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s max object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Internal fragmentation should not bleed into the object size */
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (!is_contiguous(obj)) {
		pr_err("%s object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */

	target = SZ_64K;
	n_objects = div64_u64(total, target);

	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

	/* Make sure we can still allocate all the fragmented space */
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */

	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= mem->mm.chunk_size);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}

static int igt_mock_splintered_region(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	unsigned int expected_order;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

	/*
	 * Sanity check that we can still allocate everything even if
	 * mm.max_order != mm.size, i.e. our starting address space size is
	 * not a power-of-two.
	 */

	size = (SZ_4G - 1) & PAGE_MASK;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (mem->mm.size != size) {
		pr_err("%s size mismatch(%llu != %llu)\n",
		       __func__, mem->mm.size, size);
		err = -EINVAL;
		goto out_put;
	}

	expected_order = get_order(rounddown_pow_of_two(size));
	if (mem->mm.max_order != expected_order) {
		pr_err("%s order mismatch(%u != %u)\n",
		       __func__, mem->mm.max_order, expected_order);
		err = -EINVAL;
		goto out_put;
	}

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_close;
	}

	close_objects(mem, &objects);

	/*
	 * While we should be able to allocate everything without any flag
	 * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
	 * actually limited to the largest power-of-two for the region size,
	 * i.e. max_order, due to the inner workings of the buddy allocator.
	 * So make sure that does indeed hold true.
	 */

	obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
	if (!IS_ERR(obj)) {
		pr_err("%s too large contiguous allocation was not rejected\n",
		       __func__);
		err = -EINVAL;
		goto out_close;
	}

	obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj)) {
		pr_err("%s largest possible contiguous allocation failed\n",
		       __func__);
		err = PTR_ERR(obj);
		goto out_close;
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_put(mem);
	return err;
}

#ifndef SZ_8G
#define SZ_8G BIT_ULL(33)
#endif

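/*
 * The guard above supplies SZ_8G only where linux/sizes.h does not already
 * define it; igt_mock_max_segment() wants a region larger than any single
 * scatterlist element may legally be.
 */
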
static int igt_mock_max_segment(void *arg)
{
	const unsigned int max_segment = i915_sg_segment_size();
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	struct i915_buddy_block *block;
	struct scatterlist *sg;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

	/*
	 * While we may create very large contiguous blocks, we may need
	 * to break those down for consumption elsewhere. In particular,
	 * dma-mapping of scatterlist elements has an implicit limit of
	 * UINT_MAX per element.
	 */

	size = SZ_8G;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_put;
	}

	size = 0;
	list_for_each_entry(block, &obj->mm.blocks, link) {
		if (i915_buddy_block_size(&mem->mm, block) > size)
			size = i915_buddy_block_size(&mem->mm, block);
	}
	if (size < max_segment) {
		pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
		       __func__, max_segment, size);
		err = -EINVAL;
		goto out_close;
	}

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		if (sg->length > max_segment) {
			pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
			       __func__, sg->length, max_segment);
			err = -EINVAL;
			goto out_close;
		}
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_put(mem);
	return err;
}

static int igt_gpu_write_dw(struct intel_context *ce,
			    struct i915_vma *vma,
			    u32 dword,
			    u32 value)
{
	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
			       vma->size >> PAGE_SHIFT, value);
}

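/*
 * Read back dword @dword of each page through a WC mapping and compare
 * against the value the GPU was asked to write.
 */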
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

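/*
 * Hammer the object from every engine that can store a dword: pick a random
 * engine and a random dword offset each iteration until the timeout expires,
 * verifying every GPU write from the CPU.
 */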
static int igt_gpu_write(struct i915_gem_context *ctx,
			 struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int count;
	struct i915_vma *vma;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		vm = ce->vm;
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_free;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_free;

	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_write_dw(ce, vma, dword, rng);
		if (err)
			break;

		err = igt_cpu_check(obj, dword, rng);
		if (err)
			break;
	} while (!__igt_timeout(end_time, NULL));
	i915_gem_context_unlock_engines(ctx);

out_free:
	kfree(order);

	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_lmem_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static int igt_lmem_write_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct file *file;
	I915_RND_STATE(prng);
	u32 sz;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

	obj = i915_gem_object_create_lmem(i915, sz, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_file;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	err = igt_gpu_write(ctx, obj);
	if (err)
		pr_err("igt_gpu_write failed(%d)\n", err);

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
out_file:
	fput(file);
	return err;
}

static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
		    unsigned int class,
		    struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for (engine = intel_engine_lookup_user(i915, class, 0);
	     engine && engine->uabi_class == class;
	     engine = rb_entry_safe(rb_next(&engine->uabi_node),
				    typeof(*engine), uabi_node))
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	return intel_engine_lookup_user(i915, class, count);
}

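/*
 * Write to lmem directly from the CPU through a WC mapping: the object is
 * first blitted to a known value by the GPU, then scribbled on with
 * random-sized, random-aligned CPU writes, sampling one dword per write to
 * confirm it landed.
 */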
static int igt_lmem_write_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 bytes[] = {
		0, /* rng placeholder */
		sizeof(u32),
		sizeof(u64),
		64, /* cl */
		PAGE_SIZE,
		PAGE_SIZE - sizeof(u32),
		PAGE_SIZE - sizeof(u64),
		PAGE_SIZE - 64,
	};
	struct intel_engine_cs *engine;
	u32 *vaddr;
	u32 sz;
	u32 i;
	int *order;
	int count;
	int err;

	engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
	if (!engine)
		return 0;

	pr_info("%s: using %s\n", __func__, engine->name);

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
	sz = max_t(u32, 2 * PAGE_SIZE, sz);

	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	/* Put the pages into a known state -- from the gpu for added fun */
	intel_engine_pm_get(engine);
	err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
	intel_engine_pm_put(engine);
	if (err)
		goto out_unpin;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_unpin;

	count = ARRAY_SIZE(bytes);
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_unpin;
	}

	/* A random multiple of u32, picked between [64, PAGE_SIZE - 64] */
	bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
	GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));

	i = 0;
	do {
		u32 offset;
		u32 align;
		u32 dword;
		u32 size;
		u32 val;

		size = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

		offset = igt_random_offset(&prng, 0, obj->base.size,
					   size, align);

		val = prandom_u32_state(&prng);
		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
			 size / sizeof(u32));

		/*
		 * Sample random dw -- don't waste precious time reading every
		 * dw.
		 */
		dword = igt_random_offset(&prng, offset,
					  offset + size,
					  sizeof(u32), sizeof(u32));
		dword /= sizeof(u32);
		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
			       size, align, offset);
			err = -EINVAL;
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

out_unpin:
	i915_gem_object_unpin_map(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static const char *repr_type(u32 type)
{
	switch (type) {
	case I915_MAP_WB:
		return "WB";
	case I915_MAP_WC:
		return "WC";
	}

	return "";
}

static struct drm_i915_gem_object *
create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
			  void **out_addr)
{
	struct drm_i915_gem_object *obj;
	void *addr;

	obj = i915_gem_object_create_region(mr, size, 0);
	if (IS_ERR(obj)) {
		if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
			return ERR_PTR(-ENODEV);
		return obj;
	}

	addr = i915_gem_object_pin_map(obj, type);
	if (IS_ERR(addr)) {
		i915_gem_object_put(obj);
		if (PTR_ERR(addr) == -ENXIO)
			return ERR_PTR(-ENODEV);
		return addr;
	}

	*out_addr = addr;
	return obj;
}

static int wrap_ktime_compare(const void *A, const void *B)
{
	const ktime_t *a = A, *b = B;

	return ktime_compare(*a, *b);
}

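/*
 * The copy routines under test: the compiler's memcpy, a naive
 * long-at-a-time loop, and i915_memcpy_from_wc (skipped when the CPU lacks
 * the required non-temporal load support).
 */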
static void igt_memcpy_long(void *dst, const void *src, size_t size)
{
	unsigned long *tmp = dst;
	const unsigned long *s = src;

	size = size / sizeof(unsigned long);
	while (size--)
		*tmp++ = *s++;
}

static inline void igt_memcpy(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);
}

static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
{
	i915_memcpy_from_wc(dst, src, size);
}

static int _perf_memcpy(struct intel_memory_region *src_mr,
			struct intel_memory_region *dst_mr,
			u64 size, u32 src_type, u32 dst_type)
{
	struct drm_i915_private *i915 = src_mr->i915;
	const struct {
		const char *name;
		void (*copy)(void *dst, const void *src, size_t size);
		bool skip;
	} tests[] = {
		{
			"memcpy",
			igt_memcpy,
		},
		{
			"memcpy_long",
			igt_memcpy_long,
		},
		{
			"memcpy_from_wc",
			igt_memcpy_from_wc,
			!i915_has_memcpy_from_wc(),
		},
	};
	struct drm_i915_gem_object *src, *dst;
	void *src_addr, *dst_addr;
	int ret = 0;
	int i;

	src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
	if (IS_ERR(src)) {
		ret = PTR_ERR(src);
		goto out;
	}

	dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
	if (IS_ERR(dst)) {
		ret = PTR_ERR(dst);
		goto out_unpin_src;
	}

	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
		ktime_t t[5];
		int pass;

		if (tests[i].skip)
			continue;

		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
			ktime_t t0, t1;

			t0 = ktime_get();
			tests[i].copy(dst_addr, src_addr, size);
			t1 = ktime_get();

			t[pass] = ktime_sub(t1, t0);
		}

		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
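		/*
		 * With the five samples sorted, report a weighted average of
		 * the middle three, 4 * size / (t[1] + 2 * t[2] + t[3]),
		 * discarding the outliers t[0] and t[4].
		 */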
		if (t[0] <= 0) {
			/* ignore the impossible to protect our sanity */
			pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
				 __func__,
				 src_mr->name, repr_type(src_type),
				 dst_mr->name, repr_type(dst_type),
				 tests[i].name, size >> 10,
				 t[0], t[4]);
			continue;
		}

		pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
			__func__,
			src_mr->name, repr_type(src_type),
			dst_mr->name, repr_type(dst_type),
			tests[i].name, size >> 10,
			div64_u64(mul_u32_u32(4 * size,
					      1000 * 1000 * 1000),
				  t[1] + 2 * t[2] + t[3]) >> 20);

		cond_resched();
	}

	i915_gem_object_unpin_map(dst);
	i915_gem_object_put(dst);
out_unpin_src:
	i915_gem_object_unpin_map(src);
	i915_gem_object_put(src);

	i915_gem_drain_freed_objects(i915);
out:
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int perf_memcpy(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static const u32 types[] = {
		I915_MAP_WB,
		I915_MAP_WC,
	};
	static const u32 sizes[] = {
		SZ_4K,
		SZ_64K,
		SZ_4M,
	};
	struct intel_memory_region *src_mr, *dst_mr;
	int src_id, dst_id;
	int i, j, k;
	int ret;

	for_each_memory_region(src_mr, i915, src_id) {
		for_each_memory_region(dst_mr, i915, dst_id) {
			for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
				for (j = 0; j < ARRAY_SIZE(types); ++j) {
					for (k = 0; k < ARRAY_SIZE(types); ++k) {
						ret = _perf_memcpy(src_mr,
								   dst_mr,
								   sizes[i],
								   types[j],
								   types[k]);
						if (ret)
							return ret;
					}
				}
			}
		}
	}

	return 0;
}

int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
		SUBTEST(igt_mock_splintered_region),
		SUBTEST(igt_mock_max_segment),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_put(mem);
out_unref:
	mock_destroy_device(i915);
	return err;
}

int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
		SUBTEST(igt_lmem_write_cpu),
		SUBTEST(igt_lmem_write_gpu),
	};

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}

int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_memcpy),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}