// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "huge_gem_object.h"
#include "mock_context.h"

static int igt_fill_blt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
	struct drm_i915_gem_object *obj;
	struct rnd_state prng;
	IGT_TIMEOUT(end); /* deadline for the test loop below */
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

	/*
	 * XXX: needs some threads to scale all these tests, also maybe throw
	 * in submission from higher priority context to see if we are
	 * preempted for very large objects...
	 */

	do {
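		/*
		 * Pick a random virtual size of up to 1/16 of the vm, but
		 * cap the physical backing store at S16_MAX pages -- which
		 * presumably matches the largest block a single blt covers.
		 */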
		const u32 max_block_size = S16_MAX * PAGE_SIZE;
		u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
		u32 phys_sz = sz % (max_block_size + 1);
		u32 val = prandom_u32_state(&prng);
		u32 i;

		sz = round_up(sz, PAGE_SIZE);
		phys_sz = round_up(phys_sz, PAGE_SIZE);

		pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
			 phys_sz, sz, val);

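		/*
		 * huge_gem_object() advertises size sz while allocating only
		 * phys_sz bytes of backing store (pages are reused to fill
		 * out the rest), so only the physical range is mapped and
		 * checked here.
		 */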
		obj = huge_gem_object(i915, phys_sz, sz);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_flush;
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put;
		}

		/*
		 * Make sure the potentially async clflush does its job, if
		 * required.
		 */
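		/* Poison with val ^ 0xdeadbeaf so a fill that never lands is caught. */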
		memset32(vaddr, val ^ 0xdeadbeaf,
			 huge_gem_object_phys_size(obj) / sizeof(u32));

		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			obj->cache_dirty = true;

		err = i915_gem_object_fill_blt(obj, ce, val);
		if (err)
			goto err_unpin;

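		/*
		 * Moving to the CPU read domain waits for the fill blt to
		 * complete and flushes stale cachelines before the readback.
		 */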
		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_cpu_domain(obj, false);
		i915_gem_object_unlock(obj);
		if (err)
			goto err_unpin;

		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(obj);
		i915_gem_object_put(obj);
	} while (!time_after(jiffies, end));

	goto err_flush;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
err_flush:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_copy_blt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
	struct drm_i915_gem_object *src, *dst;
	struct rnd_state prng;
	IGT_TIMEOUT(end);
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

	do {
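		/*
		 * Same scheme as igt_fill_blt(): random sizes with the
		 * physical backing capped at a single blt block, repeated
		 * until the timeout expires.
		 */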
		const u32 max_block_size = S16_MAX * PAGE_SIZE;
		u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
		u32 phys_sz = sz % (max_block_size + 1);
		u32 val = prandom_u32_state(&prng);
		u32 i;

		sz = round_up(sz, PAGE_SIZE);
		phys_sz = round_up(phys_sz, PAGE_SIZE);

		pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
			 phys_sz, sz, val);

		src = huge_gem_object(i915, phys_sz, sz);
		if (IS_ERR(src)) {
			err = PTR_ERR(src);
			goto err_flush;
		}

		vaddr = i915_gem_object_pin_map(src, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put_src;
		}

		memset32(vaddr, val,
			 huge_gem_object_phys_size(src) / sizeof(u32));

		i915_gem_object_unpin_map(src);

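		/*
		 * If CPU writes are not coherent for GPU reads, mark src dirty
		 * so the memset above is flushed out before the blt samples it.
		 */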
		if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
			src->cache_dirty = true;

		dst = huge_gem_object(i915, phys_sz, sz);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto err_put_src;
		}

		vaddr = i915_gem_object_pin_map(dst, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put_dst;
		}

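		/* Poison dst so a copy blt that writes nothing is caught below. */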
		memset32(vaddr, val ^ 0xdeadbeaf,
			 huge_gem_object_phys_size(dst) / sizeof(u32));

		if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			dst->cache_dirty = true;

		err = i915_gem_object_copy_blt(src, dst, ce);
		if (err)
			goto err_unpin;

		i915_gem_object_lock(dst);
		err = i915_gem_object_set_to_cpu_domain(dst, false);
		i915_gem_object_unlock(dst);
		if (err)
			goto err_unpin;

		for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(dst);

		i915_gem_object_put(src);
		i915_gem_object_put(dst);
	} while (!time_after(jiffies, end));

	goto err_flush;

err_unpin:
	i915_gem_object_unpin_map(dst);
err_put_dst:
	i915_gem_object_put(dst);
err_put_src:
	i915_gem_object_put(src);
err_flush:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_fill_blt),
		SUBTEST(igt_copy_blt),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	if (!HAS_ENGINE(i915, BCS0))
		return 0;

	return i915_live_subtests(tests, i915);
}