/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */
#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"
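/*
 * Test context: the object under test plus the engine used for the "gpu"
 * access mode.
 */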
struct context {
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
};
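/*
 * Each coherency mode below supplies a set()/get() pair that writes or reads
 * a single dword through one access path (CPU page, GTT aperture, WC map or
 * a GPU store), relying on the cache-domain API to keep the paths coherent.
 */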
static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
        unsigned int needs_clflush;
        struct page *page;
        void *map;
        u32 *cpu;
        int err;

        i915_gem_object_lock(ctx->obj, NULL);
        err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
        if (err)
                goto out;

        page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
        map = kmap_atomic(page);
        cpu = map + offset_in_page(offset);

        if (needs_clflush & CLFLUSH_BEFORE)
                drm_clflush_virt_range(cpu, sizeof(*cpu));

        *cpu = v;

        if (needs_clflush & CLFLUSH_AFTER)
                drm_clflush_virt_range(cpu, sizeof(*cpu));

        kunmap_atomic(map);
        i915_gem_object_finish_access(ctx->obj);
out:
        i915_gem_object_unlock(ctx->obj);
        return err;
}

static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
        unsigned int needs_clflush;
        struct page *page;
        void *map;
        u32 *cpu;
        int err;

        i915_gem_object_lock(ctx->obj, NULL);
        err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
        if (err)
                goto out;

        page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
        map = kmap_atomic(page);
        cpu = map + offset_in_page(offset);

        if (needs_clflush & CLFLUSH_BEFORE)
                drm_clflush_virt_range(cpu, sizeof(*cpu));

        *v = *cpu;

        kunmap_atomic(map);
        i915_gem_object_finish_access(ctx->obj);
out:
        i915_gem_object_unlock(ctx->obj);
        return err;
}

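/*
 * The "gtt" mode goes through the mappable aperture, so the vma must be
 * pinned with PIN_MAPPABLE and the GT held awake around the iomap access.
 */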
static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
        struct i915_vma *vma;
        u32 __iomem *map;
        int err = 0;

        i915_gem_object_lock(ctx->obj, NULL);
        err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
        i915_gem_object_unlock(ctx->obj);
        if (err)
                return err;

        vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        intel_gt_pm_get(vma->vm->gt);
        map = i915_vma_pin_iomap(vma);
        i915_vma_unpin(vma);
        if (IS_ERR(map)) {
                err = PTR_ERR(map);
                goto out_rpm;
        }

        iowrite32(v, &map[offset / sizeof(*map)]);
        i915_vma_unpin_iomap(vma);
out_rpm:
        intel_gt_pm_put(vma->vm->gt);
        return err;
}

static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
        struct i915_vma *vma;
        u32 __iomem *map;
        int err = 0;

        i915_gem_object_lock(ctx->obj, NULL);
        err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
        i915_gem_object_unlock(ctx->obj);
        if (err)
                return err;

        vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        intel_gt_pm_get(vma->vm->gt);
        map = i915_vma_pin_iomap(vma);
        i915_vma_unpin(vma);
        if (IS_ERR(map)) {
                err = PTR_ERR(map);
                goto out_rpm;
        }

        *v = ioread32(&map[offset / sizeof(*map)]);
        i915_vma_unpin_iomap(vma);
out_rpm:
        intel_gt_pm_put(vma->vm->gt);
        return err;
}

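/*
 * The "wc" mode uses a write-combined CPU mapping of the object's pages;
 * writes must be explicitly flushed out of the WC buffers afterwards.
 */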
static int wc_set(struct context *ctx, unsigned long offset, u32 v)
{
        u32 *map;
        int err;

        i915_gem_object_lock(ctx->obj, NULL);
        err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
        i915_gem_object_unlock(ctx->obj);
        if (err)
                return err;

        map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
        if (IS_ERR(map))
                return PTR_ERR(map);

        map[offset / sizeof(*map)] = v;

        __i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map));
        i915_gem_object_unpin_map(ctx->obj);
        return 0;
}

static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
{
        u32 *map;
        int err;

        i915_gem_object_lock(ctx->obj, NULL);
        err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
        i915_gem_object_unlock(ctx->obj);
        if (err)
                return err;

        map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
        if (IS_ERR(map))
                return PTR_ERR(map);

        *v = map[offset / sizeof(*map)];
        i915_gem_object_unpin_map(ctx->obj);
        return 0;
}

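/*
 * The "gpu" mode writes the dword from the GPU itself, by emitting an
 * MI_STORE_DWORD_IMM into a kernel-context request; the exact encoding of
 * the command depends on the hardware generation.
 */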
static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
{
        struct i915_request *rq;
        struct i915_vma *vma;
        u32 *cs;
        int err;

        vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        i915_gem_object_lock(ctx->obj, NULL);
        err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
        if (err)
                goto out_unpin;

        rq = intel_engine_create_kernel_request(ctx->engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto out_unpin;
        }

        cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto out_rq;
        }

        if (GRAPHICS_VER(ctx->engine->i915) >= 8) {
                *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
                *cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
                *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
                *cs++ = v;
        } else if (GRAPHICS_VER(ctx->engine->i915) >= 4) {
                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = 0;
                *cs++ = i915_ggtt_offset(vma) + offset;
                *cs++ = v;
        } else {
                *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                *cs++ = i915_ggtt_offset(vma) + offset;
                *cs++ = v;
                *cs++ = MI_NOOP;
        }
        intel_ring_advance(rq, cs);

        err = i915_request_await_object(rq, vma->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
out_rq:
        i915_request_add(rq);
out_unpin:
        i915_vma_unpin(vma);
        i915_gem_object_unlock(ctx->obj);
        return err;
}

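/*
 * Validity checks: skip a mode if its backing facility is absent (no fence
 * registers for the aperture, no usable MI_STORE_DWORD on the engine) or
 * if the GPU is already wedged.
 */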
static bool always_valid(struct context *ctx)
{
        return true;
}

static bool needs_fence_registers(struct context *ctx)
{
        struct intel_gt *gt = ctx->engine->gt;

        if (intel_gt_is_wedged(gt))
                return false;

        return gt->ggtt->num_fences;
}

static bool needs_mi_store_dword(struct context *ctx)
{
        if (intel_gt_is_wedged(ctx->engine->gt))
                return false;

        return intel_engine_can_store_dword(ctx->engine);
}

static const struct igt_coherency_mode {
        const char *name;
        int (*set)(struct context *ctx, unsigned long offset, u32 v);
        int (*get)(struct context *ctx, unsigned long offset, u32 *v);
        bool (*valid)(struct context *ctx);
} igt_coherency_mode[] = {
        { "cpu", cpu_set, cpu_get, always_valid },
        { "gtt", gtt_set, gtt_get, needs_fence_registers },
        { "wc", wc_set, wc_get, always_valid },
        { "gpu", gpu_set, NULL, needs_mi_store_dword },
        { },
};

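/*
 * Pick one uabi engine at random: count them all, then walk the list a
 * second time up to the randomly chosen index.
 */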
static struct intel_engine_cs *
random_engine(struct drm_i915_private *i915, struct rnd_state *prng)
{
        struct intel_engine_cs *engine;
        unsigned int count;

        count = 0;
        for_each_uabi_engine(engine, i915)
                count++;

        count = i915_prandom_u32_max_state(count, prng);
        for_each_uabi_engine(engine, i915)
                if (count-- == 0)
                        return engine;

        return NULL;
}

static int igt_gem_coherency(void *arg)
{
        const unsigned int ncachelines = PAGE_SIZE / 64;
        struct drm_i915_private *i915 = arg;
        const struct igt_coherency_mode *read, *write, *over;
        unsigned long count, n;
        u32 *offsets, *values;
        I915_RND_STATE(prng);
        struct context ctx;
        int err = 0;

        /*
         * We repeatedly write, overwrite and read from a sequence of
         * cachelines in order to try and detect incoherency (unflushed writes
         * from either the CPU or GPU). Each setter/getter uses our cache
         * domain API which should prevent incoherency.
         */

        offsets = kmalloc_array(ncachelines, 2 * sizeof(u32), GFP_KERNEL);
        if (!offsets)
                return -ENOMEM;
        /* One dword per cacheline, staggered within each 64-byte line. */
        for (count = 0; count < ncachelines; count++)
                offsets[count] = count * 64 + 4 * (count % 16);

        values = offsets + ncachelines;

        ctx.engine = random_engine(i915, &prng);
        if (!ctx.engine) {
                err = -ENODEV;
                goto out_free;
        }
        pr_info("%s: using %s\n", __func__, ctx.engine->name);
        intel_engine_pm_get(ctx.engine);

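        /*
         * Sweep every (overwrite, write, read) combination of the access
         * modes, skipping any mode that is invalid on this device.
         */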
        for (over = igt_coherency_mode; over->name; over++) {
                if (!over->set)
                        continue;

                if (!over->valid(&ctx))
                        continue;

                for (write = igt_coherency_mode; write->name; write++) {
                        if (!write->set)
                                continue;

                        if (!write->valid(&ctx))
                                continue;

                        for (read = igt_coherency_mode; read->name; read++) {
                                if (!read->get)
                                        continue;

                                if (!read->valid(&ctx))
                                        continue;

                                for_each_prime_number_from(count, 1, ncachelines) {
                                        ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
                                        if (IS_ERR(ctx.obj)) {
                                                err = PTR_ERR(ctx.obj);
                                                goto out_pm;
                                        }

                                        i915_random_reorder(offsets, ncachelines, &prng);
                                        for (n = 0; n < count; n++)
                                                values[n] = prandom_u32_state(&prng);

                                        /* Seed stale (inverted) values first. */
                                        for (n = 0; n < count; n++) {
                                                err = over->set(&ctx, offsets[n], ~values[n]);
                                                if (err) {
                                                        pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
                                                               n, count, over->name, err);
                                                        goto put_object;
                                                }
                                        }

                                        for (n = 0; n < count; n++) {
                                                err = write->set(&ctx, offsets[n], values[n]);
                                                if (err) {
                                                        pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
                                                               n, count, write->name, err);
                                                        goto put_object;
                                                }
                                        }

                                        for (n = 0; n < count; n++) {
                                                u32 found;

                                                err = read->get(&ctx, offsets[n], &found);
                                                if (err) {
                                                        pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
                                                               n, count, read->name, err);
                                                        goto put_object;
                                                }

                                                if (found != values[n]) {
                                                        pr_err("Value[%ld/%ld] mismatch, (overwrite with %s) wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n",
                                                               n, count, over->name,
                                                               write->name, values[n],
                                                               read->name, found,
                                                               ~values[n], offsets[n]);
                                                        err = -EINVAL;
                                                        goto put_object;
                                                }
                                        }

                                        i915_gem_object_put(ctx.obj);
                                }
                        }
                }
        }
out_pm:
        intel_engine_pm_put(ctx.engine);
out_free:
        kfree(offsets);
        return err;

put_object:
        i915_gem_object_put(ctx.obj);
        goto out_pm;
}

int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_gem_coherency),
        };

        return i915_live_subtests(tests, i915);
}