// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "i915_selftest.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"

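/*
 * Shared state for the live MOCS subtests: the MOCS/L3CC tables we expect
 * the hardware to contain, plus a pinned scratch page (and its CPU mapping)
 * into which the GPU writes back the register values it actually sees.
 */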
struct live_mocs {
	struct drm_i915_mocs_table table;
	struct drm_i915_mocs_table *mocs;
	struct drm_i915_mocs_table *l3cc;
	struct i915_vma *scratch;
	void *vaddr;
};

static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	/* We build large requests to read the registers from the ring */
	ce->ring_size = SZ_16K;

	return ce;
}

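/*
 * Submit the request and wait up to 200ms (HZ / 5), either for it to
 * complete (request_add_sync) or for its spinner payload to start
 * executing on the GPU (request_add_spin), reporting -ETIME on timeout.
 */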
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

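/*
 * Look up the MOCS/L3CC tables expected for this platform and allocate a
 * pinned scratch page in the GGTT for the GPU to write the registers into.
 */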
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
	unsigned int flags;
	int err;

	memset(arg, 0, sizeof(*arg));

	flags = get_mocs_settings(gt->i915, &arg->table);
	if (!flags)
		return -EINVAL;

	if (flags & HAS_RENDER_L3CC)
		arg->l3cc = &arg->table;

	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
		arg->mocs = &arg->table;

	arg->scratch =
		__vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
	if (IS_ERR(arg->scratch))
		return PTR_ERR(arg->scratch);

	arg->vaddr = i915_gem_object_pin_map_unlocked(arg->scratch->obj, I915_MAP_WB);
	if (IS_ERR(arg->vaddr)) {
		err = PTR_ERR(arg->vaddr);
		goto err_scratch;
	}

	return 0;

err_scratch:
	i915_vma_unpin_and_release(&arg->scratch, 0);
	return err;
}

static void live_mocs_fini(struct live_mocs *arg)
{
	i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
}

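/*
 * Emit one MI_STORE_REGISTER_MEM (SRM) per register so that the command
 * streamer copies each register value into the scratch page, bumping
 * *offset past the dwords consumed.
 */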
static int read_regs(struct i915_request *rq,
		     u32 addr, unsigned int count,
		     u32 *offset)
{
	unsigned int i;
	u32 *cs;

	GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < count; i++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = *offset;
		*cs++ = 0;

		addr += sizeof(u32);
		*offset += sizeof(u32);
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int read_mocs_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr;

	if (!table)
		return 0;

	if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
		addr = global_mocs_offset();
	else
		addr = mocs_offset(rq->engine);

	return read_regs(rq, addr, table->n_entries, offset);
}

static int read_l3cc_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));

	if (!table)
		return 0;

	/* Two L3CC entries are packed into each 32bit LNCFCMOCS register */
	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}

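/*
 * Compare the values the GPU wrote back against the expected table,
 * advancing *vaddr past the entries checked.
 */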
static int check_mocs_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	unsigned int i;
	u32 expect;

	if (!table)
		return 0;

	for_each_mocs(expect, table, i) {
		if (**vaddr != expect) {
			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
	}

	return 0;
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}

static int check_l3cc_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
	unsigned int i;
	u32 expect;

	if (!table)
		return 0;

	for_each_l3cc(expect, table, i) {
		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
		reg += 4;
	}

	return 0;
}

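/*
 * Poison the scratch page, ask the GPU to write its MOCS registers (and,
 * on the render engine, the L3CC registers) into it using SRM, then
 * verify the result against the expected tables.
 */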
static int check_mocs_engine(struct live_mocs *arg,
			     struct intel_context *ce)
{
	struct i915_vma *vma = arg->scratch;
	struct i915_request *rq;
	u32 offset;
	u32 *vaddr;
	int err;

	memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	/* Read the mocs tables back using SRM */
	offset = i915_ggtt_offset(vma);
	if (!err)
		err = read_mocs_table(rq, arg->mocs, &offset);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = read_l3cc_table(rq, arg->l3cc, &offset);
	offset -= i915_ggtt_offset(vma);
	GEM_BUG_ON(offset > PAGE_SIZE);

	err = request_add_sync(rq, err);
	if (err)
		return err;

	/* Compare the results against the expected tables */
	vaddr = arg->vaddr;
	err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = check_l3cc_table(ce->engine, arg->l3cc, &vaddr);
	if (err)
		return err;

	GEM_BUG_ON(arg->vaddr + offset != vaddr);
	return 0;
}

static int live_mocs_kernel(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Basic check that the system is configured with the expected mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = check_mocs_engine(&mocs, engine->kernel_context);
		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

static int live_mocs_clean(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Every new context should see the same mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		err = check_mocs_engine(&mocs, ce);
		intel_context_put(ce);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

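/*
 * Reset the engine while a spinner keeps it busy, so that we exercise a
 * reset of an active engine rather than an idle one.
 */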
static int active_engine_reset(struct intel_context *ce,
			       const char *reason)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, ce->engine->gt);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		igt_spinner_fini(&spin);
		return PTR_ERR(rq);
	}

	err = request_add_spin(rq, &spin);
	if (err == 0)
		err = intel_engine_reset(ce->engine, reason);

	igt_spinner_end(&spin);
	igt_spinner_fini(&spin);

	return err;
}

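/*
 * Verify the MOCS programming is retained across an idle engine reset,
 * an active engine reset and, if supported, a full GT reset.
 */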
static int __live_mocs_reset(struct live_mocs *mocs,
			     struct intel_context *ce)
{
	struct intel_gt *gt = ce->engine->gt;
	int err;

	if (intel_has_reset_engine(gt)) {
		err = intel_engine_reset(ce->engine, "mocs");
		if (err)
			return err;

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;

		err = active_engine_reset(ce, "mocs");
		if (err)
			return err;

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;
	}

	if (intel_has_gpu_reset(gt)) {
		intel_gt_reset(gt, ce->engine->mask, "mocs");

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_mocs_reset(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Check the mocs setup is retained over per-engine and global resets */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	igt_global_reset_lock(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		intel_engine_pm_get(engine);
		err = __live_mocs_reset(&mocs, ce);
		intel_engine_pm_put(engine);

		intel_context_put(ce);
		if (err)
			break;
	}
	igt_global_reset_unlock(gt);

	live_mocs_fini(&mocs);
	return err;
}

int intel_mocs_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_mocs_kernel),
		SUBTEST(live_mocs_clean),
		SUBTEST(live_mocs_reset),
	};
	struct drm_i915_mocs_table table;

	/* Requires a mocs table (otherwise there is nothing to verify) */
	if (!get_mocs_settings(i915, &table))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}