/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

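/*
 * Registers that are write-only on certain platforms, and so cannot be
 * verified by reading the value back (see wo_register() below).
 */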
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

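/*
 * Helpers to submit a request and synchronously wait for it to complete
 * (request_add_sync) or for its spinner payload to start executing
 * (request_add_spin), with a short timeout as a safety net.
 */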
static int request_add_sync(struct i915_request *rq, int err)
	if (i915_request_wait(rq, 0, HZ / 5) < 0)

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
	if (spin && !igt_wait_for_spinner(spin, rq))

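/*
 * Build reference copies of the GT and per-engine workaround lists so that
 * they can later be compared against the state programmed into the hardware.
 */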
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt->i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,

reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);

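/*
 * Use MI_STORE_REGISTER_MEM from within the target context to copy each
 * RING_FORCE_TO_NONPRIV slot into a buffer object we can inspect from the CPU.
 */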
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
	struct intel_engine_cs *engine = ce->engine;
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);

	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);

	rq = intel_context_create_request(ce);

	err = i915_request_await_object(rq, vma->obj, true);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(engine->i915) >= 8)

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
	intel_ring_advance(rq, cs);

	i915_request_add(rq);

	i915_request_add(rq);

	i915_gem_object_put(result);

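/*
 * The value expected in each RING_NONPRIV slot: the whitelisted register for
 * slots that are in use, RING_NOPID for the remainder.
 */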
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);

print_results(const struct intel_engine_cs *engine, const u32 *results)
	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);

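/*
 * Read back the RING_NONPRIV slots for this context and verify that every
 * slot still holds the expected whitelisted register.
 */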
static int check_whitelist(struct intel_context *ce)
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;

	results = read_nonprivs(ce);
	return PTR_ERR(results);

	i915_gem_object_lock(results, NULL);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(engine->gt))

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	err = PTR_ERR(vaddr);

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

	i915_gem_object_unpin_map(results);

	i915_gem_object_put(results);

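/* Reset callbacks passed to check_whitelist_across_reset(). */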
static int do_device_reset(struct intel_engine_cs *engine)
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");

static int do_engine_reset(struct intel_engine_cs *engine)
	return intel_engine_reset(engine, "live_workarounds");

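/*
 * Park the engine on a spinning scratch context so that the reset happens
 * while a different, non-test context is active on the hardware.
 */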
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
	struct intel_context *ce;
	struct i915_request *rq;

	ce = intel_context_create(engine);

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	err = request_add_spin(rq, spin);
	igt_spinner_end(spin);

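/*
 * Verify that the whitelist survives a reset: check it before, reset while a
 * scratch context is spinning, then re-check both the original context and a
 * freshly created one.
 */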
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
	struct intel_context *ce, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ce = intel_context_create(engine);

	err = igt_spinner_init(&spin, engine->gt);

	err = check_whitelist(ce);
	pr_err("Invalid whitelist *before* %s reset!\n", name);

	err = switch_to_scratch_context(engine, &spin);

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)

	igt_spinner_end(&spin);
	pr_err("%s reset failed\n", name);

	err = check_whitelist(ce);
	pr_err("Whitelist not preserved in context across %s reset!\n",

	tmp = intel_context_create(engine);

	intel_context_put(ce);

	err = check_whitelist(ce);
	pr_err("Invalid whitelist *after* %s reset in fresh context!\n",

	igt_spinner_fini(&spin);

	intel_context_put(ce);

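/* Create and pin a 16-page batch buffer object in the given address space. */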
static struct i915_vma *create_batch(struct i915_address_space *vm)
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
	return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);

	i915_gem_object_put(obj);

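/*
 * Predict the value read back after writing 'new' over 'old'. 'rsvd' is the
 * set of bits observed to stick when ~0 was written: 0x0000ffff denotes a
 * masked register, where the upper 16 bits of the write select which of the
 * lower 16 bits are updated.
 */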
static u32 reg_write(u32 old, u32 new, u32 rsvd)
	if (rsvd == 0x0000ffff) {
		old |= new & (new >> 16);

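/*
 * Is this register write-only (either flagged as such in its NONPRIV entry or
 * listed in wo_registers for this platform), so that a readback cannot be
 * used to verify the write?
 */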
static bool wo_register(struct intel_engine_cs *engine, u32 reg)
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_WR)

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)

static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
	reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;

static bool ro_register(u32 reg)
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_RD)

static int whitelist_writable_count(struct intel_engine_cs *engine)
	int count = engine->whitelist.count;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))

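/*
 * For each whitelisted register, write a series of test values from an
 * unprivileged batch, read every result back via SRM and check that the
 * writes took effect (allowing for read-only and masked registers). The
 * original value is saved first and restored with LRM at the end.
 */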
static int check_dirty_whitelist(struct intel_context *ce)
	const u32 values[] = {
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v, sz;

	sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
	scratch = __vm_create_scratch_for_read(ce->vm, sz);
	return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	err = PTR_ERR(batch);

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;

		if (wo_register(engine, reg))

		if (timestamp(engine, reg))
			continue; /* timestamps are expected to autoincrement */

		ro_reg = ro_register(reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(engine->i915) >= 8)

		pr_debug("%s: Writing garbage to %x\n",

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);

		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		for (v = 0; v < ARRAY_SIZE(values); v++) {
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = intel_context_create_request(ce);

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);

		i915_vma_lock(scratch);
		err = i915_request_await_object(rq, scratch->obj, true);
		err = i915_vma_move_to_active(scratch, rq,
		i915_vma_unlock(scratch);

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,

		err = request_add_sync(rq, err);
		pr_err("%s: Futzing %x timed out; cancelling test\n",
		intel_gt_set_wedged(engine->gt);
		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);

		/* detect write masking */
		rsvd = results[ARRAY_SIZE(values)];
		pr_err("%s: Unable to write to whitelisted register %x\n",

		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, values[v], rsvd);
			if (results[idx] != expect)
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, ~values[v], rsvd);
			if (results[idx] != expect)

		pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
		       engine->name, err, reg);

		pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
			engine->name, reg, results[0]);
		pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
			engine->name, reg, results[0], rsvd);

		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, w, rsvd);
			pr_info("Wrote %08x, read %08x, expect %08x\n",
				w, results[idx], expect);
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, w, rsvd);
			pr_info("Wrote %08x, read %08x, expect %08x\n",
				w, results[idx], expect);

		i915_gem_object_unpin_map(scratch->obj);

	if (igt_flush_test(engine->i915))

	i915_vma_unpin_and_release(&batch, 0);
	i915_vma_unpin_and_release(&scratch, 0);

static int live_dirty_whitelist(void *arg)
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		if (engine->whitelist.count == 0)

		ce = intel_context_create(engine);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);

static int live_reset_whitelist(void *arg)
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)

		if (intel_has_reset_engine(gt)) {
			err = check_whitelist_across_reset(engine,

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,

	igt_global_reset_unlock(gt);

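/*
 * Emit an SRM for every whitelisted register so that its current value lands
 * in the supplied results buffer, one dword per register.
 */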
static int read_whitelisted_registers(struct intel_context *ce,
				      struct i915_vma *results)
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;

	rq = intel_context_create_request(ce);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
	err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(engine->i915) >= 8)

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	intel_ring_advance(rq, cs);

	return request_add_sync(rq, err);

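/*
 * From an unprivileged batch, use MI_LOAD_REGISTER_IMM to overwrite every
 * writable whitelisted register, dirtying the values seen by this context.
 */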
static int scrub_whitelisted_registers(struct intel_context *ce)
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	struct i915_vma *batch;

	batch = create_batch(ce->vm);
	return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = intel_context_create_request(ce);

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

	err = request_add_sync(rq, err);

	i915_gem_object_unpin_map(batch->obj);

	i915_vma_unpin_and_release(&batch, 0);

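/*
 * Look up a register in a { reg, gen_mask } exemption table, matching on the
 * register offset and the running platform's generation.
 */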
struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     const struct regmask *tbl,
	u32 offset = i915_mmio_reg_offset(reg);

	if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
	    i915_mmio_reg_offset(tbl->reg) == offset)

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
	/* Some registers do not seem to behave, and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);

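/*
 * Compare two captured whitelist snapshots register by register, using the
 * supplied predicate (result_eq or result_neq) for the writable entries.
 */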
check_whitelisted_registers(struct intel_engine_cs *engine,
			    bool (*fn)(struct intel_engine_cs *engine,
	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);

	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)

		if (!fn(engine, a[i], b[i], wa->reg))

	i915_gem_object_unpin_map(B->obj);

	i915_gem_object_unpin_map(A->obj);

static int live_isolated_whitelist(void *arg)
	struct intel_gt *gt = arg;
	struct i915_vma *scratch[2];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check that a write into a whitelist register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		client[i].scratch[0] =
			__vm_create_scratch_for_read(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);

		client[i].scratch[1] =
			__vm_create_scratch_for_read(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce[2];

		if (!engine->kernel_context->vm)

		if (!whitelist_writable_count(engine))

		ce[0] = intel_context_create(engine);
		if (IS_ERR(ce[0])) {
			err = PTR_ERR(ce[0]);

		ce[1] = intel_context_create(engine);
		if (IS_ERR(ce[1])) {
			err = PTR_ERR(ce[1]);
			intel_context_put(ce[0]);

		/* Read default values */
		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(ce[0]);

		/* Read values from ctx1; we expect these to be defaults */
		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],

		intel_context_put(ce[1]);
		intel_context_put(ce[0]);

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);

	if (igt_flush_test(gt->i915))

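/*
 * Re-verify the reference GT, engine and context workaround lists against
 * the values currently held by the hardware.
 */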
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,

		intel_context_put(ce);

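/*
 * Check that the GT and engine workarounds are still applied after a full
 * GPU reset.
 */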
live_gpu_reset_workarounds(void *arg)
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;
	struct wa_lists lists;

	if (!intel_has_gpu_reset(gt))

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	ok = verify_wa_lists(gt, &lists, "before reset");

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(gt, &lists, "after reset");

	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;

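/*
 * Check that the workaround lists survive per-engine resets, both while the
 * engine is idle and while it is busy with a spinner.
 */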
live_engine_reset_workarounds(void *arg)
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;

	if (!intel_has_reset_engine(gt))

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	for_each_engine(engine, gt, id) {
		pr_info("Verifying after %s reset...\n", engine->name);
		ce = intel_context_create(engine);

		ok = verify_wa_lists(gt, &lists, "before reset");

		intel_engine_reset(engine, "live_workarounds:idle");

		ok = verify_wa_lists(gt, &lists, "after idle reset");

		ret = igt_spinner_init(&spin, engine->gt);

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		igt_spinner_fini(&spin);

		ret = request_add_spin(rq, &spin);
		pr_err("Spinner failed to start\n");
		igt_spinner_fini(&spin);

		intel_engine_reset(engine, "live_workarounds:active");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(gt, &lists, "after busy reset");

		intel_context_put(ce);

	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	igt_flush_test(gt->i915);

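/* Entry point that runs the live workaround and whitelist selftests on the GT. */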
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))

	return intel_gt_live_subtests(tests, &i915->gt);