// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/lib_sw_fence.h"
#include "shmem_utils.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
#define NUM_GPR 16
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
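
/*
 * Illustrative note (not in the original source): each engine exposes 16
 * 64-bit general-purpose CS registers laid out contiguously from
 * mmio_base + 0x600, addressed here in dword units. For example,
 * CS_GPR(engine, 2) resolves to mmio_base + 0x608, the lower dword of GPR1.
 */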

static struct i915_vma *create_scratch(struct intel_gt *gt)
{
	return __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
}

static bool is_active(struct i915_request *rq)
{
	if (i915_request_is_active(rq))
		return true;

	if (i915_request_on_hold(rq))
		return true;

	if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
		return true;

	return false;
}

static int wait_for_submit(struct intel_engine_cs *engine,
			   struct i915_request *rq,
			   unsigned long timeout)
{
	/* Ignore our own attempts to suppress excess tasklets */
	tasklet_hi_schedule(&engine->execlists.tasklet);

	timeout += jiffies;
	do {
		bool done = time_after(jiffies, timeout);

		if (i915_request_completed(rq)) /* that was quick! */
			return 0;

		/* Wait until the HW has acknowledged the submission (or err) */
		intel_engine_flush_submission(engine);
		if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
			return 0;

		if (done)
			return -ETIME;

		cond_resched();
	} while (1);
}

static int emit_semaphore_signal(struct intel_context *ce, void *slot)
{
	const u32 offset =
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;
	u32 *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = offset;
	*cs++ = 0;
	*cs++ = 1;

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	i915_request_add(rq);
	return 0;
}

static int context_flush(struct intel_context *ce, long timeout)
{
	struct i915_request *rq;
	struct dma_fence *fence;
	int err = 0;

	rq = intel_engine_create_kernel_request(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	fence = i915_active_fence_get(&ce->timeline->last_request);
	if (fence) {
		i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	rq = i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, timeout) < 0)
		err = -ETIME;
	i915_request_put(rq);

	rmb(); /* We know the request is written, make sure all state is too! */

	return err;
}

static int live_lrc_layout(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 *lrc;
	int err;

	/*
	 * Check the register offsets we use to create the initial reg state
	 * match the layout saved by HW.
	 */
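
	/*
	 * Illustrative layout (editor's note, not from the original source):
	 * both images are parsed as a stream of LRI blocks, e.g.
	 *
	 *   MI_LOAD_REGISTER_IMM(2)
	 *   <reg offset> <value>
	 *   <reg offset> <value>
	 *   ...
	 *   MI_BATCH_BUFFER_END
	 *
	 * The loop below walks the HW-saved image and our generated image in
	 * lockstep, comparing command headers and register offsets while
	 * skipping the register values, which are expected to differ.
	 */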

	lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!lrc)
		return -ENOMEM;

	err = 0;
	for_each_engine(engine, gt, id) {
		u32 *hw;
		int dw;

		if (!engine->default_state)
			continue;

		hw = shmem_pin_map(engine->default_state);
		if (IS_ERR(hw)) {
			err = PTR_ERR(hw);
			break;
		}
		hw += LRC_STATE_OFFSET / sizeof(*hw);

		__lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE),
				engine->kernel_context, engine, true);

		dw = 0;
		do {
			u32 lri = hw[dw];

			if (lri == 0) {
				dw++;
				continue;
			}

			if (lrc[dw] == 0) {
				pr_debug("%s: skipped instruction %x at dword %d\n",
					 engine->name, lri, dw);
				dw++;
				continue;
			}

			if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
				pr_err("%s: Expected LRI command at dword %d, found %08x\n",
				       engine->name, dw, lri);
				err = -EINVAL;
				break;
			}

			if (lrc[dw] != lri) {
				pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n",
				       engine->name, dw, lri, lrc[dw]);
				err = -EINVAL;
				break;
			}

			lri &= 0x7f;
			lri++;
			dw++;

			while (lri) {
				if (hw[dw] != lrc[dw]) {
					pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
					       engine->name, dw, hw[dw], lrc[dw]);
					err = -EINVAL;
					break;
				}

				/*
				 * Skip over the actual register value as we
				 * expect that to differ.
				 */
				dw += 2;
				lri -= 2;
			}
		} while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

		if (err) {
			pr_info("%s: HW register image:\n", engine->name);
			igt_hexdump(hw, PAGE_SIZE);

			pr_info("%s: SW register image:\n", engine->name);
			igt_hexdump(lrc, PAGE_SIZE);
		}

		shmem_unpin_map(engine->default_state, hw);
		if (err)
			break;
	}

	kfree(lrc);
	return err;
}

static int find_offset(const u32 *lri, u32 offset)
{
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		if (lri[i] == offset)
			return i;

	return -1;
}

static int live_lrc_fixed(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check the assumed register offsets match the actual locations in
	 * the context image.
	 */

	for_each_engine(engine, gt, id) {
		const struct {
			u32 reg;
			u32 offset;
			const char *name;
		} tbl[] = {
			{
				i915_mmio_reg_offset(RING_START(engine->mmio_base)),
				CTX_RING_START - 1,
				"RING_START"
			},
			{
				i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
				CTX_RING_CTL - 1,
				"RING_CTL"
			},
			{
				i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)),
				CTX_RING_HEAD - 1,
				"RING_HEAD"
			},
			{
				i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)),
				CTX_RING_TAIL - 1,
				"RING_TAIL"
			},
			{
				i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)),
				lrc_ring_mi_mode(engine),
				"RING_MI_MODE"
			},
			{
				i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)),
				lrc_ring_bb_state(engine),
				"BB_STATE"
			},
			{
				i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(engine->mmio_base)),
				lrc_ring_wa_bb_per_ctx(engine),
				"RING_BB_PER_CTX_PTR"
			},
			{
				i915_mmio_reg_offset(RING_INDIRECT_CTX(engine->mmio_base)),
				lrc_ring_indirect_ptr(engine),
				"RING_INDIRECT_CTX_PTR"
			},
			{
				i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(engine->mmio_base)),
				lrc_ring_indirect_offset(engine),
				"RING_INDIRECT_CTX_OFFSET"
			},
			{
				i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
				CTX_TIMESTAMP - 1,
				"RING_CTX_TIMESTAMP"
			},
			{
				i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0)),
				lrc_ring_gpr0(engine),
				"RING_CS_GPR0"
			},
			{
				i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)),
				lrc_ring_cmd_buf_cctl(engine),
				"RING_CMD_BUF_CCTL"
			},
			{ },
		}, *t;
		u32 *hw;

		if (!engine->default_state)
			continue;

		hw = shmem_pin_map(engine->default_state);
		if (IS_ERR(hw)) {
			err = PTR_ERR(hw);
			break;
		}
		hw += LRC_STATE_OFFSET / sizeof(*hw);

		for (t = tbl; t->name; t++) {
			int dw = find_offset(hw, t->reg);

			if (dw != t->offset) {
				pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n",
				       engine->name,
				       t->name,
				       t->reg,
				       dw,
				       t->offset);
				err = -EINVAL;
			}
		}

		shmem_unpin_map(engine->default_state, hw);
	}

	return err;
}

static int __live_lrc_state(struct intel_engine_cs *engine,
			    struct i915_vma *scratch)
{
	struct intel_context *ce;
	struct i915_request *rq;
	struct i915_gem_ww_ctx ww;
	enum {
		RING_START_IDX = 0,
		RING_TAIL_IDX,
		MAX_IDX
	};
	u32 expected[MAX_IDX];
	u32 *cs;
	int err;
	int n;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(scratch->obj, &ww);
	if (!err)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_put;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	cs = intel_ring_begin(rq, 4 * MAX_IDX);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		i915_request_add(rq);
		goto err_unpin;
	}

	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base));
	*cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32);
	*cs++ = 0;

	expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma);

	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base));
	*cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
	*cs++ = 0;

	err = i915_request_await_object(rq, scratch->obj, true);
	if (!err)
		err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);

	i915_request_get(rq);
	i915_request_add(rq);
	if (err)
		goto err_rq;

	intel_engine_flush_submission(engine);
	expected[RING_TAIL_IDX] = ce->ring->tail;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_rq;
	}

	for (n = 0; n < MAX_IDX; n++) {
		if (cs[n] != expected[n]) {
			pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n",
			       engine->name, n, cs[n], expected[n]);
			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(scratch->obj);

err_rq:
	i915_request_put(rq);
err_unpin:
	intel_context_unpin(ce);
err_put:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_context_put(ce);
	return err;
}

static int live_lrc_state(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_vma *scratch;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check the live register state matches what we expect for this
	 * engine.
	 */

	scratch = create_scratch(gt);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	for_each_engine(engine, gt, id) {
		err = __live_lrc_state(engine, scratch);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int gpr_make_dirty(struct intel_context *ce)
{
	struct i915_request *rq;
	u32 *cs;
	int n;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
	for (n = 0; n < NUM_GPR_DW; n++) {
		*cs++ = CS_GPR(ce->engine, n);
		*cs++ = STACK_MAGIC;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	i915_request_add(rq);

	return 0;
}

static struct i915_request *
__gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
{
	const u32 offset =
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;
	u32 *cs;
	int err;
	int n;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return rq;

	cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return ERR_CAST(cs);
	}

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;

	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_NEQ_SDD;
	*cs++ = 0;
	*cs++ = offset;
	*cs++ = 0;

	for (n = 0; n < NUM_GPR_DW; n++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = CS_GPR(ce->engine, n);
		*cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
		*cs++ = 0;
	}

	i915_vma_lock(scratch);
	err = i915_request_await_object(rq, scratch->obj, true);
	if (!err)
		err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(scratch);

	i915_request_get(rq);
	i915_request_add(rq);
	if (err) {
		i915_request_put(rq);
		rq = ERR_PTR(err);
	}

	return rq;
}

static int __live_lrc_gpr(struct intel_engine_cs *engine,
			  struct i915_vma *scratch,
			  bool preempt)
{
	u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4);
	struct intel_context *ce;
	struct i915_request *rq;
	u32 *cs;
	int err;
	int n;

	if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
		return 0; /* GPR only on rcs0 for gen8 */

	err = gpr_make_dirty(engine->kernel_context);
	if (err)
		return err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = __gpr_read(ce, scratch, slot);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_put;
	}

	err = wait_for_submit(engine, rq, HZ / 2);
	if (err)
		goto err_rq;

	if (preempt) {
		err = gpr_make_dirty(engine->kernel_context);
		if (err)
			goto err_rq;

		err = emit_semaphore_signal(engine->kernel_context, slot);
		if (err)
			goto err_rq;

		err = wait_for_submit(engine, rq, HZ / 2);
		if (err)
			goto err_rq;
	} else {
		slot[0] = 1;
		wmb();
	}

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_rq;
	}

	for (n = 0; n < NUM_GPR_DW; n++) {
		if (cs[n]) {
			pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n",
			       engine->name,
			       n / 2, n & 1 ? "udw" : "ldw",
			       cs[n]);
			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(scratch->obj);

err_rq:
	memset32(&slot[0], -1, 4);
	wmb();
	i915_request_put(rq);
err_put:
	intel_context_put(ce);
	return err;
}

static int live_lrc_gpr(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_vma *scratch;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check that GPR registers are cleared in new contexts as we need
	 * to avoid leaking any information from previous contexts.
	 */
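
	/*
	 * Editor's note: gpr_make_dirty() scribbles the GPRs from the kernel
	 * context, then __gpr_read() samples all NUM_GPR_DW dwords from a
	 * freshly created context; any non-zero readback would be state
	 * leaking between contexts.
	 */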

	scratch = create_scratch(gt);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	for_each_engine(engine, gt, id) {
		st_engine_heartbeat_disable(engine);

		err = __live_lrc_gpr(engine, scratch, false);
		if (err)
			goto err;

		err = __live_lrc_gpr(engine, scratch, true);
		if (err)
			goto err;

err:
		st_engine_heartbeat_enable(engine);
		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			break;
	}

	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static struct i915_request *
create_timestamp(struct intel_context *ce, void *slot, int idx)
{
	const u32 offset =
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;
	u32 *cs;
	int err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return rq;

	cs = intel_ring_begin(rq, 10);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err;
	}

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;

	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_NEQ_SDD;
	*cs++ = 0;
	*cs++ = offset;
	*cs++ = 0;

	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
	*cs++ = offset + idx * sizeof(u32);
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_MASK;
	err = 0;
err:
	i915_request_get(rq);
	i915_request_add(rq);
	if (err) {
		i915_request_put(rq);
		return ERR_PTR(err);
	}

	return rq;
}

struct lrc_timestamp {
	struct intel_engine_cs *engine;
	struct intel_context *ce[2];
	u32 poison;
};

static bool timestamp_advanced(u32 start, u32 end)
{
	return (s32)(end - start) > 0;
}
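
/*
 * Worked example (editor's note): the unsigned subtraction handles
 * wraparound, so timestamp_advanced(0xfffffff0, 0x10) is true because
 * (s32)(0x10 - 0xfffffff0) == 32 > 0, while timestamp_advanced(50, 50)
 * is false as a stalled timestamp has not advanced.
 */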

static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
{
	u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4);
	struct i915_request *rq;
	u32 timestamp;
	int err = 0;

	arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison;
	rq = create_timestamp(arg->ce[0], slot, 1);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	err = wait_for_submit(rq->engine, rq, HZ / 2);
	if (err)
		goto err;

	if (preempt) {
		arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef;
		err = emit_semaphore_signal(arg->ce[1], slot);
		if (err)
			goto err;
	} else {
		slot[0] = 1;
		wmb();
	}

	/* And wait for switch to kernel (to save our context to memory) */
	err = context_flush(arg->ce[0], HZ / 2);
	if (err)
		goto err;

	if (!timestamp_advanced(arg->poison, slot[1])) {
		pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
		       arg->engine->name, preempt ? "preempt" : "simple",
		       arg->poison, slot[1]);
		err = -EINVAL;
	}

	timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]);
	if (!timestamp_advanced(slot[1], timestamp)) {
		pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n",
		       arg->engine->name, preempt ? "preempt" : "simple",
		       slot[1], timestamp);
		err = -EINVAL;
	}

err:
	memset32(slot, -1, 4);
	i915_request_put(rq);
	return err;
}

static int live_lrc_timestamp(void *arg)
{
	struct lrc_timestamp data = {};
	struct intel_gt *gt = arg;
	enum intel_engine_id id;
	const u32 poison[] = {
		0,
		S32_MIN,
		-50, /* 200ms */
		50, /* -200ms */
		S32_MAX,
		U32_MAX,
	};

	/*
	 * We want to verify that the timestamp is saved and restored across
	 * context switches and is monotonic.
	 *
	 * So we do this with a little bit of LRC poisoning to check various
	 * boundary conditions, and see what happens if we preempt the context
	 * with a second request (carrying more poison into the timestamp).
	 */
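
	/*
	 * Editor's note: each __lrc_timestamp() pass checks two transitions.
	 * slot[1] captures CTX_TIMESTAMP while the request runs, so it must
	 * have advanced past the poison planted before the restore; the value
	 * saved back into the context image afterwards must in turn have
	 * advanced past slot[1].
	 */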

	for_each_engine(data.engine, gt, id) {
		int i, err = 0;

		st_engine_heartbeat_disable(data.engine);

		for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
			struct intel_context *tmp;

			tmp = intel_context_create(data.engine);
			if (IS_ERR(tmp)) {
				err = PTR_ERR(tmp);
				goto err;
			}

			err = intel_context_pin(tmp);
			if (err) {
				intel_context_put(tmp);
				goto err;
			}

			data.ce[i] = tmp;
		}

		for (i = 0; i < ARRAY_SIZE(poison); i++) {
			data.poison = poison[i];

			err = __lrc_timestamp(&data, false);
			if (err)
				break;

			err = __lrc_timestamp(&data, true);
			if (err)
				break;
		}

err:
		st_engine_heartbeat_enable(data.engine);
		for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
			if (!data.ce[i])
				break;

			intel_context_unpin(data.ce[i]);
			intel_context_put(data.ce[i]);
		}

		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			return err;
	}

	return 0;
}

static struct i915_vma *
create_user_vma(struct i915_address_space *vm, unsigned long size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}

static struct i915_vma *
store_context(struct intel_context *ce, struct i915_vma *scratch)
{
	struct i915_vma *batch;
	u32 dw, x, *cs, *hw;
	u32 *defaults;

	batch = create_user_vma(ce->vm, SZ_64K);
	if (IS_ERR(batch))
		return batch;

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		i915_vma_put(batch);
		return ERR_CAST(cs);
	}

	defaults = shmem_pin_map(ce->engine->default_state);
	if (!defaults) {
		i915_gem_object_unpin_map(batch->obj);
		i915_vma_put(batch);
		return ERR_PTR(-ENOMEM);
	}

	x = 0;
	dw = 0;
	hw = defaults;
	hw += LRC_STATE_OFFSET / sizeof(*hw);
	do {
		u32 len = hw[dw] & 0x7f;

		if (hw[dw] == 0) {
			dw++;
			continue;
		}

		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
			dw += len + 2;
			continue;
		}

		dw++;
		len = (len + 1) / 2;
		while (len--) {
			*cs++ = MI_STORE_REGISTER_MEM_GEN8;
			*cs++ = hw[dw];
			*cs++ = lower_32_bits(scratch->node.start + x);
			*cs++ = upper_32_bits(scratch->node.start + x);

			dw += 2;
			x += 4;
		}
	} while (dw < PAGE_SIZE / sizeof(u32) &&
		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

	*cs++ = MI_BATCH_BUFFER_END;

	shmem_unpin_map(ce->engine->default_state, defaults);

	i915_gem_object_flush_map(batch->obj);
	i915_gem_object_unpin_map(batch->obj);

	return batch;
}

static int move_to_active(struct i915_request *rq,
			  struct i915_vma *vma,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, flags);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

static struct i915_request *
record_registers(struct intel_context *ce,
		 struct i915_vma *before,
		 struct i915_vma *after,
		 u32 *sema)
{
	struct i915_vma *b_before, *b_after;
	struct i915_request *rq;
	u32 *cs;
	int err;

	b_before = store_context(ce, before);
	if (IS_ERR(b_before))
		return ERR_CAST(b_before);

	b_after = store_context(ce, after);
	if (IS_ERR(b_after)) {
		rq = ERR_CAST(b_after);
		goto err_before;
	}

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		goto err_after;

	err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
	if (err)
		goto err_rq;

	err = move_to_active(rq, b_before, 0);
	if (err)
		goto err_rq;

	err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
	if (err)
		goto err_rq;

	err = move_to_active(rq, b_after, 0);
	if (err)
		goto err_rq;

	cs = intel_ring_begin(rq, 14);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_rq;
	}

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
	*cs++ = lower_32_bits(b_before->node.start);
	*cs++ = upper_32_bits(b_before->node.start);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_NEQ_SDD;
	*cs++ = 0;
	*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(sema);
	*cs++ = 0;
	*cs++ = MI_NOOP;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
	*cs++ = lower_32_bits(b_after->node.start);
	*cs++ = upper_32_bits(b_after->node.start);

	intel_ring_advance(rq, cs);

	WRITE_ONCE(*sema, 0);
	i915_request_get(rq);
	i915_request_add(rq);
err_after:
	i915_vma_put(b_after);
err_before:
	i915_vma_put(b_before);
	return rq;

err_rq:
	i915_request_add(rq);
	rq = ERR_PTR(err);
	goto err_after;
}

static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
{
	struct i915_vma *batch;
	u32 dw, *cs, *hw;
	u32 *defaults;

	batch = create_user_vma(ce->vm, SZ_64K);
	if (IS_ERR(batch))
		return batch;

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		i915_vma_put(batch);
		return ERR_CAST(cs);
	}

	defaults = shmem_pin_map(ce->engine->default_state);
	if (!defaults) {
		i915_gem_object_unpin_map(batch->obj);
		i915_vma_put(batch);
		return ERR_PTR(-ENOMEM);
	}

	dw = 0;
	hw = defaults;
	hw += LRC_STATE_OFFSET / sizeof(*hw);
	do {
		u32 len = hw[dw] & 0x7f;

		if (hw[dw] == 0) {
			dw++;
			continue;
		}

		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
			dw += len + 2;
			continue;
		}

		dw++;
		len = (len + 1) / 2;
		*cs++ = MI_LOAD_REGISTER_IMM(len);
		while (len--) {
			*cs++ = hw[dw];
			*cs++ = poison;
			dw += 2;
		}
	} while (dw < PAGE_SIZE / sizeof(u32) &&
		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

	*cs++ = MI_BATCH_BUFFER_END;

	shmem_unpin_map(ce->engine->default_state, defaults);

	i915_gem_object_flush_map(batch->obj);
	i915_gem_object_unpin_map(batch->obj);

	return batch;
}

static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cs;
	int err;

	batch = load_context(ce, poison);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = move_to_active(rq, batch, 0);
	if (err)
		goto err_rq;

	cs = intel_ring_begin(rq, 8);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_rq;
	}

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
	*cs++ = lower_32_bits(batch->node.start);
	*cs++ = upper_32_bits(batch->node.start);

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(sema);
	*cs++ = 0;
	*cs++ = 1;

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
err_rq:
	i915_request_add(rq);
err_batch:
	i915_vma_put(batch);
	return err;
}

static bool is_moving(u32 a, u32 b)
{
	return a != b;
}

static int compare_isolation(struct intel_engine_cs *engine,
			     struct i915_vma *ref[2],
			     struct i915_vma *result[2],
			     struct intel_context *ce,
			     u32 poison)
{
	u32 x, dw, *hw, *lrc;
	u32 *A[2], *B[2];
	u32 *defaults;
	int err = 0;

	A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
	if (IS_ERR(A[0]))
		return PTR_ERR(A[0]);

	A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
	if (IS_ERR(A[1])) {
		err = PTR_ERR(A[1]);
		goto err_A0;
	}

	B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
	if (IS_ERR(B[0])) {
		err = PTR_ERR(B[0]);
		goto err_A1;
	}

	B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
	if (IS_ERR(B[1])) {
		err = PTR_ERR(B[1]);
		goto err_B0;
	}

	lrc = i915_gem_object_pin_map(ce->state->obj,
				      i915_coherent_map_type(engine->i915));
	if (IS_ERR(lrc)) {
		err = PTR_ERR(lrc);
		goto err_B1;
	}
	lrc += LRC_STATE_OFFSET / sizeof(*hw);

	defaults = shmem_pin_map(ce->engine->default_state);
	if (!defaults) {
		err = -ENOMEM;
		goto err_lrc;
	}

	x = 0;
	dw = 0;
	hw = defaults;
	hw += LRC_STATE_OFFSET / sizeof(*hw);
	do {
		u32 len = hw[dw] & 0x7f;

		if (hw[dw] == 0) {
			dw++;
			continue;
		}

		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
			dw += len + 2;
			continue;
		}

		dw++;
		len = (len + 1) / 2;
		while (len--) {
			if (!is_moving(A[0][x], A[1][x]) &&
			    (A[0][x] != B[0][x] || A[1][x] != B[1][x])) {
				switch (hw[dw] & 4095) {
				case 0x30: /* RING_TAIL */
				case 0x34: /* RING_HEAD */
					break;

				default:
					pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n",
					       engine->name, dw,
					       hw[dw], hw[dw + 1],
					       A[0][x], B[0][x], B[1][x],
					       poison, lrc[dw + 1]);
					err = -EINVAL;
				}
			}
			dw += 2;
			x++;
		}
	} while (dw < PAGE_SIZE / sizeof(u32) &&
		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

	shmem_unpin_map(ce->engine->default_state, defaults);
err_lrc:
	i915_gem_object_unpin_map(ce->state->obj);
err_B1:
	i915_gem_object_unpin_map(result[1]->obj);
err_B0:
	i915_gem_object_unpin_map(result[0]->obj);
err_A1:
	i915_gem_object_unpin_map(ref[1]->obj);
err_A0:
	i915_gem_object_unpin_map(ref[0]->obj);
	return err;
}

static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
{
	u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
	struct i915_vma *ref[2], *result[2];
	struct intel_context *A, *B;
	struct i915_request *rq;
	int err;

	A = intel_context_create(engine);
	if (IS_ERR(A))
		return PTR_ERR(A);

	B = intel_context_create(engine);
	if (IS_ERR(B)) {
		err = PTR_ERR(B);
		goto err_A;
	}

	ref[0] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(ref[0])) {
		err = PTR_ERR(ref[0]);
		goto err_B;
	}

	ref[1] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(ref[1])) {
		err = PTR_ERR(ref[1]);
		goto err_ref0;
	}

	rq = record_registers(A, ref[0], ref[1], sema);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_ref1;
	}

	WRITE_ONCE(*sema, 1);
	wmb();

	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
		i915_request_put(rq);
		err = -ETIME;
		goto err_ref1;
	}
	i915_request_put(rq);

	result[0] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(result[0])) {
		err = PTR_ERR(result[0]);
		goto err_ref1;
	}

	result[1] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(result[1])) {
		err = PTR_ERR(result[1]);
		goto err_result0;
	}

	rq = record_registers(A, result[0], result[1], sema);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_result1;
	}

	err = poison_registers(B, poison, sema);
	if (err) {
		WRITE_ONCE(*sema, -1);
		i915_request_put(rq);
		goto err_result1;
	}

	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
		i915_request_put(rq);
		err = -ETIME;
		goto err_result1;
	}
	i915_request_put(rq);

	err = compare_isolation(engine, ref, result, A, poison);

err_result1:
	i915_vma_put(result[1]);
err_result0:
	i915_vma_put(result[0]);
err_ref1:
	i915_vma_put(ref[1]);
err_ref0:
	i915_vma_put(ref[0]);
err_B:
	intel_context_put(B);
err_A:
	intel_context_put(A);
	return err;
}

static bool skip_isolation(const struct intel_engine_cs *engine)
{
	if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9)
		return true;

	if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11)
		return true;

	return false;
}

static int live_lrc_isolation(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	const u32 poison[] = {
		GENMASK(31, 0),
		0,
		0x5a5a5a5a,
		0xc5c5c5c5,
	};
	int err = 0;

	/*
	 * Our goal is to try to verify that per-context state cannot be
	 * tampered with by another non-privileged client.
	 *
	 * We take the list of context registers from the LRI in the default
	 * context image and attempt to modify that list from a remote context.
	 */
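
	/*
	 * Editor's sketch of the flow below: record A's registers into ref[]
	 * undisturbed, then record them again into result[] while B replays
	 * the same register list as LRIs filled with poison. If ref[] and
	 * result[] differ for any register that is not naturally volatile,
	 * B has tampered with A's context image.
	 */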

	for_each_engine(engine, gt, id) {
		int i;

		/* Just don't even ask */
		if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
		    skip_isolation(engine))
			continue;

		intel_engine_pm_get(engine);
		for (i = 0; i < ARRAY_SIZE(poison); i++) {
			int result;

			result = __lrc_isolation(engine, poison[i]);
			if (result && !err)
				err = result;

			result = __lrc_isolation(engine, ~poison[i]);
			if (result && !err)
				err = result;
		}
		intel_engine_pm_put(engine);
		if (igt_flush_test(gt->i915)) {
			err = -EIO;
			break;
		}
	}

	return err;
}

static int indirect_ctx_submit_req(struct intel_context *ce)
{
	struct i915_request *rq;
	int err = 0;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;

	i915_request_put(rq);

	return err;
}

#define CTX_BB_CANARY_OFFSET (3 * 1024)
#define CTX_BB_CANARY_INDEX  (CTX_BB_CANARY_OFFSET / sizeof(u32))
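
/*
 * Illustrative arithmetic (editor's note): the canary lives 3KiB into the
 * per-context wa_bb page, i.e. at dword index 3 * 1024 / 4 = 768, safely
 * past the batch emitted at the start of that page.
 */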

static u32 *
emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
{
	*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(RING_START(0));
	*cs++ = i915_ggtt_offset(ce->state) +
		context_wa_bb_offset(ce) +
		CTX_BB_CANARY_OFFSET;
	*cs++ = 0;

	return cs;
}

static void
indirect_ctx_bb_setup(struct intel_context *ce)
{
	u32 *cs = context_indirect_bb(ce);

	cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;

	setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
}

static bool check_ring_start(struct intel_context *ce)
{
	const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
		LRC_STATE_OFFSET + context_wa_bb_offset(ce);
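
	/*
	 * Editor's note: ce->lrc_reg_state points at the register state
	 * within the pinned context image, so stepping back by
	 * LRC_STATE_OFFSET recovers the base of the image, and adding
	 * context_wa_bb_offset() lands on the per-context wa_bb page where
	 * the canary batch stored RING_START.
	 */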

	if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
		return true;

	pr_err("ring start mismatch: canary 0x%08x vs state 0x%08x\n",
	       ctx_bb[CTX_BB_CANARY_INDEX],
	       ce->lrc_reg_state[CTX_RING_START]);

	return false;
}

static int indirect_ctx_bb_check(struct intel_context *ce)
{
	int err;

	err = indirect_ctx_submit_req(ce);
	if (err)
		return err;

	if (!check_ring_start(ce))
		return -EINVAL;

	return 0;
}

static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
{
	struct intel_context *a, *b;
	int err;

	a = intel_context_create(engine);
	if (IS_ERR(a))
		return PTR_ERR(a);
	err = intel_context_pin(a);
	if (err)
		goto put_a;

	b = intel_context_create(engine);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto unpin_a;
	}
	err = intel_context_pin(b);
	if (err)
		goto put_b;

	/* We use the already reserved extra page in context state */
	if (!a->wa_bb_page) {
		GEM_BUG_ON(b->wa_bb_page);
		GEM_BUG_ON(INTEL_GEN(engine->i915) == 12);
		goto unpin_b;
	}

	/*
	 * To test that our per-context bb is truly per context and executes
	 * at the intended point in the context restore process, make the
	 * batch store the ring start value to memory. As ring start is
	 * restored prior to starting the indirect ctx bb and will be
	 * different for each context, it fits this purpose.
	 */
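
	/*
	 * Editor's note: each context owns its own ring buffer, so the
	 * RING_START sampled by the canary batch is unique per context; a
	 * stale 0xdeadf00d canary or a mismatched value would mean the
	 * indirect ctx bb did not run where intended.
	 */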

	indirect_ctx_bb_setup(a);
	indirect_ctx_bb_setup(b);

	err = indirect_ctx_bb_check(a);
	if (err)
		goto unpin_b;

	err = indirect_ctx_bb_check(b);

unpin_b:
	intel_context_unpin(b);
put_b:
	intel_context_put(b);
unpin_a:
	intel_context_unpin(a);
put_a:
	intel_context_put(a);

	return err;
}

static int live_lrc_indirect_ctx_bb(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = __live_lrc_indirect_ctx_bb(engine);
		intel_engine_pm_put(engine);

		if (igt_flush_test(gt->i915))
			err = -EIO;

		if (err)
			break;
	}

	return err;
}

static void garbage_reset(struct intel_engine_cs *engine,
			  struct i915_request *rq)
{
	const unsigned int bit = I915_RESET_ENGINE + engine->id;
	unsigned long *lock = &engine->gt->reset.flags;

	local_bh_disable();
	if (!test_and_set_bit(bit, lock)) {
		tasklet_disable(&engine->execlists.tasklet);

		if (!rq->fence.error)
			__intel_engine_reset_bh(engine, NULL);

		tasklet_enable(&engine->execlists.tasklet);
		clear_and_wake_up_bit(bit, lock);
	}
	local_bh_enable();
}

static struct i915_request *garbage(struct intel_context *ce,
				    struct rnd_state *prng)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (err)
		return ERR_PTR(err);

	prandom_bytes_state(prng,
			    ce->lrc_reg_state,
			    ce->engine->context_size -
			    LRC_STATE_OFFSET);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	i915_request_get(rq);
	i915_request_add(rq);
	return rq;

err_unpin:
	intel_context_unpin(ce);
	return ERR_PTR(err);
}

static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
{
	struct intel_context *ce;
	struct i915_request *hang;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	hang = garbage(ce, prng);
	if (IS_ERR(hang)) {
		err = PTR_ERR(hang);
		goto err_ce;
	}

	if (wait_for_submit(engine, hang, HZ / 2)) {
		i915_request_put(hang);
		err = -ETIME;
		goto err_ce;
	}

	intel_context_set_banned(ce);
	garbage_reset(engine, hang);

	intel_engine_flush_submission(engine);
	if (!hang->fence.error) {
		i915_request_put(hang);
		pr_err("%s: corrupted context was not reset\n",
		       engine->name);
		err = -EINVAL;
		goto err_ce;
	}

	if (i915_request_wait(hang, 0, HZ / 2) < 0) {
		pr_err("%s: corrupted context did not recover\n",
		       engine->name);
		i915_request_put(hang);
		err = -EIO;
		goto err_ce;
	}
	i915_request_put(hang);

err_ce:
	intel_context_put(ce);
	return err;
}

static int live_lrc_garbage(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Verify that we can recover if one context state is completely
	 * corrupted.
	 */

	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	for_each_engine(engine, gt, id) {
		I915_RND_STATE(prng);
		int err = 0, i;

		if (!intel_has_reset_engine(engine->gt))
			continue;

		intel_engine_pm_get(engine);
		for (i = 0; i < 3; i++) {
			err = __lrc_garbage(engine, &prng);
			if (err)
				break;
		}
		intel_engine_pm_put(engine);

		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			return err;
	}

	return 0;
}

static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;
	IGT_TIMEOUT(end_time);
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	ce->runtime.num_underflow = 0;
	ce->runtime.max_underflow = 0;

	do {
		unsigned int loop = 1024;

		while (loop) {
			rq = intel_context_create_request(ce);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				goto err_ce;
			}

			if (--loop == 0)
				i915_request_get(rq);

			i915_request_add(rq);
		}

		if (__igt_timeout(end_time, NULL))
			break;

		i915_request_put(rq);
	} while (1);

	err = i915_request_wait(rq, 0, HZ / 5);
	if (err < 0) {
		pr_err("%s: request not completed!\n", engine->name);
		goto err_rq;
	}

	igt_flush_test(engine->i915);

	pr_info("%s: pphwsp runtime %lluns, average %lluns\n",
		engine->name,
		intel_context_get_total_runtime_ns(ce),
		intel_context_get_avg_runtime_ns(ce));

	err = 0;
	if (ce->runtime.num_underflow) {
		pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
		       engine->name,
		       ce->runtime.num_underflow,
		       ce->runtime.max_underflow);
		err = -EOVERFLOW;
	}

err_rq:
	i915_request_put(rq);
err_ce:
	intel_context_put(ce);
	return err;
}

static int live_pphwsp_runtime(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check that cumulative context runtime as stored in the pphwsp[16]
	 * is monotonically increasing.
	 */
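
	/*
	 * Editor's note: the GPU accumulates per-context runtime into dword
	 * 16 of the per-process HWSP (pphwsp[16]) on each context save. The
	 * loop below simply generates enough requests to exercise many saves
	 * and then checks the accumulated value never went backwards
	 * (runtime.num_underflow must stay zero).
	 */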

	for_each_engine(engine, gt, id) {
		err = __live_pphwsp_runtime(engine);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

int intel_lrc_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_lrc_layout),
		SUBTEST(live_lrc_fixed),
		SUBTEST(live_lrc_state),
		SUBTEST(live_lrc_gpr),
		SUBTEST(live_lrc_isolation),
		SUBTEST(live_lrc_timestamp),
		SUBTEST(live_lrc_garbage),
		SUBTEST(live_pphwsp_runtime),
		SUBTEST(live_lrc_indirect_ctx_bb),
	};

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}