// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/lib_sw_fence.h"
#include "shmem_utils.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
#define NUM_GPR 16
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */

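/* A pinned GGTT scratch page that SRM batches can dump register values into */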
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
	return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
}

static bool is_active(struct i915_request *rq)
{
	if (i915_request_is_active(rq))
		return true;

	if (i915_request_on_hold(rq))
		return true;

	if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
		return true;

	return false;
}

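/*
 * Flush submission and poll until the backend has acknowledged @rq: either
 * the request completed, or it is running on HW with nothing still pending
 * in the execlists ports. Gives up once @timeout jiffies have elapsed.
 */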
static int wait_for_submit(struct intel_engine_cs *engine,
			   struct i915_request *rq,
			   unsigned long timeout)
{
	/* Ignore our own attempts to suppress excess tasklets */
	tasklet_hi_schedule(&engine->sched_engine->tasklet);

	bool done = time_after(jiffies, timeout);

	if (i915_request_completed(rq)) /* that was quick! */

	/* Wait until the HW has acknowledged the submission (or err) */
	intel_engine_flush_submission(engine);
	if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))

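/*
 * Queue a barrier-priority request on @ce that MI_STORE_DWORD_IMMs into
 * @slot (a dword in the engine's status page), releasing any batch that is
 * busy-waiting on that address with MI_SEMAPHORE_WAIT.
 */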
static int emit_semaphore_signal(struct intel_context *ce, void *slot)
{
		i915_ggtt_offset(ce->engine->status_page.vma) +
	struct i915_request *rq;

	rq = intel_context_create_request(ce);

	cs = intel_ring_begin(rq, 4);

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_BARRIER;

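/*
 * Flush @ce to memory: submit a kernel request that depends on the last
 * request on @ce's timeline and wait for it, so the context image is idle
 * and fully written back before we inspect it.
 */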
static int context_flush(struct intel_context *ce, long timeout)
{
	struct i915_request *rq;
	struct dma_fence *fence;

	rq = intel_engine_create_kernel_request(ce->engine);

	fence = i915_active_fence_get(&ce->timeline->last_request);
	i915_request_await_dma_fence(rq, fence);
	dma_fence_put(fence);

	rq = i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, timeout) < 0)
	i915_request_put(rq);

	rmb(); /* We know the request is written, make sure all state is too! */

static int live_lrc_layout(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check that the register offsets we use to create the initial reg
	 * state match the layout saved by HW.
	 */

	lrc = (u32 *)__get_free_page(GFP_KERNEL); /* requires page alignment */
	GEM_BUG_ON(offset_in_page(lrc));

	for_each_engine(engine, gt, id) {
		if (!engine->default_state)

		hw = shmem_pin_map(engine->default_state);
		hw += LRC_STATE_OFFSET / sizeof(*hw);

		__lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE),
				engine->kernel_context, engine, true);

			u32 lri = READ_ONCE(hw[dw]);

				pr_debug("%s: skipped instruction %x at dword %d\n",
					 engine->name, lri, dw);

			if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
				pr_err("%s: Expected LRI command at dword %d, found %08x\n",
				       engine->name, dw, lri);

			if (lrc[dw] != lri) {
				pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n",
				       engine->name, dw, lri, lrc[dw]);

				u32 offset = READ_ONCE(hw[dw]);

				if (offset != lrc[dw]) {
					pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
					       engine->name, dw, offset, lrc[dw]);

				/*
				 * Skip over the actual register value as we
				 * expect that to differ.
				 */

		} while (!err && (lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

			pr_info("%s: HW register image:\n", engine->name);
			igt_hexdump(hw, PAGE_SIZE);
			pr_info("%s: SW register image:\n", engine->name);
			igt_hexdump(lrc, PAGE_SIZE);

		shmem_unpin_map(engine->default_state, hw);

	free_page((unsigned long)lrc);

static int find_offset(const u32 *lri, u32 offset)
{
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		if (lri[i] == offset)
			return i;

	return -1;
}

static int live_lrc_fixed(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check the assumed register offsets match the actual locations in
	 * the context image.
	 */

	for_each_engine(engine, gt, id) {
			i915_mmio_reg_offset(RING_START(engine->mmio_base)),
			i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
			i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)),
			i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)),
			i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)),
			lrc_ring_mi_mode(engine),
			i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)),
			i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(engine->mmio_base)),
			lrc_ring_wa_bb_per_ctx(engine),
			"RING_BB_PER_CTX_PTR"
			i915_mmio_reg_offset(RING_INDIRECT_CTX(engine->mmio_base)),
			lrc_ring_indirect_ptr(engine),
			"RING_INDIRECT_CTX_PTR"
			i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(engine->mmio_base)),
			lrc_ring_indirect_offset(engine),
			"RING_INDIRECT_CTX_OFFSET"
			i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
			i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0)),
			lrc_ring_gpr0(engine),
			i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)),
			lrc_ring_cmd_buf_cctl(engine),

		if (!engine->default_state)

		hw = shmem_pin_map(engine->default_state);
		hw += LRC_STATE_OFFSET / sizeof(*hw);

		for (t = tbl; t->name; t++) {
			int dw = find_offset(hw, t->reg);

			if (dw != t->offset) {
				pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n",

		shmem_unpin_map(engine->default_state, hw);

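/*
 * Read RING_START and RING_TAIL back from the running context with SRM and
 * compare them against the values the driver placed in the context image.
 */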
static int __live_lrc_state(struct intel_engine_cs *engine,
			    struct i915_vma *scratch)
{
	struct intel_context *ce;
	struct i915_request *rq;
	struct i915_gem_ww_ctx ww;
	u32 expected[MAX_IDX];

	ce = intel_context_create(engine);

	i915_gem_ww_ctx_init(&ww, false);

	err = i915_gem_object_lock(scratch->obj, &ww);
	err = intel_context_pin_ww(ce, &ww);

	rq = i915_request_create(ce);

	cs = intel_ring_begin(rq, 4 * MAX_IDX);
		i915_request_add(rq);

	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base));
	*cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32);

	expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma);

	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base));
	*cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);

	err = i915_request_await_object(rq, scratch->obj, true);
	err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);

	i915_request_get(rq);
	i915_request_add(rq);

	intel_engine_flush_submission(engine);
	expected[RING_TAIL_IDX] = ce->ring->tail;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {

	cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);

	for (n = 0; n < MAX_IDX; n++) {
		if (cs[n] != expected[n]) {
			pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n",
			       engine->name, n, cs[n], expected[n]);

	i915_gem_object_unpin_map(scratch->obj);

	i915_request_put(rq);

	intel_context_unpin(ce);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);

	i915_gem_ww_ctx_fini(&ww);
	intel_context_put(ce);

static int live_lrc_state(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_vma *scratch;
	enum intel_engine_id id;

	/*
	 * Check the live register state matches what we expect for this
	 * context.
	 */

	scratch = create_scratch(gt);
		return PTR_ERR(scratch);

	for_each_engine(engine, gt, id) {
		err = __live_lrc_state(engine, scratch);

	if (igt_flush_test(gt->i915))

	i915_vma_unpin_and_release(&scratch, 0);

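/* Dirty every CS_GPR register of @ce with junk via MI_LOAD_REGISTER_IMM */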
static int gpr_make_dirty(struct intel_context *ce)
{
	struct i915_request *rq;

	rq = intel_context_create_request(ce);

	cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
		i915_request_add(rq);

	*cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
	for (n = 0; n < NUM_GPR_DW; n++) {
		*cs++ = CS_GPR(ce->engine, n);

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	i915_request_add(rq);

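/*
 * Build a request on @ce that spins on a semaphore in the status page and,
 * once released, dumps all CS_GPR registers into @scratch with SRM.
 */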
static struct i915_request *
__gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
{
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;

	rq = intel_context_create_request(ce);

	cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
		i915_request_add(rq);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_SAD_NEQ_SDD;

	for (n = 0; n < NUM_GPR_DW; n++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = CS_GPR(ce->engine, n);
		*cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);

	i915_vma_lock(scratch);
	err = i915_request_await_object(rq, scratch->obj, true);
	err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(scratch);

	i915_request_get(rq);
	i915_request_add(rq);
		i915_request_put(rq);

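/*
 * Dirty the GPRs from the kernel context, then read them back from a fresh
 * context: a new context must observe zeroed GPRs, not another context's
 * leftovers.
 */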
static int __live_lrc_gpr(struct intel_engine_cs *engine,
			  struct i915_vma *scratch,
	u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4);
	struct intel_context *ce;
	struct i915_request *rq;

	if (GRAPHICS_VER(engine->i915) < 9 && engine->class != RENDER_CLASS)
		return 0; /* GPR only on rcs0 for gen8 */

	err = gpr_make_dirty(engine->kernel_context);

	ce = intel_context_create(engine);

	rq = __gpr_read(ce, scratch, slot);

	err = wait_for_submit(engine, rq, HZ / 2);

	err = gpr_make_dirty(engine->kernel_context);

	err = emit_semaphore_signal(engine->kernel_context, slot);

	err = wait_for_submit(engine, rq, HZ / 2);

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {

	cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);

	for (n = 0; n < NUM_GPR_DW; n++) {
			pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n",
			       n / 2, n & 1 ? "udw" : "ldw",

	i915_gem_object_unpin_map(scratch->obj);

	memset32(&slot[0], -1, 4);

	i915_request_put(rq);

	intel_context_put(ce);

static int live_lrc_gpr(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_vma *scratch;
	enum intel_engine_id id;

	/*
	 * Check that GPR registers are cleared in new contexts as we need
	 * to avoid leaking any information from previous contexts.
	 */

	scratch = create_scratch(gt);
		return PTR_ERR(scratch);

	for_each_engine(engine, gt, id) {
		st_engine_heartbeat_disable(engine);

		err = __live_lrc_gpr(engine, scratch, false);

		err = __live_lrc_gpr(engine, scratch, true);

		st_engine_heartbeat_enable(engine);
		if (igt_flush_test(gt->i915))

	i915_vma_unpin_and_release(&scratch, 0);

static struct i915_request *
create_timestamp(struct intel_context *ce, void *slot, int idx)
{
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;

	rq = intel_context_create_request(ce);

	cs = intel_ring_begin(rq, 10);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_SAD_NEQ_SDD;

	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
	*cs++ = offset + idx * sizeof(u32);

	intel_ring_advance(rq, cs);

	i915_request_get(rq);
	i915_request_add(rq);
		i915_request_put(rq);

struct lrc_timestamp {
	struct intel_engine_cs *engine;
	struct intel_context *ce[2];

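/* Signed delta so the comparison remains valid across timestamp wraparound */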
static bool timestamp_advanced(u32 start, u32 end)
{
	return (s32)(end - start) > 0;
}

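/*
 * Poison CTX_TIMESTAMP in ce[0]'s image, sample the live timestamp from the
 * running context, and check that it advanced past the poison on restore and
 * advanced again by the time the context was saved back to memory.
 */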
static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
{
	u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4);
	struct i915_request *rq;

	arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison;
	rq = create_timestamp(arg->ce[0], slot, 1);

	err = wait_for_submit(rq->engine, rq, HZ / 2);

		arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef;
		err = emit_semaphore_signal(arg->ce[1], slot);

	/* And wait for switch to kernel (to save our context to memory) */
	err = context_flush(arg->ce[0], HZ / 2);

	if (!timestamp_advanced(arg->poison, slot[1])) {
		pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
		       arg->engine->name, preempt ? "preempt" : "simple",
		       arg->poison, slot[1]);

	timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]);
	if (!timestamp_advanced(slot[1], timestamp)) {
		pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n",
		       arg->engine->name, preempt ? "preempt" : "simple",

	memset32(slot, -1, 4);
	i915_request_put(rq);

static int live_lrc_timestamp(void *arg)
{
	struct lrc_timestamp data = {};
	struct intel_gt *gt = arg;
	enum intel_engine_id id;
	const u32 poison[] = {

	/*
	 * We want to verify that the timestamp is saved and restored across
	 * context switches and is monotonic.
	 *
	 * So we do this with a little bit of LRC poisoning to check various
	 * boundary conditions, and see what happens if we preempt the context
	 * with a second request (carrying more poison into the timestamp).
	 */

	for_each_engine(data.engine, gt, id) {
		st_engine_heartbeat_disable(data.engine);

		for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
			struct intel_context *tmp;

			tmp = intel_context_create(data.engine);

			err = intel_context_pin(tmp);
				intel_context_put(tmp);

		for (i = 0; i < ARRAY_SIZE(poison); i++) {
			data.poison = poison[i];

			err = __lrc_timestamp(&data, false);

			err = __lrc_timestamp(&data, true);

		st_engine_heartbeat_enable(data.engine);
		for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
			intel_context_unpin(data.ce[i]);
			intel_context_put(data.ce[i]);

		if (igt_flush_test(gt->i915))

static struct i915_vma *
create_user_vma(struct i915_address_space *vm, unsigned long size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, size);
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
		i915_gem_object_put(obj);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
		i915_gem_object_put(obj);

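/*
 * Walk the LRI list in the engine's default context image and emit a batch
 * of MI_STORE_REGISTER_MEM commands dumping each listed register into
 * @scratch.
 */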
static struct i915_vma *
store_context(struct intel_context *ce, struct i915_vma *scratch)
{
	struct i915_vma *batch;

	batch = create_user_vma(ce->vm, SZ_64K);

	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);

	defaults = shmem_pin_map(ce->engine->default_state);
		i915_gem_object_unpin_map(batch->obj);
		return ERR_PTR(-ENOMEM);

	hw += LRC_STATE_OFFSET / sizeof(*hw);

		u32 len = hw[dw] & 0x7f;

		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {

			*cs++ = MI_STORE_REGISTER_MEM_GEN8;
			*cs++ = lower_32_bits(scratch->node.start + x);
			*cs++ = upper_32_bits(scratch->node.start + x);

	} while (dw < PAGE_SIZE / sizeof(u32) &&
		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

	*cs++ = MI_BATCH_BUFFER_END;

	shmem_unpin_map(ce->engine->default_state, defaults);

	i915_gem_object_flush_map(batch->obj);
	i915_gem_object_unpin_map(batch->obj);

static int move_to_active(struct i915_request *rq,
			  struct i915_vma *vma,
	err = i915_request_await_object(rq, vma->obj, flags);
	err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

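/*
 * Submit a request on @ce that dumps the context registers into @before,
 * spins on a semaphore until the caller pokes @sema, then dumps the same
 * registers again into @after, capturing state around the poison attempt.
 */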
static struct i915_request *
record_registers(struct intel_context *ce,
		 struct i915_vma *before,
		 struct i915_vma *after,
	struct i915_vma *b_before, *b_after;
	struct i915_request *rq;

	b_before = store_context(ce, before);
	if (IS_ERR(b_before))
		return ERR_CAST(b_before);

	b_after = store_context(ce, after);
	if (IS_ERR(b_after)) {
		rq = ERR_CAST(b_after);

	rq = intel_context_create_request(ce);

	err = move_to_active(rq, before, EXEC_OBJECT_WRITE);

	err = move_to_active(rq, b_before, 0);

	err = move_to_active(rq, after, EXEC_OBJECT_WRITE);

	err = move_to_active(rq, b_after, 0);

	cs = intel_ring_begin(rq, 14);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
	*cs++ = lower_32_bits(b_before->node.start);
	*cs++ = upper_32_bits(b_before->node.start);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_SAD_NEQ_SDD;
	*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(sema);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
	*cs++ = lower_32_bits(b_after->node.start);
	*cs++ = upper_32_bits(b_after->node.start);

	intel_ring_advance(rq, cs);

	WRITE_ONCE(*sema, 0);
	i915_request_get(rq);
	i915_request_add(rq);

	i915_vma_put(b_after);
	i915_vma_put(b_before);

	i915_request_add(rq);

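/*
 * Build a batch that replays the default context's LRI list, but with every
 * register value replaced by @poison.
 */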
static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
{
	struct i915_vma *batch;

	batch = create_user_vma(ce->vm, SZ_64K);

	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
		i915_vma_put(batch);
		return ERR_CAST(cs);

	defaults = shmem_pin_map(ce->engine->default_state);
		i915_gem_object_unpin_map(batch->obj);
		i915_vma_put(batch);
		return ERR_PTR(-ENOMEM);

	hw += LRC_STATE_OFFSET / sizeof(*hw);

		u32 len = hw[dw] & 0x7f;

		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {

		len = (len + 1) / 2;
		*cs++ = MI_LOAD_REGISTER_IMM(len);

	} while (dw < PAGE_SIZE / sizeof(u32) &&
		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

	*cs++ = MI_BATCH_BUFFER_END;

	shmem_unpin_map(ce->engine->default_state, defaults);

	i915_gem_object_flush_map(batch->obj);
	i915_gem_object_unpin_map(batch->obj);

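/*
 * Run the poison batch on @ce, then write to @sema so that the recording
 * context blocked on its semaphore resumes and captures the aftermath.
 */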
static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
{
	struct i915_request *rq;
	struct i915_vma *batch;

	batch = load_context(ce, poison);
		return PTR_ERR(batch);

	rq = intel_context_create_request(ce);

	err = move_to_active(rq, batch, 0);

	cs = intel_ring_begin(rq, 8);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
	*cs++ = lower_32_bits(batch->node.start);
	*cs++ = upper_32_bits(batch->node.start);

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(sema);

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	i915_request_add(rq);

	i915_vma_put(batch);

static bool is_moving(u32 a, u32 b)
{
	return a != b;
}

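/*
 * Compare the reference dumps against the post-poison dumps: any register
 * that was stable across the reference pair but differs in the result pair
 * was modified by the other context and is reported as an isolation failure.
 */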
static int compare_isolation(struct intel_engine_cs *engine,
			     struct i915_vma *ref[2],
			     struct i915_vma *result[2],
			     struct intel_context *ce,
	u32 x, dw, *hw, *lrc;

	A[0] = i915_gem_object_pin_map_unlocked(ref[0]->obj, I915_MAP_WC);
		return PTR_ERR(A[0]);

	A[1] = i915_gem_object_pin_map_unlocked(ref[1]->obj, I915_MAP_WC);
		err = PTR_ERR(A[1]);

	B[0] = i915_gem_object_pin_map_unlocked(result[0]->obj, I915_MAP_WC);
		err = PTR_ERR(B[0]);

	B[1] = i915_gem_object_pin_map_unlocked(result[1]->obj, I915_MAP_WC);
		err = PTR_ERR(B[1]);

	lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
					       i915_coherent_map_type(engine->i915,
	lrc += LRC_STATE_OFFSET / sizeof(*hw);

	defaults = shmem_pin_map(ce->engine->default_state);

	hw += LRC_STATE_OFFSET / sizeof(*hw);

		u32 len = hw[dw] & 0x7f;

		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {

		len = (len + 1) / 2;

			if (!is_moving(A[0][x], A[1][x]) &&
			    (A[0][x] != B[0][x] || A[1][x] != B[1][x])) {
				switch (hw[dw] & 4095) {
				case 0x30: /* RING_HEAD */
				case 0x34: /* RING_TAIL */

				pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n",
				       A[0][x], B[0][x], B[1][x],
				       poison, lrc[dw + 1]);

	} while (dw < PAGE_SIZE / sizeof(u32) &&
		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);

	shmem_unpin_map(ce->engine->default_state, defaults);

	i915_gem_object_unpin_map(ce->state->obj);

	i915_gem_object_unpin_map(result[1]->obj);

	i915_gem_object_unpin_map(result[0]->obj);

	i915_gem_object_unpin_map(ref[1]->obj);

	i915_gem_object_unpin_map(ref[0]->obj);

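/*
 * Record the registers of context A, let context B attempt to poison them,
 * then record A again and compare the snapshots for any cross-context damage.
 */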
static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
{
	u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
	struct i915_vma *ref[2], *result[2];
	struct intel_context *A, *B;
	struct i915_request *rq;

	A = intel_context_create(engine);

	B = intel_context_create(engine);

	ref[0] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(ref[0])) {
		err = PTR_ERR(ref[0]);

	ref[1] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(ref[1])) {
		err = PTR_ERR(ref[1]);

	rq = record_registers(A, ref[0], ref[1], sema);

	WRITE_ONCE(*sema, 1);

	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
		i915_request_put(rq);

	i915_request_put(rq);

	result[0] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(result[0])) {
		err = PTR_ERR(result[0]);

	result[1] = create_user_vma(A->vm, SZ_64K);
	if (IS_ERR(result[1])) {
		err = PTR_ERR(result[1]);

	rq = record_registers(A, result[0], result[1], sema);

	err = poison_registers(B, poison, sema);
		WRITE_ONCE(*sema, -1);
		i915_request_put(rq);

	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
		i915_request_put(rq);

	i915_request_put(rq);

	err = compare_isolation(engine, ref, result, A, poison);

	i915_vma_put(result[1]);

	i915_vma_put(result[0]);

	i915_vma_put(ref[1]);

	i915_vma_put(ref[0]);

	intel_context_put(B);

	intel_context_put(A);

static bool skip_isolation(const struct intel_engine_cs *engine)
{
	if (engine->class == COPY_ENGINE_CLASS && GRAPHICS_VER(engine->i915) == 9)
		return true;

	if (engine->class == RENDER_CLASS && GRAPHICS_VER(engine->i915) == 11)
		return true;

	return false;
}

static int live_lrc_isolation(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	const u32 poison[] = {

	/*
	 * Our goal is to verify that per-context state cannot be
	 * tampered with by another non-privileged client.
	 *
	 * We take the list of context registers from the LRI in the default
	 * context image and attempt to modify that list from a remote context.
	 */

	for_each_engine(engine, gt, id) {
		/* Just don't even ask */
		if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
		    skip_isolation(engine))

		intel_engine_pm_get(engine);
		for (i = 0; i < ARRAY_SIZE(poison); i++) {
			result = __lrc_isolation(engine, poison[i]);

			result = __lrc_isolation(engine, ~poison[i]);

		intel_engine_pm_put(engine);
		if (igt_flush_test(gt->i915)) {

static int indirect_ctx_submit_req(struct intel_context *ce)
{
	struct i915_request *rq;

	rq = intel_context_create_request(ce);

	i915_request_get(rq);
	i915_request_add(rq);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)

	i915_request_put(rq);

#define CTX_BB_CANARY_OFFSET (3 * 1024)
#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))

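/*
 * The indirect ctx batch stores RING_START into a canary slot inside the
 * context's wa_bb page; if the batch really ran during restore, the canary
 * will match the context's own CTX_RING_START value.
 */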
emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
{
	*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
	*cs++ = i915_mmio_reg_offset(RING_START(0));
	*cs++ = i915_ggtt_offset(ce->state) +
		context_wa_bb_offset(ce) +
		CTX_BB_CANARY_OFFSET;

indirect_ctx_bb_setup(struct intel_context *ce)
{
	u32 *cs = context_indirect_bb(ce);

	cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;

	setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);

static bool check_ring_start(struct intel_context *ce)
{
	const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
		LRC_STATE_OFFSET + context_wa_bb_offset(ce);

	if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
		return true;

	pr_err("ring start mismatch: canary 0x%08x vs state 0x%08x\n",
	       ctx_bb[CTX_BB_CANARY_INDEX],
	       ce->lrc_reg_state[CTX_RING_START]);

	return false;
}

static int indirect_ctx_bb_check(struct intel_context *ce)
{
	err = indirect_ctx_submit_req(ce);

	if (!check_ring_start(ce))

static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
{
	struct intel_context *a, *b;

	a = intel_context_create(engine);
	err = intel_context_pin(a);

	b = intel_context_create(engine);
	err = intel_context_pin(b);

	/* We use the already reserved extra page in context state */
	if (!a->wa_bb_page) {
		GEM_BUG_ON(b->wa_bb_page);
		GEM_BUG_ON(GRAPHICS_VER(engine->i915) == 12);

	/*
	 * In order to test that our per-context bb is truly per context,
	 * and executes at the intended spot in the context restore process,
	 * make the batch store the ring start value to memory.
	 * As ring start is restored prior to starting the indirect ctx bb and
	 * as it will be different for each context, it fits this purpose.
	 */
	indirect_ctx_bb_setup(a);
	indirect_ctx_bb_setup(b);

	err = indirect_ctx_bb_check(a);

	err = indirect_ctx_bb_check(b);

	intel_context_unpin(b);
	intel_context_put(b);

	intel_context_unpin(a);
	intel_context_put(a);

static int live_lrc_indirect_ctx_bb(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = __live_lrc_indirect_ctx_bb(engine);
		intel_engine_pm_put(engine);

		if (igt_flush_test(gt->i915))

static void garbage_reset(struct intel_engine_cs *engine,
			  struct i915_request *rq)
{
	const unsigned int bit = I915_RESET_ENGINE + engine->id;
	unsigned long *lock = &engine->gt->reset.flags;

	if (!test_and_set_bit(bit, lock)) {
		tasklet_disable(&engine->sched_engine->tasklet);

		if (!rq->fence.error)
			__intel_engine_reset_bh(engine, NULL);

		tasklet_enable(&engine->sched_engine->tasklet);
		clear_and_wake_up_bit(bit, lock);

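/*
 * Scribble random bytes over @ce's register state and submit a request on
 * it, so the GPU is asked to restore complete garbage.
 */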
static struct i915_request *garbage(struct intel_context *ce,
				    struct rnd_state *prng)
{
	struct i915_request *rq;

	err = intel_context_pin(ce);
		return ERR_PTR(err);

	prandom_bytes_state(prng,
			    ce->engine->context_size -

	rq = intel_context_create_request(ce);

	i915_request_get(rq);
	i915_request_add(rq);

	intel_context_unpin(ce);
	return ERR_PTR(err);

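/*
 * Ban the corrupted context and reset the engine underneath it, then verify
 * the hung request was flagged with an error and that the engine recovered.
 */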
static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
{
	struct intel_context *ce;
	struct i915_request *hang;

	ce = intel_context_create(engine);

	hang = garbage(ce, prng);
		err = PTR_ERR(hang);

	if (wait_for_submit(engine, hang, HZ / 2)) {
		i915_request_put(hang);

	intel_context_set_banned(ce);
	garbage_reset(engine, hang);

	intel_engine_flush_submission(engine);
	if (!hang->fence.error) {
		i915_request_put(hang);
		pr_err("%s: corrupted context was not reset\n",

	if (i915_request_wait(hang, 0, HZ / 2) < 0) {
		pr_err("%s: corrupted context did not recover\n",
		i915_request_put(hang);

	i915_request_put(hang);

	intel_context_put(ce);

static int live_lrc_garbage(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Verify that we can recover if one context state is completely
	 * corrupted.
	 */

	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))

	for_each_engine(engine, gt, id) {
		I915_RND_STATE(prng);

		if (!intel_has_reset_engine(engine->gt))

		intel_engine_pm_get(engine);
		for (i = 0; i < 3; i++) {
			err = __lrc_garbage(engine, &prng);

		intel_engine_pm_put(engine);
		if (igt_flush_test(gt->i915))

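/*
 * Submit a stream of trivial requests on a fresh context and check that the
 * accumulated pphwsp runtime never goes backwards (no underflow).
 */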
static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;
	IGT_TIMEOUT(end_time);

	ce = intel_context_create(engine);

	ce->runtime.num_underflow = 0;
	ce->runtime.max_underflow = 0;

		unsigned int loop = 1024;

			rq = intel_context_create_request(ce);

				i915_request_get(rq);

			i915_request_add(rq);

		if (__igt_timeout(end_time, NULL))

		i915_request_put(rq);

	err = i915_request_wait(rq, 0, HZ / 5);
		pr_err("%s: request not completed!\n", engine->name);

	igt_flush_test(engine->i915);

	pr_info("%s: pphwsp runtime %lluns, average %lluns\n",
		intel_context_get_total_runtime_ns(ce),
		intel_context_get_avg_runtime_ns(ce));

	if (ce->runtime.num_underflow) {
		pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
		       ce->runtime.num_underflow,
		       ce->runtime.max_underflow);

	i915_request_put(rq);

	intel_context_put(ce);

static int live_pphwsp_runtime(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check that cumulative context runtime as stored in the pphwsp[16]
	 * is monotonic.
	 */

	for_each_engine(engine, gt, id) {
		err = __live_pphwsp_runtime(engine);

	if (igt_flush_test(gt->i915))

int intel_lrc_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_lrc_layout),
		SUBTEST(live_lrc_fixed),
		SUBTEST(live_lrc_state),
		SUBTEST(live_lrc_gpr),
		SUBTEST(live_lrc_isolation),
		SUBTEST(live_lrc_timestamp),
		SUBTEST(live_lrc_garbage),
		SUBTEST(live_pphwsp_runtime),
		SUBTEST(live_lrc_indirect_ctx_bb),
	};

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}