// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016 Intel Corporation
 */
#include <linux/kthread.h>

#include "gem/i915_gem_context.h"

#include "intel_gt.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "selftest_engine_heartbeat.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"

#include "selftests/mock_drm.h"

#include "gem/selftests/mock_context.h"
#include "gem/selftests/igt_gem_utils.h"
#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
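
/*
 * The "hang" fixture below owns everything needed to keep an engine busy
 * with a batch that spins forever: a kernel context, a status page ("hws")
 * into which each request stores its fence seqno, and the self-looping
 * batch itself. Tests release the spinner by writing MI_BATCH_BUFFER_END
 * over the first dword of h->batch.
 */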
struct hang {
	struct intel_gt *gt;
	struct drm_i915_gem_object *hws;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	u32 *seqno;
	u32 *batch;
};
static int hang_init(struct hang *h, struct intel_gt *gt)
{
	void *vaddr;
	int err;

	memset(h, 0, sizeof(*h));
	h->gt = gt;

	h->ctx = kernel_context(gt->i915, NULL);
	if (IS_ERR(h->ctx))
		return PTR_ERR(h->ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));

	h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(h->hws)) {
		err = PTR_ERR(h->hws);
		goto err_ctx;
	}

	h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(h->obj)) {
		err = PTR_ERR(h->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map_unlocked(h->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	vaddr = i915_gem_object_pin_map_unlocked(h->obj,
						 i915_coherent_map_type(gt->i915, h->obj, false));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	h->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(h->hws);
err_obj:
	i915_gem_object_put(h->obj);
err_hws:
	i915_gem_object_put(h->hws);
err_ctx:
	kernel_context_close(h->ctx);
	return err;
}
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
}

static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}
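
/*
 * Each request advertises that it is running by storing its fence seqno
 * into a per-context slot of the hws page: slot rq->fence.context modulo
 * the number of dwords in a page. hws_address() yields the GPU address of
 * that slot for the batch's store, and hws_seqno() below reads the same
 * slot from the CPU, which is how wait_until_running() observes that the
 * spinner has actually started executing.
 */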
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
	struct intel_gt *gt = h->gt;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
	struct drm_i915_gem_object *obj;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	void *vaddr;
	u32 *batch;
	int err;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		i915_vm_put(vm);
		return ERR_CAST(obj);
	}

	vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false));
	if (IS_ERR(vaddr)) {
		i915_gem_object_put(obj);
		i915_vm_put(vm);
		return ERR_CAST(vaddr);
	}

	i915_gem_object_unpin_map(h->obj);
	i915_gem_object_put(h->obj);

	h->obj = obj;
	h->batch = vaddr;

	vma = i915_vma_instance(h->obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_vm_put(vm);
		return ERR_CAST(vma);
	}

	hws = i915_vma_instance(h->hws, vm, NULL);
	if (IS_ERR(hws)) {
		i915_vm_put(vm);
		return ERR_CAST(hws);
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err) {
		i915_vm_put(vm);
		return ERR_PTR(err);
	}

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = igt_request_alloc(h->ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = h->batch;
	if (GRAPHICS_VER(gt->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*batch++ = lower_32_bits(vma->node.start);
		*batch++ = upper_32_bits(vma->node.start);
	} else if (GRAPHICS_VER(gt->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
		*batch++ = lower_32_bits(vma->node.start);
	} else if (GRAPHICS_VER(gt->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
		*batch++ = lower_32_bits(vma->node.start);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
		*batch++ = lower_32_bits(vma->node.start);
	}
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
	intel_gt_chipset_flush(engine->gt);
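
	/*
	 * Summary of the batch just emitted (modulo the per-gen encodings
	 * above): store the request's seqno into its hws slot, pad through
	 * 1KiB of zeroed dwords bracketed by arbitration points, then branch
	 * back to the start of the batch. The trailing MI_BATCH_BUFFER_END
	 * is never reached; instead, tests stop the loop by overwriting the
	 * first dword of the batch with MI_BATCH_BUFFER_END, so the next
	 * pass through the loop terminates immediately.
	 */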
	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
	if (GRAPHICS_VER(gt->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	i915_vm_put(vm);
	return err ? ERR_PTR(err) : rq;
}
static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
{
	return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
}
static void hang_fini(struct hang *h)
{
	*h->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(h->gt);

	i915_gem_object_unpin_map(h->obj);
	i915_gem_object_put(h->obj);

	i915_gem_object_unpin_map(h->hws);
	i915_gem_object_put(h->hws);

	kernel_context_close(h->ctx);

	igt_flush_test(h->gt->i915);
}
static bool wait_until_running(struct hang *h, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(h, rq),
					    rq->fence.seqno),
			  1000));
}
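
/*
 * Reading of the reconstruction above: wait_until_running() first busy-waits
 * (wait_for_us) for the seqno store to land and only then falls back to a
 * sleeping wait_for(); the request counts as running as soon as either wait
 * sees hws_seqno() catch up with rq->fence.seqno.
 */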
static int igt_hang_sanitycheck(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_request *rq;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err;

	/* Basic check that we can execute our hanging batch */

	err = hang_init(&h, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_wedge_me w;
		long timeout;

		if (!intel_engine_can_store_dword(engine))
			continue;

		rq = hang_create_request(&h, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			pr_err("Failed to create request for %s, err=%d\n",
			       engine->name, err);
			goto fini;
		}

		i915_request_get(rq);

		*h.batch = MI_BATCH_BUFFER_END;
		intel_gt_chipset_flush(engine->gt);

		i915_request_add(rq);

		timeout = 0;
		intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
			timeout = i915_request_wait(rq, 0,
						    MAX_SCHEDULE_TIMEOUT);
		if (intel_gt_is_wedged(gt))
			timeout = -EIO;

		i915_request_put(rq);

		if (timeout < 0) {
			err = timeout;
			pr_err("Wait for request failed on %s, err=%d\n",
			       engine->name, err);
			goto fini;
		}
	}

fini:
	hang_fini(&h);
	return err;
}
static bool wait_for_idle(struct intel_engine_cs *engine)
{
	return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
}
static int igt_reset_nop(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	unsigned int reset_count, count;
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	int err = 0;

	/* Check that we can reset during non-user portions of requests */

	reset_count = i915_reset_count(global);
	count = 0;
	do {
		for_each_engine(engine, gt, id) {
			struct intel_context *ce;
			int i;

			ce = intel_context_create(engine);
			if (IS_ERR(ce)) {
				err = PTR_ERR(ce);
				pr_err("[%s] Create context failed: %d!\n", engine->name, err);
				break;
			}

			for (i = 0; i < 16; i++) {
				struct i915_request *rq;

				rq = intel_context_create_request(ce);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					pr_err("[%s] Create request failed: %d!\n",
					       engine->name, err);
					break;
				}

				i915_request_add(rq);
			}

			intel_context_put(ce);
		}

		igt_global_reset_lock(gt);
		intel_gt_reset(gt, ALL_ENGINES, NULL);
		igt_global_reset_unlock(gt);

		if (intel_gt_is_wedged(gt)) {
			pr_err("[%s] GT is wedged!\n", engine->name);
			err = -EIO;
			break;
		}

		if (i915_reset_count(global) != reset_count + ++count) {
			pr_err("[%s] Reset not recorded: %d vs %d + %d!\n",
			       engine->name, i915_reset_count(global), reset_count, count);
			err = -EINVAL;
			break;
		}

		err = igt_flush_test(gt->i915);
		if (err) {
			pr_err("[%s] Flush failed: %d!\n", engine->name, err);
			break;
		}
	} while (time_before(jiffies, end_time));
	pr_info("%s: %d resets\n", __func__, count);

	if (igt_flush_test(gt->i915)) {
		pr_err("Post flush failed: %d!\n", err);
		err = -EIO;
	}

	return err;
}
static int igt_reset_nop_engine(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Check that we can engine-reset during non-user portions */

	if (!intel_has_reset_engine(gt))
		return 0;

	for_each_engine(engine, gt, id) {
		unsigned int reset_count, reset_engine_count, count;
		struct intel_context *ce;
		IGT_TIMEOUT(end_time);
		int err;

		if (intel_engine_uses_guc(engine)) {
			/* Engine level resets are triggered by GuC when a hang
			 * is detected. They can't be triggered by the KMD any
			 * more. Thus a nop batch cannot be used as a reset test
			 */
			continue;
		}

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
			return PTR_ERR(ce);
		}

		reset_count = i915_reset_count(global);
		reset_engine_count = i915_reset_engine_count(global, engine);
		count = 0;

		st_engine_heartbeat_disable(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		do {
			int i;

			if (!wait_for_idle(engine)) {
				pr_err("%s failed to idle before reset\n",
				       engine->name);
				err = -EIO;
				break;
			}

			for (i = 0; i < 16; i++) {
				struct i915_request *rq;

				rq = intel_context_create_request(ce);
				if (IS_ERR(rq)) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);
					intel_engine_dump(engine, &p,
							  "%s(%s): failed to submit request\n",
							  __func__,
							  engine->name);

					GEM_TRACE("%s(%s): failed to submit request\n",
						  __func__,
						  engine->name);
					GEM_TRACE_DUMP();

					intel_gt_set_wedged(gt);

					err = PTR_ERR(rq);
					break;
				}

				i915_request_add(rq);
			}
			err = intel_engine_reset(engine, NULL);
			if (err) {
				pr_err("intel_engine_reset(%s) failed, err:%d\n",
				       engine->name, err);
				break;
			}

			if (i915_reset_count(global) != reset_count) {
				pr_err("Full GPU reset recorded! (engine reset expected)\n");
				err = -EINVAL;
				break;
			}

			if (i915_reset_engine_count(global, engine) !=
			    reset_engine_count + ++count) {
				pr_err("%s engine reset not recorded!\n",
				       engine->name);
				err = -EINVAL;
				break;
			}
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable(engine);

		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);

		intel_context_put(ce);
		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			return err;
	}

	return 0;
}
static void force_reset_timeout(struct intel_engine_cs *engine)
{
	engine->reset_timeout.probability = 999;
	atomic_set(&engine->reset_timeout.times, -1);
}

static void cancel_reset_timeout(struct intel_engine_cs *engine)
{
	memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
}
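
/*
 * Note on the two helpers above (an assumption from the selftest hooks, not
 * spelled out here): engine->reset_timeout is a CONFIG_DRM_I915_SELFTEST-only
 * fault-injection point consulted by the reset backend. An overwhelming
 * probability with an unlimited repeat count makes every engine reset request
 * report -ETIMEDOUT, which igt_reset_fail_engine() uses to exercise the
 * failure paths; zeroing the struct restores normal behaviour.
 */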
static int igt_reset_fail_engine(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Check that we can recover from engine-reset failures */

	if (!intel_has_reset_engine(gt))
		return 0;

	for_each_engine(engine, gt, id) {
		unsigned int count;
		struct intel_context *ce;
		IGT_TIMEOUT(end_time);
		int err;

		/* Can't manually break the reset if i915 doesn't perform it */
		if (intel_engine_uses_guc(engine))
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
			return PTR_ERR(ce);
		}

		st_engine_heartbeat_disable(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);

		force_reset_timeout(engine);
		err = intel_engine_reset(engine, NULL);
		cancel_reset_timeout(engine);
		if (err == 0) /* timeouts only generated on gen8+ */
			goto skip;

		count = 0;
		do {
			struct i915_request *last = NULL;
			int i;

			if (!wait_for_idle(engine)) {
				pr_err("%s failed to idle before reset\n",
				       engine->name);
				err = -EIO;
				break;
			}

			for (i = 0; i < count % 15; i++) {
				struct i915_request *rq;

				rq = intel_context_create_request(ce);
				if (IS_ERR(rq)) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);
					intel_engine_dump(engine, &p,
							  "%s(%s): failed to submit request\n",
							  __func__,
							  engine->name);

					GEM_TRACE("%s(%s): failed to submit request\n",
						  __func__,
						  engine->name);
					GEM_TRACE_DUMP();

					intel_gt_set_wedged(gt);
					if (last)
						i915_request_put(last);

					err = PTR_ERR(rq);
					goto out;
				}

				if (last)
					i915_request_put(last);
				last = i915_request_get(rq);
				i915_request_add(rq);
			}

			if (count & 1) {
				err = intel_engine_reset(engine, NULL);
				if (err) {
					GEM_TRACE_ERR("intel_engine_reset(%s) failed, err:%d\n",
						      engine->name, err);
					GEM_TRACE_DUMP();
					i915_request_put(last);
					break;
				}
			} else {
				force_reset_timeout(engine);
				err = intel_engine_reset(engine, NULL);
				cancel_reset_timeout(engine);
				if (err != -ETIMEDOUT) {
					pr_err("intel_engine_reset(%s) did not fail, err:%d\n",
					       engine->name, err);
					i915_request_put(last);
					break;
				}
			}

			err = 0;
			if (last) {
				if (i915_request_wait(last, 0, HZ / 2) < 0) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);

					intel_engine_dump(engine, &p,
							  "%s(%s): failed to complete request\n",
							  __func__,
							  engine->name);

					GEM_TRACE("%s(%s): failed to complete request\n",
						  __func__,
						  engine->name);
					GEM_TRACE_DUMP();

					err = -EIO;
				}
				i915_request_put(last);
			}
			count++;
		} while (err == 0 && time_before(jiffies, end_time));
out:
		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
skip:
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable(engine);
		intel_context_put(ce);

		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			return err;
	}

	return 0;
}
static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err = 0;

	/* Check that we can issue an engine reset on an idle engine (no-op) */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (active) {
		err = hang_init(&h, gt);
		if (err)
			return err;
	}

	for_each_engine(engine, gt, id) {
		unsigned int reset_count, reset_engine_count;
		unsigned long count;
		bool using_guc = intel_engine_uses_guc(engine);
		IGT_TIMEOUT(end_time);

		if (using_guc && !active)
			continue;

		if (active && !intel_engine_can_store_dword(engine))
			continue;

		if (!wait_for_idle(engine)) {
			pr_err("%s failed to idle before reset\n",
			       engine->name);
			err = -EIO;
			break;
		}

		reset_count = i915_reset_count(global);
		reset_engine_count = i915_reset_engine_count(global, engine);

		st_engine_heartbeat_disable(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		count = 0;
		do {
			struct i915_request *rq = NULL;
			struct intel_selftest_saved_policy saved;
			int err2;

			err = intel_selftest_modify_policy(engine, &saved,
							   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
			if (err) {
				pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
				break;
			}

			if (active) {
				rq = hang_create_request(&h, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					pr_err("[%s] Create hang request failed: %d!\n",
					       engine->name, err);
					goto restore;
				}

				i915_request_get(rq);
				i915_request_add(rq);

				if (!wait_until_running(&h, rq)) {
					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

					pr_err("%s: Failed to start request %llx, at %x\n",
					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);

					i915_request_put(rq);
					err = -EIO;
					goto restore;
				}
			}

			if (!using_guc) {
				err = intel_engine_reset(engine, NULL);
				if (err) {
					pr_err("intel_engine_reset(%s) failed, err:%d\n",
					       engine->name, err);
					goto restore;
				}
			}

			if (rq) {
				/* Ensure the reset happens and kills the engine */
				err = intel_selftest_wait_for_rq(rq);
				if (err)
					pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
					       engine->name, rq->fence.context,
					       rq->fence.seqno, rq->context->guc_id, err);

				i915_request_put(rq);
			}

			if (err)
				goto restore;

			if (i915_reset_count(global) != reset_count) {
				pr_err("Full GPU reset recorded! (engine reset expected)\n");
				err = -EINVAL;
				goto restore;
			}

			/* GuC based resets are not logged per engine */
			if (!using_guc) {
				if (i915_reset_engine_count(global, engine) !=
				    ++reset_engine_count) {
					pr_err("%s engine reset not recorded!\n",
					       engine->name);
					err = -EINVAL;
					goto restore;
				}
			}

			count++;

restore:
			err2 = intel_selftest_restore_policy(engine, &saved);
			if (err2)
				pr_err("[%s] Restore policy failed: %d!\n", engine->name, err2);
			if (err == 0)
				err = err2;
			if (err)
				break;
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable(engine);
		pr_info("%s: Completed %lu %s resets\n",
			engine->name, count, active ? "active" : "idle");

		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err) {
			pr_err("[%s] Flush failed: %d!\n", engine->name, err);
			break;
		}
	}

	if (intel_gt_is_wedged(gt)) {
		pr_err("GT is wedged!\n");
		err = -EIO;
	}

	if (active)
		hang_fini(&h);

	return err;
}
static int igt_reset_idle_engine(void *arg)
{
	return __igt_reset_engine(arg, false);
}

static int igt_reset_active_engine(void *arg)
{
	return __igt_reset_engine(arg, true);
}
struct active_engine {
	struct task_struct *task;
	struct intel_engine_cs *engine;
	unsigned long resets;
	unsigned int flags;
};

#define TEST_ACTIVE	BIT(0)
#define TEST_OTHERS	BIT(1)
#define TEST_SELF	BIT(2)
#define TEST_PRIORITY	BIT(3)
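
/*
 * The TEST_* flags pick the load pattern for __igt_reset_engines(): whether
 * the engine under reset carries a hanging batch (TEST_ACTIVE), whether the
 * remaining engines run background submission kthreads (TEST_OTHERS),
 * whether the target engine also gets such a thread (TEST_SELF), and whether
 * the background requests are given randomised priorities (TEST_PRIORITY).
 * The phases[] table in igt_reset_engines() below enumerates the
 * combinations of interest.
 */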
static int active_request_put(struct i915_request *rq)
{
	int err = 0;

	if (!rq)
		return 0;

	if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
		GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
			  rq->engine->name,
			  rq->fence.context,
			  rq->fence.seqno);
		GEM_TRACE_DUMP();

		intel_gt_set_wedged(rq->engine->gt);
		err = -EIO;
	}

	i915_request_put(rq);

	return err;
}
static int active_engine(void *data)
{
	I915_RND_STATE(prng);
	struct active_engine *arg = data;
	struct intel_engine_cs *engine = arg->engine;
	struct i915_request *rq[8] = {};
	struct intel_context *ce[ARRAY_SIZE(rq)];
	unsigned long count;
	int err = 0;

	for (count = 0; count < ARRAY_SIZE(ce); count++) {
		ce[count] = intel_context_create(engine);
		if (IS_ERR(ce[count])) {
			err = PTR_ERR(ce[count]);
			pr_err("[%s] Create context #%ld failed: %d!\n", engine->name, count, err);
			while (count--)
				intel_context_put(ce[count]);
			return err;
		}
	}

	count = 0;
	while (!kthread_should_stop()) {
		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
		struct i915_request *old = rq[idx];
		struct i915_request *new;

		new = intel_context_create_request(ce[idx]);
		if (IS_ERR(new)) {
			err = PTR_ERR(new);
			pr_err("[%s] Create request #%d failed: %d!\n", engine->name, idx, err);
			break;
		}

		rq[idx] = i915_request_get(new);
		i915_request_add(new);

		if (engine->sched_engine->schedule && arg->flags & TEST_PRIORITY) {
			struct i915_sched_attr attr = {
				.priority =
					i915_prandom_u32_max_state(512, &prng),
			};
			engine->sched_engine->schedule(rq[idx], &attr);
		}

		err = active_request_put(old);
		if (err) {
			pr_err("[%s] Request put failed: %d!\n", engine->name, err);
			break;
		}

		cond_resched();
	}

	for (count = 0; count < ARRAY_SIZE(rq); count++) {
		int err__ = active_request_put(rq[count]);

		if (err__)
			pr_err("[%s] Request put #%ld failed: %d!\n", engine->name, count, err__);

		/* Keep the first error */
		if (!err)
			err = err__;

		intel_context_put(ce[count]);
	}

	return err;
}
static int __igt_reset_engines(struct intel_gt *gt,
			       const char *test_name,
			       unsigned int flags)
{
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine, *other;
	enum intel_engine_id id, tmp;
	struct hang h;
	int err = 0;

	/* Check that issuing a reset on one engine does not interfere
	 * with any other engine.
	 */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (flags & TEST_ACTIVE) {
		err = hang_init(&h, gt);
		if (err)
			return err;

		if (flags & TEST_PRIORITY)
			h.ctx->sched.priority = 1024;
	}

	for_each_engine(engine, gt, id) {
		struct active_engine threads[I915_NUM_ENGINES] = {};
		unsigned long device = i915_reset_count(global);
		unsigned long count = 0, reported;
		bool using_guc = intel_engine_uses_guc(engine);
		IGT_TIMEOUT(end_time);

		if (flags & TEST_ACTIVE) {
			if (!intel_engine_can_store_dword(engine))
				continue;
		} else if (using_guc)
			continue;

		if (!wait_for_idle(engine)) {
			pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n",
			       engine->name, test_name);
			err = -EIO;
			break;
		}

		memset(threads, 0, sizeof(threads));
		for_each_engine(other, gt, tmp) {
			struct task_struct *tsk;

			threads[tmp].resets =
				i915_reset_engine_count(global, other);

			if (other == engine && !(flags & TEST_SELF))
				continue;

			if (other != engine && !(flags & TEST_OTHERS))
				continue;

			threads[tmp].engine = other;
			threads[tmp].flags = flags;

			tsk = kthread_run(active_engine, &threads[tmp],
					  "igt/%s", other->name);
			if (IS_ERR(tsk)) {
				err = PTR_ERR(tsk);
				pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
				goto unwind;
			}

			threads[tmp].task = tsk;
			get_task_struct(tsk);
		}

		yield(); /* start all threads before we begin */

		st_engine_heartbeat_disable_no_pm(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		do {
			struct i915_request *rq = NULL;
			struct intel_selftest_saved_policy saved;
			int err2;

			err = intel_selftest_modify_policy(engine, &saved,
							   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
			if (err) {
				pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
				break;
			}

			if (flags & TEST_ACTIVE) {
				rq = hang_create_request(&h, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					pr_err("[%s] Create hang request failed: %d!\n",
					       engine->name, err);
					goto restore;
				}

				i915_request_get(rq);
				i915_request_add(rq);

				if (!wait_until_running(&h, rq)) {
					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

					pr_err("%s: Failed to start request %llx, at %x\n",
					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);

					i915_request_put(rq);
					err = -EIO;
					goto restore;
				}
			} else {
				intel_engine_pm_get(engine);
			}

			if (!using_guc) {
				err = intel_engine_reset(engine, NULL);
				if (err) {
					pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
					       engine->name, test_name, err);
					goto restore;
				}
			}

			if (rq) {
				/* Ensure the reset happens and kills the engine */
				err = intel_selftest_wait_for_rq(rq);
				if (err)
					pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
					       engine->name, rq->fence.context,
					       rq->fence.seqno, rq->context->guc_id, err);
			}

			count++;

			if (rq) {
				if (rq->fence.error != -EIO) {
					pr_err("i915_reset_engine(%s:%s): failed to reset request %lld:%lld [0x%04X]\n",
					       engine->name, test_name,
					       rq->fence.context,
					       rq->fence.seqno, rq->context->guc_id);
					i915_request_put(rq);

					GEM_TRACE_DUMP();
					intel_gt_set_wedged(gt);
					err = -EIO;
					goto restore;
				}

				if (i915_request_wait(rq, 0, HZ / 5) < 0) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);

					pr_err("i915_reset_engine(%s:%s):"
					       " failed to complete request %llx:%lld after reset\n",
					       engine->name, test_name,
					       rq->fence.context,
					       rq->fence.seqno);
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);
					i915_request_put(rq);

					GEM_TRACE_DUMP();
					intel_gt_set_wedged(gt);
					err = -EIO;
					goto restore;
				}

				i915_request_put(rq);
			}

			if (!(flags & TEST_ACTIVE))
				intel_engine_pm_put(engine);

			if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
				struct drm_printer p =
					drm_info_printer(gt->i915->drm.dev);

				pr_err("i915_reset_engine(%s:%s):"
				       " failed to idle after reset\n",
				       engine->name, test_name);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);

				err = -EIO;
				goto restore;
			}

restore:
			err2 = intel_selftest_restore_policy(engine, &saved);
			if (err2)
				pr_err("[%s] Restore policy failed: %d!\n", engine->name, err2);
			if (err == 0)
				err = err2;
			if (err)
				break;
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable_no_pm(engine);

		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
			engine->name, test_name, count);

		/* GuC based resets are not logged per engine */
		if (!using_guc) {
			reported = i915_reset_engine_count(global, engine);
			reported -= threads[engine->id].resets;
			if (reported != count) {
				pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
				       engine->name, test_name, count, reported);
				if (!err)
					err = -EINVAL;
			}
		}

unwind:
		for_each_engine(other, gt, tmp) {
			int ret;

			if (!threads[tmp].task)
				continue;

			ret = kthread_stop(threads[tmp].task);
			if (ret) {
				pr_err("kthread for other engine %s failed, err=%d\n",
				       other->name, ret);
				if (!err)
					err = ret;
			}
			put_task_struct(threads[tmp].task);

			/* GuC based resets are not logged per engine */
			if (!using_guc) {
				if (other->uabi_class != engine->uabi_class &&
				    threads[tmp].resets !=
				    i915_reset_engine_count(global, other)) {
					pr_err("Innocent engine %s was reset (count=%ld)\n",
					       other->name,
					       i915_reset_engine_count(global, other) -
					       threads[tmp].resets);
					if (!err)
						err = -EINVAL;
				}
			}
		}

		if (device != i915_reset_count(global)) {
			pr_err("Global reset (count=%ld)!\n",
			       i915_reset_count(global) - device);
			if (!err)
				err = -EINVAL;
		}

		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err) {
			pr_err("[%s] Flush failed: %d!\n", engine->name, err);
			break;
		}
	}

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	if (flags & TEST_ACTIVE)
		hang_fini(&h);

	return err;
}
static int igt_reset_engines(void *arg)
{
	static const struct {
		const char *name;
		unsigned int flags;
	} phases[] = {
		{ "idle", 0 },
		{ "active", TEST_ACTIVE },
		{ "others-idle", TEST_OTHERS },
		{ "others-active", TEST_OTHERS | TEST_ACTIVE },
		{
			"others-priority",
			TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY
		},
		{
			"self-priority",
			TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
		},
		{ }
	};
	struct intel_gt *gt = arg;
	typeof(*phases) *p;
	int err;

	for (p = phases; p->name; p++) {
		if (p->flags & TEST_PRIORITY) {
			if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
				continue;
		}

		err = __igt_reset_engines(arg, p->name, p->flags);
		if (err)
			return err;
	}

	return 0;
}
static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask)
{
	u32 count = i915_reset_count(&gt->i915->gpu_error);

	intel_gt_reset(gt, mask, NULL);

	return count;
}
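
/*
 * fake_hangcheck() returns the global reset count sampled *before* forcing
 * the reset, so callers can assert that i915_reset_count() has advanced past
 * the returned value, i.e. that a device reset really was recorded.
 */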
static int igt_reset_wait(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct i915_request *rq;
	unsigned int reset_count;
	struct hang h;
	long timeout;
	int err;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	/* Check that we detect a stuck waiter and issue a reset */

	igt_global_reset_lock(gt);

	err = hang_init(&h, gt);
	if (err) {
		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
		goto unlock;
	}

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
		goto fini;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);

		err = -EIO;
		goto out_rq;
	}

	reset_count = fake_hangcheck(gt, ALL_ENGINES);

	timeout = i915_request_wait(rq, 0, 10);
	if (timeout < 0) {
		pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
		       timeout);
		err = timeout;
		goto out_rq;
	}

	if (i915_reset_count(global) == reset_count) {
		pr_err("No GPU reset recorded!\n");
		err = -EINVAL;
		goto out_rq;
	}

out_rq:
	i915_request_put(rq);
fini:
	hang_fini(&h);
unlock:
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}
struct evict_vma {
	struct completion completion;
	struct i915_vma *vma;
};
static int evict_vma(void *data)
{
	struct evict_vma *arg = data;
	struct i915_address_space *vm = arg->vma->vm;
	struct drm_mm_node evict = arg->vma->node;
	int err;

	complete(&arg->completion);

	mutex_lock(&vm->mutex);
	err = i915_gem_evict_for_node(vm, &evict, 0);
	mutex_unlock(&vm->mutex);

	return err;
}
static int evict_fence(void *data)
{
	struct evict_vma *arg = data;
	int err;

	complete(&arg->completion);

	/* Mark the fence register as dirty to force the mmio update. */
	err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
	if (err) {
		pr_err("Invalid Y-tiling settings; err:%d\n", err);
		return err;
	}

	err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
	if (err) {
		pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
		return err;
	}

	err = i915_vma_pin_fence(arg->vma);
	i915_vma_unpin(arg->vma);
	if (err) {
		pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
		return err;
	}

	i915_vma_unpin_fence(arg->vma);

	return 0;
}
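
/*
 * The idea behind evict_fence(), as I read it: switching the object to
 * Y-tiling dirties the fence register state, so the subsequent pin must
 * perform an mmio fence update while the object is still busy on the hung
 * engine - exactly the blocked path that the pending reset must unwedge.
 */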
static int __igt_reset_evict_vma(struct intel_gt *gt,
				 struct i915_address_space *vm,
				 int (*fn)(void *),
				 unsigned int flags)
{
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct drm_i915_gem_object *obj;
	struct task_struct *tsk = NULL;
	struct i915_request *rq;
	struct evict_vma arg;
	struct hang h;
	unsigned int pin_flags;
	int err;

	if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
		return 0;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	/* Check that we can recover an unbind stuck on a hanging request */

	err = hang_init(&h, gt);
	if (err) {
		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
		return err;
	}

	obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("[%s] Create object failed: %d!\n", engine->name, err);
		goto fini;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
		err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
		if (err) {
			pr_err("Invalid X-tiling settings; err:%d\n", err);
			goto out_obj;
		}
	}

	arg.vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(arg.vma)) {
		err = PTR_ERR(arg.vma);
		pr_err("[%s] VMA instance failed: %d!\n", engine->name, err);
		goto out_obj;
	}

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
		goto out_obj;
	}

	pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		pin_flags |= PIN_MAPPABLE;

	err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
	if (err) {
		i915_request_add(rq);
		pr_err("[%s] VMA pin failed: %d!\n", engine->name, err);
		goto out_obj;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
		err = i915_vma_pin_fence(arg.vma);
		if (err) {
			pr_err("Unable to pin X-tiled fence; err:%d\n", err);
			i915_vma_unpin(arg.vma);
			i915_request_add(rq);
			goto out_obj;
		}
	}

	i915_vma_lock(arg.vma);
	err = i915_request_await_object(rq, arg.vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0) {
		err = i915_vma_move_to_active(arg.vma, rq, flags);
		if (err)
			pr_err("[%s] Move to active failed: %d!\n", engine->name, err);
	} else {
		pr_err("[%s] Request await failed: %d!\n", engine->name, err);
	}
	i915_vma_unlock(arg.vma);

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_vma_unpin_fence(arg.vma);
	i915_vma_unpin(arg.vma);

	i915_request_get(rq);
	i915_request_add(rq);
	if (err)
		goto out_rq;

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);
		goto out_reset;
	}

	init_completion(&arg.completion);

	tsk = kthread_run(fn, &arg, "igt/evict_vma");
	if (IS_ERR(tsk)) {
		err = PTR_ERR(tsk);
		pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
		tsk = NULL;
		goto out_reset;
	}
	get_task_struct(tsk);

	wait_for_completion(&arg.completion);

	if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("igt/evict_vma kthread did not wait\n");
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);
		goto out_reset;
	}

out_reset:
	igt_global_reset_lock(gt);
	fake_hangcheck(gt, rq->engine->mask);
	igt_global_reset_unlock(gt);

	if (tsk) {
		struct intel_wedge_me w;

		/* The reset, even indirectly, should take less than 10ms. */
		intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
			err = kthread_stop(tsk);

		put_task_struct(tsk);
	}

out_rq:
	i915_request_put(rq);
out_obj:
	i915_gem_object_put(obj);
fini:
	hang_fini(&h);
	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}
static int igt_reset_evict_ggtt(void *arg)
{
	struct intel_gt *gt = arg;

	return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
				     evict_vma, EXEC_OBJECT_WRITE);
}

static int igt_reset_evict_ppgtt(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ppgtt *ppgtt;
	int err;

	/* aliasing == global gtt locking, covered above */
	if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
		return 0;

	ppgtt = i915_ppgtt_create(gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	err = __igt_reset_evict_vma(gt, &ppgtt->vm,
				    evict_vma, EXEC_OBJECT_WRITE);
	i915_vm_put(&ppgtt->vm);

	return err;
}

static int igt_reset_evict_fence(void *arg)
{
	struct intel_gt *gt = arg;

	return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
				     evict_fence, EXEC_OBJECT_NEEDS_FENCE);
}
static int wait_for_others(struct intel_gt *gt,
			   struct intel_engine_cs *exclude)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine == exclude)
			continue;

		if (!wait_for_idle(engine))
			return -EIO;
	}

	return 0;
}
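
/*
 * wait_for_others() supports igt_reset_queue() below: after a device reset
 * the kernel context may still be resubmitting, and firing a second reset
 * in quick succession risks skipping breadcrumbs (see the XXX comment in
 * igt_reset_queue()), so every engine other than the one under test is
 * idled first.
 */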
static int igt_reset_queue(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err;

	/* Check that we replay pending requests following a hang */

	igt_global_reset_lock(gt);

	err = hang_init(&h, gt);
	if (err)
		goto unlock;

	for_each_engine(engine, gt, id) {
		struct intel_selftest_saved_policy saved;
		struct i915_request *prev;
		IGT_TIMEOUT(end_time);
		unsigned int count;
		bool using_guc = intel_engine_uses_guc(engine);

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (using_guc) {
			err = intel_selftest_modify_policy(engine, &saved,
							   SELFTEST_SCHEDULER_MODIFY_NO_HANGCHECK);
			if (err) {
				pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
				goto fini;
			}
		}

		prev = hang_create_request(&h, engine);
		if (IS_ERR(prev)) {
			err = PTR_ERR(prev);
			pr_err("[%s] Create 'prev' hang request failed: %d!\n", engine->name, err);
			goto restore;
		}

		i915_request_get(prev);
		i915_request_add(prev);

		count = 0;
		do {
			struct i915_request *rq;
			unsigned int reset_count;

			rq = hang_create_request(&h, engine);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
				goto restore;
			}

			i915_request_get(rq);
			i915_request_add(rq);

			/*
			 * XXX We don't handle resetting the kernel context
			 * very well. If we trigger a device reset twice in
			 * quick succession while the kernel context is
			 * executing, we may end up skipping the breadcrumb.
			 * This is really only a problem for the selftest as
			 * normally there is a large interlude between resets
			 * (hangcheck), or we focus on resetting just one
			 * engine and so avoid repeatedly resetting innocents.
			 */
			err = wait_for_others(gt, engine);
			if (err) {
				pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
				       __func__, engine->name);
				i915_request_put(rq);
				i915_request_put(prev);

				GEM_TRACE_DUMP();
				intel_gt_set_wedged(gt);
				goto restore;
			}

			if (!wait_until_running(&h, prev)) {
				struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

				pr_err("%s(%s): Failed to start request %llx, at %x\n",
				       __func__, engine->name,
				       prev->fence.seqno, hws_seqno(&h, prev));
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);

				i915_request_put(rq);
				i915_request_put(prev);

				intel_gt_set_wedged(gt);

				err = -EIO;
				goto restore;
			}

			reset_count = fake_hangcheck(gt, BIT(id));

			if (prev->fence.error != -EIO) {
				pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
				       prev->fence.error);
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto restore;
			}

			if (rq->fence.error) {
				pr_err("Fence error status not zero [%d] after unrelated reset\n",
				       rq->fence.error);
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto restore;
			}

			if (i915_reset_count(global) == reset_count) {
				pr_err("No GPU reset recorded!\n");
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto restore;
			}

			i915_request_put(prev);
			prev = rq;
			count++;
		} while (time_before(jiffies, end_time));
		pr_info("%s: Completed %d queued resets\n",
			engine->name, count);

		*h.batch = MI_BATCH_BUFFER_END;
		intel_gt_chipset_flush(engine->gt);

		i915_request_put(prev);

restore:
		if (using_guc) {
			int err2 = intel_selftest_restore_policy(engine, &saved);

			if (err2)
				pr_err("%s:%d> [%s] Restore policy failed: %d!\n",
				       __func__, __LINE__, engine->name, err2);
			if (err == 0)
				err = err2;
		}
		if (err)
			goto fini;

		err = igt_flush_test(gt->i915);
		if (err) {
			pr_err("[%s] Flush failed: %d!\n", engine->name, err);
			break;
		}
	}

fini:
	hang_fini(&h);
unlock:
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}
static int igt_handle_error(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct hang h;
	struct i915_request *rq;
	struct i915_gpu_coredump *error;
	int err;

	/* Check that we can issue a global GPU and engine reset */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	err = hang_init(&h, gt);
	if (err) {
		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
		return err;
	}

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
		goto err_fini;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);

		err = -EIO;
		goto err_request;
	}

	/* Temporarily disable error capture */
	error = xchg(&global->first_error, (void *)-1);

	intel_gt_handle_error(gt, engine->mask, 0, NULL);

	xchg(&global->first_error, error);

	if (rq->fence.error != -EIO) {
		pr_err("Guilty request not identified!\n");
		err = -EINVAL;
		goto err_request;
	}

err_request:
	i915_request_put(rq);
err_fini:
	hang_fini(&h);
	return err;
}
static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
				     const struct igt_atomic_section *p,
				     const char *mode)
{
	struct tasklet_struct * const t = &engine->sched_engine->tasklet;
	int err;

	GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
		  engine->name, mode, p->name);

	if (t->func)
		tasklet_disable(t);
	if (strcmp(p->name, "softirq"))
		local_bh_disable();
	p->critical_section_begin();

	err = __intel_engine_reset_bh(engine, NULL);

	p->critical_section_end();
	if (strcmp(p->name, "softirq"))
		local_bh_enable();
	if (t->func) {
		tasklet_enable(t);
		tasklet_hi_schedule(t);
	}

	if (err)
		pr_err("i915_reset_engine(%s:%s) failed under %s\n",
		       engine->name, mode, p->name);

	return err;
}
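
/*
 * The bracketing above is the point of the test: the engine reset is issued
 * strictly inside p->critical_section_begin()/end() - hardirq, softirq or
 * preemption disabled - with the submission tasklet parked, and the tasklet
 * is kicked afterwards to flush any submission that was held off while the
 * engine was being reset.
 */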
static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
				   const struct igt_atomic_section *p)
{
	struct i915_request *rq;
	struct hang h;
	int err;

	err = __igt_atomic_reset_engine(engine, p, "idle");
	if (err)
		return err;

	err = hang_init(&h, engine->gt);
	if (err) {
		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
		return err;
	}

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
		goto out;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (wait_until_running(&h, rq)) {
		err = __igt_atomic_reset_engine(engine, p, "active");
	} else {
		pr_err("%s(%s): Failed to start request %llx, at %x\n",
		       __func__, engine->name,
		       rq->fence.seqno, hws_seqno(&h, rq));
		intel_gt_set_wedged(engine->gt);
		err = -EIO;
	}

	if (err == 0) {
		struct intel_wedge_me w;

		intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */)
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
		if (intel_gt_is_wedged(engine->gt))
			err = -EIO;
	}

	i915_request_put(rq);
out:
	hang_fini(&h);
	return err;
}
static int igt_reset_engines_atomic(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	int err = 0;

	/* Check that engine resets are usable from atomic context */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (intel_uc_uses_guc_submission(&gt->uc))
		return 0;

	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto unlock;

	for (p = igt_atomic_phases; p->name; p++) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, gt, id) {
			err = igt_atomic_reset_engine(engine, p);
			if (err)
				goto out;
		}
	}

out:
	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);
unlock:
	igt_global_reset_unlock(gt);

	return err;
}
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_hang_sanitycheck),
		SUBTEST(igt_reset_nop),
		SUBTEST(igt_reset_nop_engine),
		SUBTEST(igt_reset_idle_engine),
		SUBTEST(igt_reset_active_engine),
		SUBTEST(igt_reset_fail_engine),
		SUBTEST(igt_reset_engines),
		SUBTEST(igt_reset_engines_atomic),
		SUBTEST(igt_reset_queue),
		SUBTEST(igt_reset_wait),
		SUBTEST(igt_reset_evict_ggtt),
		SUBTEST(igt_reset_evict_ppgtt),
		SUBTEST(igt_reset_evict_fence),
		SUBTEST(igt_handle_error),
	};
	struct intel_gt *gt = &i915->gt;
	intel_wakeref_t wakeref;
	int err;

	if (!intel_has_gpu_reset(gt))
		return 0;

	if (intel_gt_is_wedged(gt))
		return -EIO; /* we're long past hope of a successful reset */

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	err = intel_gt_live_subtests(tests, gt);

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);

	return err;
}
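
/*
 * Running these tests is a sketch that assumes the standard selftest
 * plumbing rather than anything defined in this file: build with
 * CONFIG_DRM_I915_SELFTEST=y and load the driver with live selftests
 * enabled, e.g. "modprobe i915 selftests=1". Where available, the
 * i915.st_filter module parameter narrows the run to the hangcheck group.
 * Both knobs are selftest infrastructure, not uAPI - verify the exact
 * names against i915_selftest.c in your tree.
 */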