// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <linux/string_helpers.h>

#include "display/intel_display.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"

#include "gt/intel_gt_regs.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_reset.h"

#include "uc/intel_guc.h"
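
/*
 * A full-chip reset is retried up to RESET_MAX_RETRIES times before we
 * give up; a single-engine reset gets exactly one attempt (see
 * __intel_gt_reset()).
 */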
#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0

static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}
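
/*
 * Charge a ban score against the client (file) that owned a guilty context;
 * a client that hangs again within I915_CLIENT_FAST_HANG_JIFFIES accrues
 * extra score on top of any context-ban penalty.
 */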
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
	struct drm_i915_file_private *file_priv = ctx->file_priv;
	unsigned long prev_hang;
	unsigned int score;

	if (IS_ERR_OR_NULL(file_priv))
		return;

	score = 0;
	if (banned)
		score = I915_CLIENT_SCORE_CONTEXT_BAN;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		drm_dbg(&ctx->i915->drm,
			"client %s: gained %u ban score, now %u\n",
			ctx->name, score,
			atomic_read(&file_priv->ban_score));
	}
}

static bool mark_guilty(struct i915_request *rq)
{
	struct i915_gem_context *ctx;
	unsigned long prev_hang;
	bool banned;
	int i;

	if (intel_context_is_closed(rq->context))
		return true;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return intel_context_is_banned(rq->context);

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx)) {
		banned = false;
		goto out;
	}

	drm_notice(&ctx->i915->drm,
		   "%s context reset due to GPU hang\n",
		   ctx->name);

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned)
		drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
			ctx->name, atomic_read(&ctx->guilty_count));

	client_mark_guilty(ctx, banned);

out:
	i915_gem_context_put(ctx);
	return banned;
}

static void mark_innocent(struct i915_request *rq)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx)
		atomic_inc(&ctx->active_count);
	rcu_read_unlock();
}
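
/*
 * Poison the hanging request: a guilty request is skipped and completed
 * with -EIO (potentially banning its context), while an innocent victim
 * is marked -EAGAIN so it can be resubmitted once the engine restarts.
 */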
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
	bool banned = false;

	RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
	GEM_BUG_ON(__i915_request_is_complete(rq));

	rcu_read_lock(); /* protect the GEM context */
	if (guilty) {
		i915_request_set_error_once(rq, -EIO);
		__i915_request_skip(rq);
		banned = mark_guilty(rq);
	} else {
		i915_request_set_error_once(rq, -EAGAIN);
		mark_innocent(rq);
	}
	rcu_read_unlock();

	if (banned)
		intel_context_ban(rq->context, rq);
}

static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}

static int i915_do_reset(struct intel_gt *gt,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}

static int g4x_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		GT_TRACE(gt, "Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		GT_TRACE(gt, "Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}

static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		GT_TRACE(gt, "Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		GT_TRACE(gt, "Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
	struct intel_uncore *uncore = gt->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, so no need to check
	 * for fifo space for the write or to forcewake the chip for
	 * the read.
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		GT_TRACE(gt,
			 "Wait for 0x%08x engines reset failed\n",
			 hw_domain_mask);

	return err;
}
static int __gen6_reset_engines(struct intel_gt *gt,
				intel_engine_mask_t engine_mask,
				unsigned int retry)
{
	struct intel_engine_cs *engine;
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			hw_mask |= engine->reset_domain;
		}
	}

	return gen6_hw_domain_reset(gt, hw_mask);
}

static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gt->uncore->lock, flags);
	ret = __gen6_reset_engines(gt, engine_mask, retry);
	spin_unlock_irqrestore(&gt->uncore->lock, flags);

	return ret;
}
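
/*
 * On gen11+, a Scaler/Format Converter (SFC) is shared between a pair of
 * video decode engines and the video enhancement engine alongside them,
 * which is why VCS instance n pairs with VECS instance n / 2 below.
 */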
static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
{
	int vecs_id;

	GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);

	vecs_id = _VECS((engine->instance) / 2);

	return engine->gt->engine[vecs_id];
}

struct sfc_lock_data {
	i915_reg_t lock_reg;
	i915_reg_t ack_reg;
	i915_reg_t usage_reg;
	u32 lock_bit;
	u32 ack_bit;
	u32 usage_bit;
	u32 reset_bit;
};

static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
				     struct sfc_lock_data *sfc_lock)
{
	switch (engine->class) {
	default:
		MISSING_CASE(engine->class);
		fallthrough;
	case VIDEO_DECODE_CLASS:
		sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
		sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
		sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
		sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);

		break;
	case VIDEO_ENHANCEMENT_CLASS:
		sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
		sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
		sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
		sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);

		break;
	}
}

static int gen11_lock_sfc(struct intel_engine_cs *engine,
			  u32 *reset_mask,
			  u32 *unlock_mask)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	struct sfc_lock_data sfc_lock;
	bool lock_obtained, lock_to_other = false;
	int ret;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		fallthrough;
	case VIDEO_ENHANCEMENT_CLASS:
		get_sfc_forced_lock_data(engine, &sfc_lock);

		break;
	default:
		return 0;
	}

	if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
		struct intel_engine_cs *paired_vecs;

		if (engine->class != VIDEO_DECODE_CLASS ||
		    GRAPHICS_VER(engine->i915) != 12)
			return 0;

		/*
		 * Wa_14010733141
		 *
		 * If the VCS-MFX isn't using the SFC, we also need to check
		 * whether VCS-HCP is using it. If so, we need to issue a *VE*
		 * forced lock on the VE engine that shares the same SFC.
		 */
		if (!(intel_uncore_read_fw(uncore,
					   GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
		      GEN12_HCP_SFC_USAGE_BIT))
			return 0;

		paired_vecs = find_sfc_paired_vecs_engine(engine);
		get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
		lock_to_other = true;
		*unlock_mask |= paired_vecs->mask;
	} else {
		*unlock_mask |= engine->mask;
	}

	/*
	 * If the engine is using an SFC, tell the engine that a software reset
	 * is going to happen. The engine will then try to force lock the SFC.
	 * If SFC ends up being locked to the engine we want to reset, we have
	 * to reset it as well (we will unlock it once the reset sequence is
	 * completed).
	 */
	rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);

	ret = __intel_wait_for_register_fw(uncore,
					   sfc_lock.ack_reg,
					   sfc_lock.ack_bit,
					   sfc_lock.ack_bit,
					   1000, 0, NULL);

	/*
	 * Was the SFC released while we were trying to lock it?
	 *
	 * We should reset both the engine and the SFC if:
	 *  - We were locking the SFC to this engine and the lock succeeded
	 *       OR
	 *  - We were locking the SFC to a different engine (Wa_14010733141)
	 *    but the SFC was released before the lock was obtained.
	 *
	 * Otherwise we need only reset the engine by itself and we can
	 * leave the SFC alone.
	 */
	lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
			 sfc_lock.usage_bit) != 0;
	if (lock_obtained == lock_to_other)
		return 0;

	if (ret) {
		ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
		return ret;
	}

	*reset_mask |= sfc_lock.reset_bit;
	return 0;
}

static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	struct sfc_lock_data sfc_lock = {};

	if (engine->class != VIDEO_DECODE_CLASS &&
	    engine->class != VIDEO_ENHANCEMENT_CLASS)
		return;

	if (engine->class == VIDEO_DECODE_CLASS &&
	    (BIT(engine->instance) & vdbox_sfc_access) == 0)
		return;

	get_sfc_forced_lock_data(engine, &sfc_lock);

	rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
}

static int __gen11_reset_engines(struct intel_gt *gt,
				 intel_engine_mask_t engine_mask,
				 unsigned int retry)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 reset_mask, unlock_mask = 0;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		reset_mask = GEN11_GRDOM_FULL;
	} else {
		reset_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			reset_mask |= engine->reset_domain;
			ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
			if (ret)
				goto sfc_unlock;
		}
	}

	ret = gen6_hw_domain_reset(gt, reset_mask);

sfc_unlock:
	/*
	 * We unlock the SFC based on the lock status and not the result of
	 * gen11_lock_sfc to make sure that we clean properly if something
	 * wrong happened during the lock (e.g. lock acquired after timeout
	 * expiration).
	 *
	 * Due to Wa_14010733141, we may have locked an SFC to an engine that
	 * wasn't being reset. So instead of calling gen11_unlock_sfc()
	 * on engine_mask, we instead call it on the mask of engines that our
	 * gen11_lock_sfc() calls told us actually had locks attempted.
	 */
	for_each_engine_masked(engine, gt, unlock_mask, tmp)
		gen11_unlock_sfc(engine);

	return ret;
}

static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
		return -ETIMEDOUT;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		drm_err(&engine->i915->drm,
			"%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			engine->name, request,
			intel_uncore_read_fw(uncore, reg));

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
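
/*
 * Gen8+ wraps the legacy domain reset in a request/ready handshake: each
 * engine is first asked to quiesce (gen8_engine_reset_prepare()), then the
 * gen6/gen11 domain reset is issued, and finally the request bit is
 * cleared again (gen8_engine_reset_cancel()).
 */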
static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gt->uncore->lock, flags);

	for_each_engine_masked(engine, gt, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We prefer context corruption over a failed reset
		 * with a wedged driver/gpu; the active bb execution
		 * case should be covered by the stop_engines() done
		 * before the reset.
		 */
	}

	/*
	 * Wa_22011100796:dg2: whenever a full soft reset is required,
	 * reset all individual engines first, and then do a full soft reset.
	 *
	 * This is best effort, so ignore any error from the initial reset.
	 */
	if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
		__gen11_reset_engines(gt, gt->info.engine_mask, 0);

	if (GRAPHICS_VER(gt->i915) >= 11)
		ret = __gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = __gen6_reset_engines(gt, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	spin_unlock_irqrestore(&gt->uncore->lock, flags);

	return ret;
}

static int mock_reset(struct intel_gt *gt,
		      intel_engine_mask_t mask,
		      unsigned int retry)
{
	return 0;
}

typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (is_mock_gt(gt))
		return mock_reset;
	else if (GRAPHICS_VER(i915) >= 8)
		return gen8_reset_engines;
	else if (GRAPHICS_VER(i915) >= 6)
		return gen6_reset_engines;
	else if (GRAPHICS_VER(i915) >= 5)
		return ilk_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (GRAPHICS_VER(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(gt, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(const struct intel_gt *gt)
{
	if (!gt->i915->params.reset)
		return false;

	return intel_get_gpu_reset(gt);
}

bool intel_has_reset_engine(const struct intel_gt *gt)
{
	if (gt->i915->params.reset < 2)
		return false;

	return INTEL_INFO(gt->i915)->has_reset_engine;
}

int intel_reset_guc(struct intel_gt *gt)
{
	u32 guc_domain =
		GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GT_UC(gt->i915));

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(gt, guc_domain);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

/*
 * Ensure the irq handler finishes and does not run again.
 * Also return the active request so that we only search for it once.
 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	if (engine->reset.prepare)
		engine->reset.prepare(engine);
}
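
/*
 * A reset clobbers the GGTT fence registers, so zap any userspace mmaps
 * that fault through them; the fences are restored after the reset by
 * intel_ggtt_restore_fences() and the ptes are refaulted on next access.
 */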
static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);

		if (!vma->mmo)
			continue;

		node = &vma->mmo->vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;

		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	/* For GuC mode, ensure submission is disabled before stopping ring */
	intel_uc_reset_prepare(&gt->uc);

	for_each_engine(engine, gt, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	return awake;
}

static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}

static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	local_bh_disable();
	for_each_engine(engine, gt, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);
	local_bh_enable();

	intel_uc_reset(&gt->uc, ALL_ENGINES);

	intel_ggtt_restore_fences(gt->ggtt);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	if (engine->reset.finish)
		engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_signal_breadcrumbs(engine);
}

static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}

	intel_uc_reset_finish(&gt->uc);
}
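
/*
 * Once the GPU is wedged, every engine's submit_request vfunc is replaced
 * with this stub so that new requests are immediately completed with -EIO
 * instead of being sent to dead hardware.
 */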
static void nop_submit_request(struct i915_request *request)
{
	RQ_TRACE(request, "-EIO\n");

	request = i915_request_mark_eio(request);
	if (request) {
		i915_request_submit(request);
		intel_engine_signal_breadcrumbs(request->engine);

		i915_request_put(request);
	}
}

static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	GT_TRACE(gt, "start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	for_each_engine(engine, gt, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	local_bh_disable();
	for_each_engine(engine, gt, id)
		if (engine->reset.cancel)
			engine->reset.cancel(engine);
	intel_uc_cancel_requests(&gt->uc);
	local_bh_enable();

	reset_finish(gt, awake);

	GT_TRACE(gt, "end\n");
}

void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	mutex_lock(&gt->reset.mutex);

	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_debug_printer(__func__);
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
		for_each_engine(engine, gt, id) {
			if (intel_engine_is_idle(engine))
				continue;

			intel_engine_dump(engine, &p, "%s\n", engine->name);
		}
	}

	__intel_gt_set_wedged(gt);

	mutex_unlock(&gt->reset.mutex);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	bool ok;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	/* Never fully initialised, recovery impossible */
	if (intel_gt_has_unrecoverable_error(gt))
		return false;

	GT_TRACE(gt, "start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	spin_lock(&timelines->lock);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct dma_fence *fence;

		fence = i915_active_fence_get(&tl->last_request);
		if (!fence)
			continue;

		spin_unlock(&timelines->lock);

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
		dma_fence_put(fence);

		/* Restart iteration after dropping lock */
		spin_lock(&timelines->lock);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock(&timelines->lock);

	/* We must reset pending GPU events before restoring our submission */
	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
	if (!ok) {
		/*
		 * Warn CI about the unrecoverable wedged condition.
		 * Time for a reboot.
		 */
		add_taint_for_CI(gt->i915, TAINT_WARN);
		return false;
	}

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GT_TRACE(gt, "end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}

bool intel_gt_unset_wedged(struct intel_gt *gt)
{
	bool result;

	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);

	return result;
}
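
/* Retry the full-chip reset with a short, escalating backoff before giving up. */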
static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	err = __intel_gt_reset(gt, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}

static int resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt, id) {
		ret = intel_engine_resume(engine);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	intel_engine_mask_t awake;
	int ret;

	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));

	/*
	 * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
	 * critical section like gpu reset.
	 */
	gt_revoke(gt);

	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		drm_notice(&gt->i915->drm,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt)) {
		if (gt->i915->params.reset)
			drm_err(&gt->i915->drm, "GPU reset not supported\n");
		else
			drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		drm_err(&gt->i915->drm, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);

	intel_overlay_reset(gt->i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = intel_gt_init_hw(gt);
	if (ret) {
		drm_err(&gt->i915->drm,
			"Failed to initialise HW following reset (%d)\n",
			ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret)
		goto taint;

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(gt->i915, TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}

static int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
	return __intel_gt_reset(engine->gt, engine->mask);
}

int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	int ret;

	ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	if (intel_engine_uses_guc(engine))
		return -ENODEV;

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		drm_notice(&engine->i915->drm,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

	ret = intel_gt_reset_engine(engine);
	if (ret) {
		/* If we fail here, we expect to fallback to a global reset */
		ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = intel_engine_resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put_async(engine);
	return ret;
}

/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no drm_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identify the request that caused the hang and drop it
 *  - reset the engine (which will force the engine to idle)
 *  - re-init/configure the engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	int err;

	local_bh_disable();
	err = __intel_engine_reset_bh(engine, msg);
	local_bh_enable();

	return err;
}
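
/*
 * A global reset is bracketed by uevents (I915_ERROR_UEVENT,
 * I915_RESET_UEVENT) so that userspace can observe the error, the reset
 * and its completion.
 */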
static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		intel_display_prepare_reset(gt->i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&gt->reset.backoff_srcu);

		intel_gt_reset(gt, engine_mask, reason);

		intel_display_finish_reset(gt->i915);
	}

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	engine_mask &= gt->info.engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (!intel_uc_uses_guc_submission(&gt->uc) &&
	    intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
		local_bh_disable();
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			if (__intel_engine_reset_bh(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
		local_bh_enable();
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/*
	 * Prevent any other reset-engine attempt. We don't do this for GuC
	 * submission, since the GuC owns the per-engine reset, not the i915.
	 */
	if (!intel_uc_uses_guc_submission(&gt->uc)) {
		for_each_engine(engine, gt, tmp) {
			while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
						&gt->reset.flags))
				wait_on_bit(&gt->reset.flags,
					    I915_RESET_ENGINE + engine->id,
					    TASK_UNINTERRUPTIBLE);
		}
	}

	intel_gt_reset_global(gt, engine_mask, msg);

	if (!intel_uc_uses_guc_submission(&gt->uc)) {
		for_each_engine(engine, gt, tmp)
			clear_bit_unlock(I915_RESET_ENGINE + engine->id,
					 &gt->reset.flags);
	}
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
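
/*
 * Callers bracket access to resources that a reset would clobber with
 * intel_gt_reset_trylock()/intel_gt_reset_unlock(); a sketch of typical
 * usage (hypothetical caller):
 *
 *	int tag, err;
 *
 *	err = intel_gt_reset_trylock(gt, &tag);
 *	if (err)
 *		return err;
 *	... poke the hardware ...
 *	intel_gt_reset_unlock(gt, tag);
 */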
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
	might_lock(&gt->reset.backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return 0;
}

void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}
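
/*
 * Report -EIO only once the device is truly dead: if a reset is still in
 * progress, wait (interruptibly) for it and then re-check the wedged state.
 */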
int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	if (intel_gt_has_unrecoverable_error(gt))
		return -EIO;

	/* Reset still in progress? Maybe we will recover? */
	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}

void intel_gt_set_wedged_on_init(struct intel_gt *gt)
{
	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
		     I915_WEDGED_ON_INIT);
	intel_gt_set_wedged(gt);
	i915_disable_error_state(gt->i915, -ENODEV);
	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);

	/* Wedged on init is non-recoverable */
	add_taint_for_CI(gt->i915, TAINT_WARN);
}

void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
{
	intel_gt_set_wedged(gt);
	i915_disable_error_state(gt->i915, -ENODEV);
	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
	intel_gt_retire_requests(gt); /* cleanup any wedged requests */
}

void intel_gt_init_reset(struct intel_gt *gt)
{
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);

	/*
	 * While undesirable to wait inside the shrinker, complain anyway.
	 *
	 * If we have to wait during shrinking, we guarantee forward progress
	 * by forcing the reset. Therefore during the reset we must not
	 * re-enter the shrinker. By declaring that we take the reset mutex
	 * within the shrinker, we forbid ourselves from performing any
	 * fs-reclaim or taking related locks during reset.
	 */
	i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);

	/* no GPU until we are ready! */
	__set_bit(I915_WEDGED, &gt->reset.flags);
}

void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}
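
/*
 * Delayed-work callback armed by __intel_init_wedge(): if the section
 * guarded by intel_wedge_on_timeout() does not complete in time, declare
 * the device wedged.
 */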
static void intel_wedge_me(struct work_struct *work)
{
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

	drm_err(&w->gt->i915->drm,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	intel_gt_set_wedged(w->gt);
}

void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
			long timeout,
			const char *name)
{
	w->gt = gt;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#include "selftest_hangcheck.c"
#endif