/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_reset.h"

#include "intel_guc.h"

#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0

static void engine_skip_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *hung_ctx = rq->gem_context;

	lockdep_assert_held(&engine->timeline.lock);

	if (!i915_request_is_active(rq))
		return;

	list_for_each_entry_continue(rq, &engine->timeline.requests, link)
		if (rq->gem_context == hung_ctx)
			i915_request_skip(rq, -EIO);
}

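/*
 * Bump the per-client ban score when one of its contexts is found guilty
 * of a hang; clients that hang repeatedly, or in rapid succession, edge
 * towards being banned from further submission.
 */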
static void client_mark_guilty(struct drm_i915_file_private *file_priv,
			       const struct i915_gem_context *ctx)
{
	unsigned int score;
	unsigned long prev_hang;

	if (i915_gem_context_is_banned(ctx))
		score = I915_CLIENT_SCORE_CONTEXT_BAN;
	else
		score = 0;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
				 ctx->name, score,
				 atomic_read(&file_priv->ban_score));
	}
}

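/*
 * Mark the context that owned the hanging request as guilty. Returns true
 * if the context has now been banned, in which case the caller should also
 * skip the rest of its queued requests.
 */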
static bool context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned long prev_hang;
	bool banned;
	unsigned int i;

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx))
		return false;

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned) {
		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
				 ctx->name, atomic_read(&ctx->guilty_count));
		i915_gem_context_set_banned(ctx);
	}

	if (!IS_ERR_OR_NULL(ctx->file_priv))
		client_mark_guilty(ctx->file_priv, ctx);

	return banned;
}

static void context_mark_innocent(struct i915_gem_context *ctx)
{
	atomic_inc(&ctx->active_count);
}

void i915_reset_request(struct i915_request *rq, bool guilty)
{
	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
		  rq->engine->name,
		  rq->fence.context,
		  rq->fence.seqno,
		  yesno(guilty));

	lockdep_assert_held(&rq->engine->timeline.lock);
	GEM_BUG_ON(i915_request_completed(rq));

	if (guilty) {
		i915_request_skip(rq, -EIO);
		if (context_mark_guilty(rq->gem_context))
			engine_skip_context(rq);
	} else {
		dma_fence_set_error(&rq->fence, -EAGAIN);
		context_mark_innocent(rq->gem_context);
	}
}

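/*
 * Quiesce a single engine before reset: ask the command streamer to stop,
 * then force the ring back to an empty, disabled state so that no further
 * writes reach memory while the reset is performed.
 */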
static void gen3_stop_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const u32 base = engine->mmio_base;

	GEM_TRACE("%s\n", engine->name);

	if (intel_engine_stop_cs(engine))
		GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);

	I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
	POSTING_READ_FW(RING_HEAD(base)); /* paranoia */

	I915_WRITE_FW(RING_HEAD(base), 0);
	I915_WRITE_FW(RING_TAIL(base), 0);
	POSTING_READ_FW(RING_TAIL(base));

	/* The ring must be empty before it is disabled */
	I915_WRITE_FW(RING_CTL(base), 0);

	/* Check acts as a post */
	if (I915_READ_FW(RING_HEAD(base)))
		GEM_TRACE("%s: ring head [%x] not parked\n",
			  engine->name, I915_READ_FW(RING_HEAD(base)));
}

static void i915_stop_engines(struct drm_i915_private *i915,
			      intel_engine_mask_t engine_mask)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;

	if (INTEL_GEN(i915) < 3)
		return;

	for_each_engine_masked(engine, i915, engine_mask, tmp)
		gen3_stop_engine(engine);
}

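/* gen2/3 style reset, driven through the GDRST byte in PCI config space */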
static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}

static int i915_do_reset(struct drm_i915_private *i915,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = i915->drm.pdev;
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *i915,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = i915->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}

static int g4x_do_reset(struct drm_i915_private *dev_priv,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE_FW(VDECCLK_GATE_D,
		      I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ_FW(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	I915_WRITE_FW(VDECCLK_GATE_D,
		      I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ_FW(VDECCLK_GATE_D);

	return ret;
}

static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     intel_engine_mask_t engine_mask,
			     unsigned int retry)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}

static int gen6_reset_engines(struct drm_i915_private *i915,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[] = {
		[RCS0]  = GEN6_GRDOM_RENDER,
		[BCS0]  = GEN6_GRDOM_BLT,
		[VCS0]  = GEN6_GRDOM_MEDIA,
		[VCS1]  = GEN8_GRDOM_MEDIA2,
		[VECS0] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
		}
	}

	return gen6_hw_domain_reset(i915, hw_mask);
}

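/*
 * On gen11 the SFC units are shared between the video decode and video
 * enhancement boxes. Before resetting an engine that may be using an SFC,
 * force-lock the unit and, if it turns out to be in use on behalf of the
 * engine being reset, return its reset domain bit so that it is reset too.
 */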
static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
			  struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
	u32 sfc_usage_bit;
	u32 sfc_reset_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
		sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
		break;

	default:
		return 0;
	}

	/*
	 * Tell the engine that a software reset is going to happen. The engine
	 * will then try to force lock the SFC (if currently locked, it will
	 * remain so until we tell the engine it is safe to unlock; if currently
	 * unlocked, it will ignore this and all new lock requests). If SFC
	 * ends up being locked to the engine we want to reset, we have to reset
	 * it as well (we will unlock it once the reset sequence is completed).
	 */
	intel_uncore_rmw_or_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	if (__intel_wait_for_register_fw(uncore,
					 sfc_forced_lock_ack,
					 sfc_forced_lock_ack_bit,
					 sfc_forced_lock_ack_bit,
					 1000, 0, NULL)) {
		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
		return 0;
	}

	if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
		return sfc_reset_bit;

	return 0;
}

static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
			     struct intel_engine_cs *engine)
{
	u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
		break;

	default:
		return;
	}

	I915_WRITE_FW(sfc_forced_lock,
		      I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
}

static int gen11_reset_engines(struct drm_i915_private *i915,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
{
	const u32 hw_engine_mask[] = {
		[RCS0]  = GEN11_GRDOM_RENDER,
		[BCS0]  = GEN11_GRDOM_BLT,
		[VCS0]  = GEN11_GRDOM_MEDIA,
		[VCS1]  = GEN11_GRDOM_MEDIA2,
		[VCS2]  = GEN11_GRDOM_MEDIA3,
		[VCS3]  = GEN11_GRDOM_MEDIA4,
		[VECS0] = GEN11_GRDOM_VECS,
		[VECS1] = GEN11_GRDOM_VECS2,
	};
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN11_GRDOM_FULL;
	} else {
		hw_mask = 0;
		for_each_engine_masked(engine, i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
			hw_mask |= gen11_lock_sfc(i915, engine);
		}
	}

	ret = gen6_hw_domain_reset(i915, hw_mask);

	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, i915, engine_mask, tmp)
			gen11_unlock_sfc(i915, engine);

	return ret;
}

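/*
 * gen8+ adds a request/ack handshake: ask the engine to quiesce and wait
 * for it to report READY_TO_RESET before pulling the reset domain.
 */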
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = &engine->i915->uncore;
	int ret;

	intel_uncore_write_fw(uncore, RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = __intel_wait_for_register_fw(uncore,
					   RING_RESET_CTL(engine->mmio_base),
					   RESET_CTL_READY_TO_RESET,
					   RESET_CTL_READY_TO_RESET,
					   700, 0,
					   NULL);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *i915,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, i915, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We rather take context corruption instead of
		 * failed reset with a wedged driver/gpu. And
		 * active bb execution case should be covered by
		 * i915_stop_engines we have before the reset.
		 */
	}

	if (INTEL_GEN(i915) >= 11)
		ret = gen11_reset_engines(i915, engine_mask, retry);
	else
		ret = gen6_reset_engines(i915, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, i915, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	return ret;
}

typedef int (*reset_func)(struct drm_i915_private *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 8)
		return gen8_reset_engines;
	else if (INTEL_GEN(i915) >= 6)
		return gen6_reset_engines;
	else if (INTEL_GEN(i915) >= 5)
		return ironlake_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (INTEL_GEN(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

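/*
 * Ask the hardware to reset the indicated engines. A full-chip reset is
 * retried a few times, stopping the engines first on later attempts,
 * before giving up.
 */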
int intel_gpu_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(i915);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		/*
		 * We stop engines, otherwise we might get failed reset and a
		 * dead gpu (on elk). Also as modern gpu as kbl can suffer
		 * from system hang if batchbuffer is progressing when
		 * the reset is issued, regardless of READY_TO_RESET ack.
		 * Thus assume it is best to stop engines on all gens
		 * where we have a gpu reset.
		 *
		 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
		 *
		 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
		 *
		 * FIXME: Wa for more modern gens needs to be validated
		 */
		if (retry)
			i915_stop_engines(i915, engine_mask);

		GEM_TRACE("engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(i915, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *i915)
{
	if (USES_GUC(i915))
		return false;

	if (!i915_modparams.reset)
		return false;

	return intel_get_gpu_reset(i915);
}

bool intel_has_reset_engine(struct drm_i915_private *i915)
{
	return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
}

int intel_reset_guc(struct drm_i915_private *i915)
{
	u32 guc_domain =
		INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GUC(i915));

	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(i915, guc_domain);
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);

	return ret;
}

/*
 * Ensure irq handler finishes, and not run again.
 * Also return the active request so that we only search for it once.
 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(&engine->i915->uncore, FORCEWAKE_ALL);
	engine->reset.prepare(engine);
}

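/*
 * Zap all userspace mmaps made through the fence registers, so that any
 * access after the reset faults in again against the restored fence state.
 */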
static void revoke_mmaps(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->num_fence_regs; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(i915->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &i915->fence_regs[i]);
		node = &vma->obj->base.vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
		unmap_mapping_range(i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

static void reset_prepare(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		reset_prepare_engine(engine);

	intel_uc_reset_prepare(i915);
}

static void gt_revoke(struct drm_i915_private *i915)
{
	revoke_mmaps(i915);
}

static int gt_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(i915);
	if (err)
		return err;

	for_each_engine(engine, i915, id)
		intel_engine_reset(engine, stalled_mask & engine->mask);

	i915_gem_restore_fences(i915);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);
	intel_uncore_forcewake_put(&engine->i915->uncore, FORCEWAKE_ALL);
}

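/*
 * After a reset we want each engine to reload a kernel context for
 * powersaving; submitting a request needs struct_mutex, so defer it to a
 * worker rather than take the lock from the reset path itself.
 */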
struct i915_gpu_restart {
	struct work_struct work;
	struct drm_i915_private *i915;
};

static void restart_work(struct work_struct *work)
{
	struct i915_gpu_restart *arg = container_of(work, typeof(*arg), work);
	struct drm_i915_private *i915 = arg->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(i915);
	mutex_lock(&i915->drm.struct_mutex);
	WRITE_ONCE(i915->gpu_error.restart, NULL);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		/*
		 * Ostensibily, we always want a context loaded for powersaving,
		 * so if the engine is idle after the reset, send a request
		 * to load our scratch kernel_context.
		 */
		if (!intel_engine_is_idle(engine))
			continue;

		rq = i915_request_alloc(engine, i915->kernel_context);
		if (!IS_ERR(rq))
			i915_request_add(rq);
	}

	mutex_unlock(&i915->drm.struct_mutex);
	intel_runtime_pm_put(i915, wakeref);

	kfree(arg);
}

static void reset_finish(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		reset_finish_engine(engine);
		intel_engine_signal_breadcrumbs(engine);
	}
}

static void reset_restart(struct drm_i915_private *i915)
{
	struct i915_gpu_restart *arg;

	/*
	 * Following the reset, ensure that we always reload context for
	 * powersaving, and to correct engine->last_retired_context. Since
	 * this requires us to submit a request, queue a worker to do that
	 * task for us to evade any locking here.
	 */
	if (READ_ONCE(i915->gpu_error.restart))
		return;

	arg = kmalloc(sizeof(*arg), GFP_KERNEL);
	if (arg) {
		arg->i915 = i915;
		INIT_WORK(&arg->work, restart_work);

		WRITE_ONCE(i915->gpu_error.restart, arg);
		queue_work(i915->wq, &arg->work);
	}
}

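/*
 * When the device is wedged, requests are not sent to hardware; instead
 * they are immediately marked as complete with their fence error set to
 * -EIO, so that all waiters are released.
 */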
static void nop_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
		  engine->name, request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&engine->timeline.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
	spin_unlock_irqrestore(&engine->timeline.lock, flags);

	intel_engine_queue_breadcrumbs(engine);
}

static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &error->flags))
		return;

	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, i915, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

	GEM_TRACE("start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	reset_prepare(i915);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		intel_gpu_reset(i915, ALL_ENGINES);

	for_each_engine(engine, i915, id) {
		engine->submit_request = nop_submit_request;
		engine->schedule = NULL;
	}
	i915->caps.scheduler = 0;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();

	/* Mark all executing requests as skipped */
	for_each_engine(engine, i915, id)
		engine->cancel_requests(engine);

	reset_finish(i915);

	smp_mb__before_atomic();
	set_bit(I915_WEDGED, &error->flags);

	GEM_TRACE("end\n");
}

void i915_gem_set_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	intel_wakeref_t wakeref;

	mutex_lock(&error->wedge_mutex);
	with_intel_runtime_pm(i915, wakeref)
		__i915_gem_set_wedged(i915);
	mutex_unlock(&error->wedge_mutex);
}

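/*
 * Attempt to recover from a previously wedged state: wait for any requests
 * still waiting on foreign fences to be flushed out with -EIO, then restore
 * the real submission backends and clear the wedged bit.
 */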
static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct i915_timeline *tl;

	if (!test_bit(I915_WEDGED, &error->flags))
		return true;

	if (!i915->gt.scratch) /* Never full initialised, recovery impossible */
		return false;

	GEM_TRACE("start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	mutex_lock(&i915->gt.timelines.mutex);
	list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);
	}
	mutex_unlock(&i915->gt.timelines.mutex);

	intel_engines_sanitize(i915, false);

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(i915);

	GEM_TRACE("end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &i915->gpu_error.flags);

	return true;
}

bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	bool result;

	mutex_lock(&error->wedge_mutex);
	result = __i915_gem_unset_wedged(i915);
	mutex_unlock(&error->wedge_mutex);

	return result;
}

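/*
 * Perform the actual device reset, retrying with a short back-off a few
 * times, and then bring the GTT, engines and fences back up.
 */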
static int do_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t stalled_mask)
{
	int err, i;

	gt_revoke(i915);

	err = intel_gpu_reset(i915, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = intel_gpu_reset(i915, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(i915, stalled_mask);
}

/**
 * i915_reset - reset chip after a hang
 * @i915: #drm_i915_private to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void i915_reset(struct drm_i915_private *i915,
		intel_engine_mask_t stalled_mask,
		const char *reason)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	int ret;

	GEM_TRACE("flags=%lx\n", error->flags);

	might_sleep();
	assert_rpm_wakelock_held(i915);
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__i915_gem_unset_wedged(i915))
		return;

	if (reason)
		dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
	error->reset_count++;

	reset_prepare(i915);

	if (!intel_has_gpu_reset(i915)) {
		if (i915_modparams.reset)
			dev_err(i915->drm.dev, "GPU reset not supported\n");
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(i915);

	if (do_reset(i915, stalled_mask)) {
		dev_err(i915->drm.dev, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(i915);

	intel_overlay_reset(i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(i915);
	if (ret) {
		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
			  ret);
		goto error;
	}

	i915_queue_hangcheck(i915);

finish:
	reset_finish(i915);
	if (!__i915_wedged(error))
		reset_restart(i915);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
	__i915_gem_set_wedged(i915);
	goto finish;
}

static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
					struct intel_engine_cs *engine)
{
	return intel_gpu_reset(i915, engine->mask);
}

/**
 * i915_reset_engine - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no dev_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identifies the request that caused the hang and it is dropped
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
{
	struct i915_gpu_error *error = &engine->i915->gpu_error;
	int ret;

	GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));

	reset_prepare_engine(engine);

	if (msg)
		dev_notice(engine->i915->drm.dev,
			   "Resetting %s for %s\n", engine->name, msg);
	error->reset_engine_count[engine->id]++;

	if (!engine->i915->guc.execbuf_client)
		ret = intel_gt_reset_engine(engine->i915, engine);
	else
		ret = intel_guc_reset_engine(&engine->i915->guc, engine);
	if (ret) {
		/* If we fail here, we expect to fallback to a global reset */
		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
				 engine->i915->guc.execbuf_client ? "GuC " : "",
				 engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = engine->init_hw(engine);
	if (ret)
		goto out;

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	return ret;
}

static void i915_reset_device(struct drm_i915_private *i915,
			      intel_engine_mask_t engine_mask,
			      const char *reason)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct kobject *kobj = &i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct i915_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	i915_wedge_on_timeout(&w, i915, 5 * HZ) {
		intel_prepare_reset(i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&error->reset_backoff_srcu);

		mutex_lock(&error->wedge_mutex);
		i915_reset(i915, engine_mask, reason);
		mutex_unlock(&error->wedge_mutex);

		intel_finish_reset(i915);
	}

	if (!test_bit(I915_WEDGED, &error->flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

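/*
 * Clear the sticky error registers (EIR, PGTBL_ER, ring fault registers) so
 * that stale errors are not reported again after recovery.
 */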
static void clear_register(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	I915_WRITE(reg, I915_READ(reg));
}

void i915_clear_error_registers(struct drm_i915_private *dev_priv)
{
	u32 eir;

	if (!IS_GEN(dev_priv, 2))
		clear_register(dev_priv, PGTBL_ER);

	if (INTEL_GEN(dev_priv) < 4)
		clear_register(dev_priv, IPEIR(RENDER_RING_BASE));
	else
		clear_register(dev_priv, IPEIR_I965);

	clear_register(dev_priv, EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(dev_priv) >= 8) {
		I915_WRITE(GEN8_RING_FAULT_REG,
			   I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
		POSTING_READ(GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, dev_priv, id) {
			I915_WRITE(RING_FAULT_REG(engine),
				   I915_READ(RING_FAULT_REG(engine)) &
				   ~RING_FAULT_VALID);
		}
		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS0]));
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @i915: i915 device private
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_i915_private *i915,
		       intel_engine_mask_t engine_mask,
		       unsigned long flags,
		       const char *fmt, ...)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(i915);

	engine_mask &= INTEL_INFO(i915)->engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(i915, engine_mask, msg);
		i915_clear_error_registers(i915);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
		for_each_engine_masked(engine, i915, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &error->flags))
				continue;

			if (i915_reset_engine(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_bit(I915_RESET_ENGINE + engine->id,
				  &error->flags);
			wake_up_bit(&error->flags,
				    I915_RESET_ENGINE + engine->id);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
		wait_event(error->reset_queue,
			   !test_bit(I915_RESET_BACKOFF, &error->flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, i915, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&error->flags))
			wait_on_bit(&error->flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	i915_reset_device(i915, engine_mask, msg);

	for_each_engine(engine, i915, tmp) {
		clear_bit(I915_RESET_ENGINE + engine->id,
			  &error->flags);
	}

	clear_bit(I915_RESET_BACKOFF, &error->flags);
	wake_up_all(&error->reset_queue);

out:
	intel_runtime_pm_put(i915, wakeref);
}

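/*
 * SRCU guard for reset: i915_reset_trylock() blocks while a full device
 * reset is pending and then returns a read-side tag which must be handed
 * back to i915_reset_unlock().
 */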
int i915_reset_trylock(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	int srcu;

	might_lock(&error->reset_backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(error->reset_queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &error->flags)))
			return -EINTR;

		rcu_read_lock();
	}
	srcu = srcu_read_lock(&error->reset_backoff_srcu);
	rcu_read_unlock();

	return srcu;
}

void i915_reset_unlock(struct drm_i915_private *i915, int tag)
__releases(&i915->gpu_error.reset_backoff_srcu)
{
	struct i915_gpu_error *error = &i915->gpu_error;

	srcu_read_unlock(&error->reset_backoff_srcu, tag);
}

int i915_terminally_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;

	might_sleep();

	if (!__i915_wedged(error))
		return 0;

	/* Reset still in progress? Maybe we will recover? */
	if (!test_bit(I915_RESET_BACKOFF, &error->flags))
		return -EIO;

	/* XXX intel_reset_finish() still takes struct_mutex!!! */
	if (mutex_is_locked(&i915->drm.struct_mutex))
		return -EAGAIN;

	if (wait_event_interruptible(error->reset_queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &error->flags)))
		return -EINTR;

	return __i915_wedged(error) ? -EIO : 0;
}

bool i915_reset_flush(struct drm_i915_private *i915)
{
	int err;

	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);

	flush_workqueue(i915->wq);
	GEM_BUG_ON(READ_ONCE(i915->gpu_error.restart));

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_FOR_IDLE_BOOST,
				     MAX_SCHEDULE_TIMEOUT);
	mutex_unlock(&i915->drm.struct_mutex);

	return !err;
}

static void i915_wedge_me(struct work_struct *work)
{
	struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	i915_gem_set_wedged(w->i915);
}

void __i915_init_wedge(struct i915_wedge_me *w,
		       struct drm_i915_private *i915,
		       long timeout,
		       const char *name)
{
	w->i915 = i915;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __i915_fini_wedge(struct i915_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->i915 = NULL;
}