drivers/gpu/drm/i915/gt/intel_reset.c  (linux-2.6-microblaze.git @ 7b3d9d4517a03c017943ecb948bae2e97e9ee58b)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2008-2018 Intel Corporation
5  */
6
7 #include <linux/sched/mm.h>
8 #include <linux/stop_machine.h>
9
10 #include "display/intel_display_types.h"
11 #include "display/intel_overlay.h"
12
13 #include "gem/i915_gem_context.h"
14
15 #include "i915_drv.h"
16 #include "i915_gpu_error.h"
17 #include "i915_irq.h"
18 #include "intel_engine_pm.h"
19 #include "intel_gt.h"
20 #include "intel_gt_pm.h"
21 #include "intel_reset.h"
22
23 #include "uc/intel_guc.h"
24
25 #define RESET_MAX_RETRIES 3
26
27 /* XXX How to handle concurrent GGTT updates using tiling registers? */
28 #define RESET_UNDER_STOP_MACHINE 0
29
30 static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
31 {
32         intel_uncore_rmw_fw(uncore, reg, 0, set);
33 }
34
35 static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
36 {
37         intel_uncore_rmw_fw(uncore, reg, clr, 0);
38 }
39
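/*
 * Cancel with -EIO every request queued after @rq on this engine that
 * belongs to the same (hung) GEM context, so the guilty context does not
 * keep executing stale work once the reset completes.
 */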
40 static void engine_skip_context(struct i915_request *rq)
41 {
42         struct intel_engine_cs *engine = rq->engine;
43         struct i915_gem_context *hung_ctx = rq->gem_context;
44
45         if (!i915_request_is_active(rq))
46                 return;
47
48         lockdep_assert_held(&engine->active.lock);
49         list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
50                 if (rq->gem_context == hung_ctx)
51                         i915_request_skip(rq, -EIO);
52 }
53
54 static void client_mark_guilty(struct drm_i915_file_private *file_priv,
55                                const struct i915_gem_context *ctx)
56 {
57         unsigned int score;
58         unsigned long prev_hang;
59
60         if (i915_gem_context_is_banned(ctx))
61                 score = I915_CLIENT_SCORE_CONTEXT_BAN;
62         else
63                 score = 0;
64
65         prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
66         if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
67                 score += I915_CLIENT_SCORE_HANG_FAST;
68
69         if (score) {
70                 atomic_add(score, &file_priv->ban_score);
71
72                 DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
73                                  ctx->name, score,
74                                  atomic_read(&file_priv->ban_score));
75         }
76 }
77
78 static bool context_mark_guilty(struct i915_gem_context *ctx)
79 {
80         unsigned long prev_hang;
81         bool banned;
82         int i;
83
84         atomic_inc(&ctx->guilty_count);
85
86         /* Cool contexts are too cool to be banned! (Used for reset testing.) */
87         if (!i915_gem_context_is_bannable(ctx))
88                 return false;
89
90         /* Record the timestamp for the last N hangs */
91         prev_hang = ctx->hang_timestamp[0];
92         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
93                 ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
94         ctx->hang_timestamp[i] = jiffies;
95
96         /* If we have hung N+1 times in rapid succession, we ban the context! */
97         banned = !i915_gem_context_is_recoverable(ctx);
98         if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
99                 banned = true;
100         if (banned) {
101                 DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
102                                  ctx->name, atomic_read(&ctx->guilty_count));
103                 i915_gem_context_set_banned(ctx);
104         }
105
106         if (!IS_ERR_OR_NULL(ctx->file_priv))
107                 client_mark_guilty(ctx->file_priv, ctx);
108
109         return banned;
110 }
111
112 static void context_mark_innocent(struct i915_gem_context *ctx)
113 {
114         atomic_inc(&ctx->active_count);
115 }
116
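/*
 * Update a request caught in a reset: a guilty request is skipped with
 * -EIO (and may take the rest of its context with it), while an innocent
 * request is marked with -EAGAIN so it can be resubmitted after the reset.
 */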
117 void __i915_request_reset(struct i915_request *rq, bool guilty)
118 {
119         GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
120                   rq->engine->name,
121                   rq->fence.context,
122                   rq->fence.seqno,
123                   yesno(guilty));
124
125         GEM_BUG_ON(i915_request_completed(rq));
126
127         if (guilty) {
128                 i915_request_skip(rq, -EIO);
129                 if (context_mark_guilty(rq->gem_context))
130                         engine_skip_context(rq);
131         } else {
132                 dma_fence_set_error(&rq->fence, -EAGAIN);
133                 context_mark_innocent(rq->gem_context);
134         }
135 }
136
137 static bool i915_in_reset(struct pci_dev *pdev)
138 {
139         u8 gdrst;
140
141         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
142         return gdrst & GRDOM_RESET_STATUS;
143 }
144
145 static int i915_do_reset(struct intel_gt *gt,
146                          intel_engine_mask_t engine_mask,
147                          unsigned int retry)
148 {
149         struct pci_dev *pdev = gt->i915->drm.pdev;
150         int err;
151
152         /* Assert reset for at least 20 usec, and wait for acknowledgement. */
153         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
154         udelay(50);
155         err = wait_for_atomic(i915_in_reset(pdev), 50);
156
157         /* Clear the reset request. */
158         pci_write_config_byte(pdev, I915_GDRST, 0);
159         udelay(50);
160         if (!err)
161                 err = wait_for_atomic(!i915_in_reset(pdev), 50);
162
163         return err;
164 }
165
166 static bool g4x_reset_complete(struct pci_dev *pdev)
167 {
168         u8 gdrst;
169
170         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
171         return (gdrst & GRDOM_RESET_ENABLE) == 0;
172 }
173
174 static int g33_do_reset(struct intel_gt *gt,
175                         intel_engine_mask_t engine_mask,
176                         unsigned int retry)
177 {
178         struct pci_dev *pdev = gt->i915->drm.pdev;
179
180         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
181         return wait_for_atomic(g4x_reset_complete(pdev), 50);
182 }
183
184 static int g4x_do_reset(struct intel_gt *gt,
185                         intel_engine_mask_t engine_mask,
186                         unsigned int retry)
187 {
188         struct pci_dev *pdev = gt->i915->drm.pdev;
189         struct intel_uncore *uncore = gt->uncore;
190         int ret;
191
192         /* WaVcpClkGateDisableForMediaReset:ctg,elk */
193         rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
194         intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
195
196         pci_write_config_byte(pdev, I915_GDRST,
197                               GRDOM_MEDIA | GRDOM_RESET_ENABLE);
198         ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
199         if (ret) {
200                 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
201                 goto out;
202         }
203
204         pci_write_config_byte(pdev, I915_GDRST,
205                               GRDOM_RENDER | GRDOM_RESET_ENABLE);
206         ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
207         if (ret) {
208                 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
209                 goto out;
210         }
211
212 out:
213         pci_write_config_byte(pdev, I915_GDRST, 0);
214
215         rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
216         intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
217
218         return ret;
219 }
220
221 static int ironlake_do_reset(struct intel_gt *gt,
222                              intel_engine_mask_t engine_mask,
223                              unsigned int retry)
224 {
225         struct intel_uncore *uncore = gt->uncore;
226         int ret;
227
228         intel_uncore_write_fw(uncore, ILK_GDSR,
229                               ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
230         ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
231                                            ILK_GRDOM_RESET_ENABLE, 0,
232                                            5000, 0,
233                                            NULL);
234         if (ret) {
235                 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
236                 goto out;
237         }
238
239         intel_uncore_write_fw(uncore, ILK_GDSR,
240                               ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
241         ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
242                                            ILK_GRDOM_RESET_ENABLE, 0,
243                                            5000, 0,
244                                            NULL);
245         if (ret) {
246                 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
247                 goto out;
248         }
249
250 out:
251         intel_uncore_write_fw(uncore, ILK_GDSR, 0);
252         intel_uncore_posting_read_fw(uncore, ILK_GDSR);
253         return ret;
254 }
255
256 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
257 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
258 {
259         struct intel_uncore *uncore = gt->uncore;
260         int err;
261
262         /*
263          * GEN6_GDRST is not in the gt power well, no need to check
264          * for fifo space for the write or forcewake the chip for
265          * the read
266          */
267         intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
268
269         /* Wait for the device to ack the reset requests */
270         err = __intel_wait_for_register_fw(uncore,
271                                            GEN6_GDRST, hw_domain_mask, 0,
272                                            500, 0,
273                                            NULL);
274         if (err)
275                 DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
276                                  hw_domain_mask);
277
278         return err;
279 }
280
281 static int gen6_reset_engines(struct intel_gt *gt,
282                               intel_engine_mask_t engine_mask,
283                               unsigned int retry)
284 {
285         struct intel_engine_cs *engine;
286         const u32 hw_engine_mask[] = {
287                 [RCS0]  = GEN6_GRDOM_RENDER,
288                 [BCS0]  = GEN6_GRDOM_BLT,
289                 [VCS0]  = GEN6_GRDOM_MEDIA,
290                 [VCS1]  = GEN8_GRDOM_MEDIA2,
291                 [VECS0] = GEN6_GRDOM_VECS,
292         };
293         u32 hw_mask;
294
295         if (engine_mask == ALL_ENGINES) {
296                 hw_mask = GEN6_GRDOM_FULL;
297         } else {
298                 intel_engine_mask_t tmp;
299
300                 hw_mask = 0;
301                 for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
302                         GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
303                         hw_mask |= hw_engine_mask[engine->id];
304                 }
305         }
306
307         return gen6_hw_domain_reset(gt, hw_mask);
308 }
309
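/*
 * Before resetting a VCS/VECS engine we must force-lock the shared SFC
 * (Scaler & Format Converter) unit it may be using. If the SFC is locked
 * to the engine being reset, its reset domain bit is added to *hw_mask so
 * the SFC is reset along with the engine.
 */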
310 static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
311 {
312         struct intel_uncore *uncore = engine->uncore;
313         u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
314         i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
315         u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
316         i915_reg_t sfc_usage;
317         u32 sfc_usage_bit;
318         u32 sfc_reset_bit;
319         int ret;
320
321         switch (engine->class) {
322         case VIDEO_DECODE_CLASS:
323                 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
324                         return 0;
325
326                 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
327                 sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
328
329                 sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
330                 sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
331
332                 sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
333                 sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
334                 sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
335                 break;
336
337         case VIDEO_ENHANCEMENT_CLASS:
338                 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
339                 sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
340
341                 sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
342                 sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
343
344                 sfc_usage = GEN11_VECS_SFC_USAGE(engine);
345                 sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
346                 sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
347                 break;
348
349         default:
350                 return 0;
351         }
352
353         /*
354          * If the engine is using an SFC, tell the engine that a software reset
355          * is going to happen. The engine will then try to force lock the SFC.
356          * If SFC ends up being locked to the engine we want to reset, we have
357          * to reset it as well (we will unlock it once the reset sequence is
358          * completed).
359          */
360         if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
361                 return 0;
362
363         rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
364
365         ret = __intel_wait_for_register_fw(uncore,
366                                            sfc_forced_lock_ack,
367                                            sfc_forced_lock_ack_bit,
368                                            sfc_forced_lock_ack_bit,
369                                            1000, 0, NULL);
370
371         /* Was the SFC released while we were trying to lock it? */
372         if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
373                 return 0;
374
375         if (ret) {
376                 DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
377                 return ret;
378         }
379
380         *hw_mask |= sfc_reset_bit;
381         return 0;
382 }
383
384 static void gen11_unlock_sfc(struct intel_engine_cs *engine)
385 {
386         struct intel_uncore *uncore = engine->uncore;
387         u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
388         i915_reg_t sfc_forced_lock;
389         u32 sfc_forced_lock_bit;
390
391         switch (engine->class) {
392         case VIDEO_DECODE_CLASS:
393                 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
394                         return;
395
396                 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
397                 sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
398                 break;
399
400         case VIDEO_ENHANCEMENT_CLASS:
401                 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
402                 sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
403                 break;
404
405         default:
406                 return;
407         }
408
409         rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
410 }
411
412 static int gen11_reset_engines(struct intel_gt *gt,
413                                intel_engine_mask_t engine_mask,
414                                unsigned int retry)
415 {
416         const u32 hw_engine_mask[] = {
417                 [RCS0]  = GEN11_GRDOM_RENDER,
418                 [BCS0]  = GEN11_GRDOM_BLT,
419                 [VCS0]  = GEN11_GRDOM_MEDIA,
420                 [VCS1]  = GEN11_GRDOM_MEDIA2,
421                 [VCS2]  = GEN11_GRDOM_MEDIA3,
422                 [VCS3]  = GEN11_GRDOM_MEDIA4,
423                 [VECS0] = GEN11_GRDOM_VECS,
424                 [VECS1] = GEN11_GRDOM_VECS2,
425         };
426         struct intel_engine_cs *engine;
427         intel_engine_mask_t tmp;
428         u32 hw_mask;
429         int ret;
430
431         if (engine_mask == ALL_ENGINES) {
432                 hw_mask = GEN11_GRDOM_FULL;
433         } else {
434                 hw_mask = 0;
435                 for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
436                         GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
437                         hw_mask |= hw_engine_mask[engine->id];
438                         ret = gen11_lock_sfc(engine, &hw_mask);
439                         if (ret)
440                                 goto sfc_unlock;
441                 }
442         }
443
444         ret = gen6_hw_domain_reset(gt, hw_mask);
445
446 sfc_unlock:
447         /*
448          * We unlock the SFC based on the lock status and not the result of
449          * gen11_lock_sfc to make sure that we clean up properly if
450          * something went wrong during the lock (e.g. the lock was acquired
451          * after the timeout expired).
452          */
453         if (engine_mask != ALL_ENGINES)
454                 for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
455                         gen11_unlock_sfc(engine);
456
457         return ret;
458 }
459
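/*
 * Ask the engine to quiesce into a reset-ready state via RING_RESET_CTL
 * and wait for the acknowledgement; catastrophic errors bypass the normal
 * handshake (HAS#396813) and must instead be cleared by the hardware.
 */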
460 static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
461 {
462         struct intel_uncore *uncore = engine->uncore;
463         const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
464         u32 request, mask, ack;
465         int ret;
466
467         ack = intel_uncore_read_fw(uncore, reg);
468         if (ack & RESET_CTL_CAT_ERROR) {
469                 /*
470                  * For catastrophic errors, the ready-for-reset sequence
471                  * needs to be bypassed: HAS#396813
472                  */
473                 request = RESET_CTL_CAT_ERROR;
474                 mask = RESET_CTL_CAT_ERROR;
475
476                 /* Catastrophic errors need to be cleared by HW */
477                 ack = 0;
478         } else if (!(ack & RESET_CTL_READY_TO_RESET)) {
479                 request = RESET_CTL_REQUEST_RESET;
480                 mask = RESET_CTL_READY_TO_RESET;
481                 ack = RESET_CTL_READY_TO_RESET;
482         } else {
483                 return 0;
484         }
485
486         intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
487         ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
488                                            700, 0, NULL);
489         if (ret)
490                 DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
491                           engine->name, request,
492                           intel_uncore_read_fw(uncore, reg));
493
494         return ret;
495 }
496
497 static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
498 {
499         intel_uncore_write_fw(engine->uncore,
500                               RING_RESET_CTL(engine->mmio_base),
501                               _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
502 }
503
504 static int gen8_reset_engines(struct intel_gt *gt,
505                               intel_engine_mask_t engine_mask,
506                               unsigned int retry)
507 {
508         struct intel_engine_cs *engine;
509         const bool reset_non_ready = retry >= 1;
510         intel_engine_mask_t tmp;
511         int ret;
512
513         for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
514                 ret = gen8_engine_reset_prepare(engine);
515                 if (ret && !reset_non_ready)
516                         goto skip_reset;
517
518                 /*
519                  * If this is not the first failed attempt to prepare,
520                  * we decide to proceed anyway.
521                  *
522                  * By doing so we risk context corruption and, on
523                  * some gens (kbl), a possible system hang if the reset
524                  * happens during active batch buffer execution.
525                  *
526                  * We would rather accept context corruption than a
527                  * failed reset with a wedged driver/GPU; the active
528                  * batch buffer case should be covered by the
529                  * stop_engines() we perform before the reset.
530                  */
531         }
532
533         if (INTEL_GEN(gt->i915) >= 11)
534                 ret = gen11_reset_engines(gt, engine_mask, retry);
535         else
536                 ret = gen6_reset_engines(gt, engine_mask, retry);
537
538 skip_reset:
539         for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
540                 gen8_engine_reset_cancel(engine);
541
542         return ret;
543 }
544
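/* No-op reset backend used by the mock GT in selftests. */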
545 static int mock_reset(struct intel_gt *gt,
546                       intel_engine_mask_t mask,
547                       unsigned int retry)
548 {
549         return 0;
550 }
551
552 typedef int (*reset_func)(struct intel_gt *,
553                           intel_engine_mask_t engine_mask,
554                           unsigned int retry);
555
556 static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
557 {
558         struct drm_i915_private *i915 = gt->i915;
559
560         if (is_mock_gt(gt))
561                 return mock_reset;
562         else if (INTEL_GEN(i915) >= 8)
563                 return gen8_reset_engines;
564         else if (INTEL_GEN(i915) >= 6)
565                 return gen6_reset_engines;
566         else if (INTEL_GEN(i915) >= 5)
567                 return ironlake_do_reset;
568         else if (IS_G4X(i915))
569                 return g4x_do_reset;
570         else if (IS_G33(i915) || IS_PINEVIEW(i915))
571                 return g33_do_reset;
572         else if (INTEL_GEN(i915) >= 3)
573                 return i915_do_reset;
574         else
575                 return NULL;
576 }
577
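/*
 * Perform the platform-specific reset of the engines in @engine_mask,
 * retrying a full-GPU reset up to RESET_MAX_RETRIES times if it times out.
 */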
578 int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
579 {
580         const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
581         reset_func reset;
582         int ret = -ETIMEDOUT;
583         int retry;
584
585         reset = intel_get_gpu_reset(gt);
586         if (!reset)
587                 return -ENODEV;
588
589         /*
590          * If the power well sleeps during the reset, the reset
591          * request may be dropped and never complete (causing -EIO).
592          */
593         intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
594         for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
595                 GEM_TRACE("engine_mask=%x\n", engine_mask);
596                 preempt_disable();
597                 ret = reset(gt, engine_mask, retry);
598                 preempt_enable();
599         }
600         intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
601
602         return ret;
603 }
604
605 bool intel_has_gpu_reset(const struct intel_gt *gt)
606 {
607         if (!i915_modparams.reset)
608                 return false;
609
610         return intel_get_gpu_reset(gt);
611 }
612
613 bool intel_has_reset_engine(const struct intel_gt *gt)
614 {
615         if (i915_modparams.reset < 2)
616                 return false;
617
618         return INTEL_INFO(gt->i915)->has_reset_engine;
619 }
620
621 int intel_reset_guc(struct intel_gt *gt)
622 {
623         u32 guc_domain =
624                 INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
625         int ret;
626
627         GEM_BUG_ON(!HAS_GT_UC(gt->i915));
628
629         intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
630         ret = gen6_hw_domain_reset(gt, guc_domain);
631         intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
632
633         return ret;
634 }
635
636 /*
637  * Ensure the irq handler finishes, and is not run again.
638  * Also return the active request so that we only search for it once.
639  */
640 static void reset_prepare_engine(struct intel_engine_cs *engine)
641 {
642         /*
643          * During the reset sequence, we must prevent the engine from
644          * entering RC6. As the context state is undefined until we restart
645          * the engine, if it does enter RC6 during the reset, the state
646          * written to the powercontext is undefined and so we may lose
647          * GPU state upon resume, i.e. fail to restart after a reset.
648          */
649         intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
650         engine->reset.prepare(engine);
651 }
652
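/*
 * Invalidate any userspace GGTT mmaps backed by a fence register, forcing
 * the next access to fault and pick up new fence state after the reset.
 */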
653 static void revoke_mmaps(struct intel_gt *gt)
654 {
655         int i;
656
657         for (i = 0; i < gt->ggtt->num_fences; i++) {
658                 struct drm_vma_offset_node *node;
659                 struct i915_vma *vma;
660                 u64 vma_offset;
661
662                 vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
663                 if (!vma)
664                         continue;
665
666                 if (!i915_vma_has_userfault(vma))
667                         continue;
668
669                 GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
670                 node = &vma->obj->base.vma_node;
671                 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
672                 unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
673                                     drm_vma_node_offset_addr(node) + vma_offset,
674                                     vma->size,
675                                     1);
676         }
677 }
678
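/*
 * Quiesce every engine ahead of the reset, keeping a power-management
 * reference on each engine that was already awake; the returned mask is
 * handed to reset_finish() to drop those references afterwards.
 */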
679 static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
680 {
681         struct intel_engine_cs *engine;
682         intel_engine_mask_t awake = 0;
683         enum intel_engine_id id;
684
685         for_each_engine(engine, gt->i915, id) {
686                 if (intel_engine_pm_get_if_awake(engine))
687                         awake |= engine->mask;
688                 reset_prepare_engine(engine);
689         }
690
691         intel_uc_reset_prepare(&gt->uc);
692
693         return awake;
694 }
695
696 static void gt_revoke(struct intel_gt *gt)
697 {
698         revoke_mmaps(gt);
699 }
700
701 static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
702 {
703         struct intel_engine_cs *engine;
704         enum intel_engine_id id;
705         int err;
706
707         /*
708          * Everything depends on having the GTT running, so we need to start
709          * there.
710          */
711         err = i915_ggtt_enable_hw(gt->i915);
712         if (err)
713                 return err;
714
715         for_each_engine(engine, gt->i915, id)
716                 __intel_engine_reset(engine, stalled_mask & engine->mask);
717
718         i915_gem_restore_fences(gt->i915);
719
720         return err;
721 }
722
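/*
 * Undo reset_prepare_engine(): let the engine resume submission, release
 * the forcewake reference and flush any completed breadcrumbs.
 */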
723 static void reset_finish_engine(struct intel_engine_cs *engine)
724 {
725         engine->reset.finish(engine);
726         intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
727
728         intel_engine_breadcrumbs_irq(engine);
729 }
730
731 static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
732 {
733         struct intel_engine_cs *engine;
734         enum intel_engine_id id;
735
736         for_each_engine(engine, gt->i915, id) {
737                 reset_finish_engine(engine);
738                 if (awake & engine->mask)
739                         intel_engine_pm_put(engine);
740         }
741 }
742
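/*
 * Substitute submission backend used while wedged: every request is
 * completed immediately with -EIO instead of being sent to the hardware.
 */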
743 static void nop_submit_request(struct i915_request *request)
744 {
745         struct intel_engine_cs *engine = request->engine;
746         unsigned long flags;
747
748         GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
749                   engine->name, request->fence.context, request->fence.seqno);
750         dma_fence_set_error(&request->fence, -EIO);
751
752         spin_lock_irqsave(&engine->active.lock, flags);
753         __i915_request_submit(request);
754         i915_request_mark_complete(request);
755         spin_unlock_irqrestore(&engine->active.lock, flags);
756
757         intel_engine_queue_breadcrumbs(engine);
758 }
759
760 static void __intel_gt_set_wedged(struct intel_gt *gt)
761 {
762         struct intel_engine_cs *engine;
763         intel_engine_mask_t awake;
764         enum intel_engine_id id;
765
766         if (test_bit(I915_WEDGED, &gt->reset.flags))
767                 return;
768
769         if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
770                 struct drm_printer p = drm_debug_printer(__func__);
771
772                 for_each_engine(engine, gt->i915, id)
773                         intel_engine_dump(engine, &p, "%s\n", engine->name);
774         }
775
776         GEM_TRACE("start\n");
777
778         /*
779          * First, stop submission to hw, but do not yet complete requests by
780          * rolling the global seqno forward (since this would complete requests
781          * for which we haven't set the fence error to EIO yet).
782          */
783         awake = reset_prepare(gt);
784
785         /* Even if the GPU reset fails, it should still stop the engines */
786         if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
787                 __intel_gt_reset(gt, ALL_ENGINES);
788
789         for_each_engine(engine, gt->i915, id)
790                 engine->submit_request = nop_submit_request;
791
792         /*
793          * Make sure no request can slip through without getting completed by
794          * either this call here to intel_engine_write_global_seqno, or the one
795          * in nop_submit_request.
796          */
797         synchronize_rcu_expedited();
798         set_bit(I915_WEDGED, &gt->reset.flags);
799
800         /* Mark all executing requests as skipped */
801         for_each_engine(engine, gt->i915, id)
802                 engine->cancel_requests(engine);
803
804         reset_finish(gt, awake);
805
806         GEM_TRACE("end\n");
807 }
808
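/*
 * Declare the GPU wedged: stop all submission to the hardware and complete
 * every in-flight request with -EIO.
 */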
809 void intel_gt_set_wedged(struct intel_gt *gt)
810 {
811         intel_wakeref_t wakeref;
812
813         mutex_lock(&gt->reset.mutex);
814         with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
815                 __intel_gt_set_wedged(gt);
816         mutex_unlock(&gt->reset.mutex);
817 }
818
819 static bool __intel_gt_unset_wedged(struct intel_gt *gt)
820 {
821         struct intel_gt_timelines *timelines = &gt->timelines;
822         struct intel_timeline *tl;
823         unsigned long flags;
824         bool ok;
825
826         if (!test_bit(I915_WEDGED, &gt->reset.flags))
827                 return true;
828
829         /* Never fully initialised, recovery impossible */
830         if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
831                 return false;
832
833         GEM_TRACE("start\n");
834
835         /*
836          * Before unwedging, make sure that all pending operations
837          * are flushed and errored out - we may have requests waiting upon
838          * third party fences. We marked all inflight requests as EIO, and
839          * every execbuf since then has returned EIO; for consistency we want
840          * all the currently pending requests to also be marked as EIO, which
841          * is done inside our nop_submit_request - and so we must wait.
842          *
843          * No more can be submitted until we reset the wedged bit.
844          */
845         spin_lock_irqsave(&timelines->lock, flags);
846         list_for_each_entry(tl, &timelines->active_list, link) {
847                 struct dma_fence *fence;
848
849                 fence = i915_active_fence_get(&tl->last_request);
850                 if (!fence)
851                         continue;
852
853                 spin_unlock_irqrestore(&timelines->lock, flags);
854
855                 /*
856                  * All internal dependencies (i915_requests) will have
857                  * been flushed by the set-wedge, but we may be stuck waiting
858                  * for external fences. These should all be capped to 10s
859                  * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
860                  * in the worst case.
861                  */
862                 dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
863                 dma_fence_put(fence);
864
865                 /* Restart iteration after dropping the lock */
866                 spin_lock_irqsave(&timelines->lock, flags);
867                 tl = list_entry(&timelines->active_list, typeof(*tl), link);
868         }
869         spin_unlock_irqrestore(&timelines->lock, flags);
870
871         /* We must reset pending GPU events before restoring our submission */
872         ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
873         if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
874                 ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
875         if (!ok)
876                 return false;
877
878         /*
879          * Undo nop_submit_request. We prevent all new i915 requests from
880          * being queued (by disallowing execbuf whilst wedged) so having
881          * waited for all active requests above, we know the system is idle
882          * and do not have to worry about a thread being inside
883          * engine->submit_request() as we swap over. So unlike installing
884          * the nop_submit_request on reset, we can do this from normal
885          * context and do not require stop_machine().
886          */
887         intel_engines_reset_default_submission(gt);
888
889         GEM_TRACE("end\n");
890
891         smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
892         clear_bit(I915_WEDGED, &gt->reset.flags);
893
894         return true;
895 }
896
897 bool intel_gt_unset_wedged(struct intel_gt *gt)
898 {
899         bool result;
900
901         mutex_lock(&gt->reset.mutex);
902         result = __intel_gt_unset_wedged(gt);
903         mutex_unlock(&gt->reset.mutex);
904
905         return result;
906 }
907
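/*
 * Revoke userspace mmaps, then perform the chip reset (retrying with an
 * increasing back-off) before restoring the GGTT, fences and engine state.
 */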
908 static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
909 {
910         int err, i;
911
912         gt_revoke(gt);
913
914         err = __intel_gt_reset(gt, ALL_ENGINES);
915         for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
916                 msleep(10 * (i + 1));
917                 err = __intel_gt_reset(gt, ALL_ENGINES);
918         }
919         if (err)
920                 return err;
921
922         return gt_reset(gt, stalled_mask);
923 }
924
925 static int resume(struct intel_gt *gt)
926 {
927         struct intel_engine_cs *engine;
928         enum intel_engine_id id;
929         int ret;
930
931         for_each_engine(engine, gt->i915, id) {
932                 ret = engine->resume(engine);
933                 if (ret)
934                         return ret;
935         }
936
937         return 0;
938 }
939
940 /**
941  * intel_gt_reset - reset chip after a hang
942  * @gt: #intel_gt to reset
943  * @stalled_mask: mask of the stalled engines with the guilty requests
944  * @reason: user error message for why we are resetting
945  *
946  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
947  * on failure.
948  *
949  * Procedure is fairly simple:
950  *   - reset the chip using the reset reg
951  *   - re-init context state
952  *   - re-init hardware status page
953  *   - re-init ring buffer
954  *   - re-init interrupt state
955  *   - re-init display
956  */
957 void intel_gt_reset(struct intel_gt *gt,
958                     intel_engine_mask_t stalled_mask,
959                     const char *reason)
960 {
961         intel_engine_mask_t awake;
962         int ret;
963
964         GEM_TRACE("flags=%lx\n", gt->reset.flags);
965
966         might_sleep();
967         GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
968         mutex_lock(&gt->reset.mutex);
969
970         /* Clear any previous failed attempts at recovery. Time to try again. */
971         if (!__intel_gt_unset_wedged(gt))
972                 goto unlock;
973
974         if (reason)
975                 dev_notice(gt->i915->drm.dev,
976                            "Resetting chip for %s\n", reason);
977         atomic_inc(&gt->i915->gpu_error.reset_count);
978
979         awake = reset_prepare(gt);
980
981         if (!intel_has_gpu_reset(gt)) {
982                 if (i915_modparams.reset)
983                         dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
984                 else
985                         DRM_DEBUG_DRIVER("GPU reset disabled\n");
986                 goto error;
987         }
988
989         if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
990                 intel_runtime_pm_disable_interrupts(gt->i915);
991
992         if (do_reset(gt, stalled_mask)) {
993                 dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
994                 goto taint;
995         }
996
997         if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
998                 intel_runtime_pm_enable_interrupts(gt->i915);
999
1000         intel_overlay_reset(gt->i915);
1001
1002         /*
1003          * Next we need to restore the context, but we don't use those
1004          * yet either...
1005          *
1006          * Ring buffer needs to be re-initialized in the KMS case, or if X
1007          * was running at the time of the reset (i.e. we weren't VT
1008          * switched away).
1009          */
1010         ret = intel_gt_init_hw(gt);
1011         if (ret) {
1012                 DRM_ERROR("Failed to initialise HW following reset (%d)\n",
1013                           ret);
1014                 goto taint;
1015         }
1016
1017         ret = resume(gt);
1018         if (ret)
1019                 goto taint;
1020
1021         intel_gt_queue_hangcheck(gt);
1022
1023 finish:
1024         reset_finish(gt, awake);
1025 unlock:
1026         mutex_unlock(&gt->reset.mutex);
1027         return;
1028
1029 taint:
1030         /*
1031          * History tells us that if we cannot reset the GPU now, we
1032          * never will. This then impacts everything that is run
1033          * subsequently. On failing the reset, we mark the driver
1034          * as wedged, preventing further execution on the GPU.
1035          * We also want to go one step further and add a taint to the
1036          * kernel so that any subsequent faults can be traced back to
1037          * this failure. This is important for CI, where if the
1038          * GPU/driver fails we would like to reboot and restart testing
1039          * rather than continue on into oblivion. For everyone else,
1040          * the system should still plod along, but they have been warned!
1041          */
1042         add_taint_for_CI(TAINT_WARN);
1043 error:
1044         __intel_gt_set_wedged(gt);
1045         goto finish;
1046 }
1047
1048 static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
1049 {
1050         return __intel_gt_reset(engine->gt, engine->mask);
1051 }
1052
1053 /**
1054  * intel_engine_reset - reset GPU engine to recover from a hang
1055  * @engine: engine to reset
1056  * @msg: reason for GPU reset; or NULL for no dev_notice()
1057  *
1058  * Reset a specific GPU engine. Useful if a hang is detected.
1059  * Returns zero on successful reset or otherwise an error code.
1060  *
1061  * Procedure is:
1062  *  - identify the request that caused the hang and drop it
1063  *  - reset engine (which will force the engine to idle)
1064  *  - re-init/configure engine
1065  */
1066 int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
1067 {
1068         struct intel_gt *gt = engine->gt;
1069         int ret;
1070
1071         GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
1072         GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
1073
1074         if (!intel_engine_pm_get_if_awake(engine))
1075                 return 0;
1076
1077         reset_prepare_engine(engine);
1078
1079         if (msg)
1080                 dev_notice(engine->i915->drm.dev,
1081                            "Resetting %s for %s\n", engine->name, msg);
1082         atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
1083
1084         if (!engine->gt->uc.guc.execbuf_client)
1085                 ret = intel_gt_reset_engine(engine);
1086         else
1087                 ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
1088         if (ret) {
1089                 /* If we fail here, we expect to fallback to a global reset */
1090                 DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
1091                                  engine->gt->uc.guc.execbuf_client ? "GuC " : "",
1092                                  engine->name, ret);
1093                 goto out;
1094         }
1095
1096         /*
1097          * The request that caused the hang is stuck on the ELSP: we know the
1098          * active request and can drop it, then adjust the ring head to skip the
1099          * offending request and resume executing the remaining requests in the queue.
1100          */
1101         __intel_engine_reset(engine, true);
1102
1103         /*
1104          * The engine and its registers (and workarounds in case of render)
1105          * have been reset to their default values. Follow the init_ring
1106          * process to program RING_MODE, HWSP and re-enable submission.
1107          */
1108         ret = engine->resume(engine);
1109
1110 out:
1111         intel_engine_cancel_stop_cs(engine);
1112         reset_finish_engine(engine);
1113         intel_engine_pm_put(engine);
1114         return ret;
1115 }
1116
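/*
 * Full-chip reset with userspace notification: emit the error and reset
 * uevents, quiesce the display, perform intel_gt_reset() under a
 * wedge-on-timeout watchdog, and report completion when successful.
 */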
1117 static void intel_gt_reset_global(struct intel_gt *gt,
1118                                   u32 engine_mask,
1119                                   const char *reason)
1120 {
1121         struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
1122         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1123         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1124         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1125         struct intel_wedge_me w;
1126
1127         kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
1128
1129         DRM_DEBUG_DRIVER("resetting chip\n");
1130         kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1131
1132         /* Use a watchdog to ensure that our reset completes */
1133         intel_wedge_on_timeout(&w, gt, 5 * HZ) {
1134                 intel_prepare_reset(gt->i915);
1135
1136                 /* Flush everyone using a resource about to be clobbered */
1137                 synchronize_srcu_expedited(&gt->reset.backoff_srcu);
1138
1139                 intel_gt_reset(gt, engine_mask, reason);
1140
1141                 intel_finish_reset(gt->i915);
1142         }
1143
1144         if (!test_bit(I915_WEDGED, &gt->reset.flags))
1145                 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
1146 }
1147
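/*
 * A typical (hypothetical) caller of intel_gt_handle_error(), e.g. from a
 * hangcheck or guilty-request path, might look like:
 *
 *	intel_gt_handle_error(gt, engine->mask, I915_ERROR_CAPTURE,
 *			      "%s hung", engine->name);
 */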
1148 /**
1149  * intel_gt_handle_error - handle a gpu error
1150  * @gt: the intel_gt
1151  * @engine_mask: mask representing engines that are hung
1152  * @flags: control flags
1153  * @fmt: Error message format string
1154  *
1155  * Do some basic checking of register state at error time and
1156  * dump it to the syslog.  Also call i915_capture_error_state() to make
1157  * sure we get a record and make it available in debugfs.  Fire a uevent
1158  * so userspace knows something bad happened (should trigger collection
1159  * of a ring dump etc.).
1160  */
1161 void intel_gt_handle_error(struct intel_gt *gt,
1162                            intel_engine_mask_t engine_mask,
1163                            unsigned long flags,
1164                            const char *fmt, ...)
1165 {
1166         struct intel_engine_cs *engine;
1167         intel_wakeref_t wakeref;
1168         intel_engine_mask_t tmp;
1169         char error_msg[80];
1170         char *msg = NULL;
1171
1172         if (fmt) {
1173                 va_list args;
1174
1175                 va_start(args, fmt);
1176                 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
1177                 va_end(args);
1178
1179                 msg = error_msg;
1180         }
1181
1182         /*
1183          * In most cases it's guaranteed that we get here with an RPM
1184          * reference held, for example because there is a pending GPU
1185          * request that won't finish until the reset is done. This
1186          * isn't the case at least when we get here by doing a
1187          * simulated reset via debugfs, so get an RPM reference.
1188          */
1189         wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
1190
1191         engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
1192
1193         if (flags & I915_ERROR_CAPTURE) {
1194                 i915_capture_error_state(gt->i915, engine_mask, msg);
1195                 intel_gt_clear_error_registers(gt, engine_mask);
1196         }
1197
1198         /*
1199          * Try engine reset when available. We fall back to full reset if
1200          * a single engine reset fails.
1201          */
1202         if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
1203                 for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
1204                         BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
1205                         if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1206                                              &gt->reset.flags))
1207                                 continue;
1208
1209                         if (intel_engine_reset(engine, msg) == 0)
1210                                 engine_mask &= ~engine->mask;
1211
1212                         clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
1213                                               &gt->reset.flags);
1214                 }
1215         }
1216
1217         if (!engine_mask)
1218                 goto out;
1219
1220         /* Full reset needs the mutex, stop any other user trying to do so. */
1221         if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1222                 wait_event(gt->reset.queue,
1223                            !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1224                 goto out; /* piggy-back on the other reset */
1225         }
1226
1227         /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
1228         synchronize_rcu_expedited();
1229
1230         /* Prevent any other reset-engine attempt. */
1231         for_each_engine(engine, gt->i915, tmp) {
1232                 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1233                                         &gt->reset.flags))
1234                         wait_on_bit(&gt->reset.flags,
1235                                     I915_RESET_ENGINE + engine->id,
1236                                     TASK_UNINTERRUPTIBLE);
1237         }
1238
1239         intel_gt_reset_global(gt, engine_mask, msg);
1240
1241         for_each_engine(engine, gt->i915, tmp)
1242                 clear_bit_unlock(I915_RESET_ENGINE + engine->id,
1243                                  &gt->reset.flags);
1244         clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
1245         smp_mb__after_atomic();
1246         wake_up_all(&gt->reset.queue);
1247
1248 out:
1249         intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
1250 }
1251
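/*
 * Enter the reset read-side critical section: wait (interruptibly) for any
 * pending full GPU reset to complete, then take an SRCU read lock that a
 * subsequent reset will synchronize against; release it with
 * intel_gt_reset_unlock().
 */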
1252 int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
1253 {
1254         might_lock(&gt->reset.backoff_srcu);
1255         might_sleep();
1256
1257         rcu_read_lock();
1258         while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1259                 rcu_read_unlock();
1260
1261                 if (wait_event_interruptible(gt->reset.queue,
1262                                              !test_bit(I915_RESET_BACKOFF,
1263                                                        &gt->reset.flags)))
1264                         return -EINTR;
1265
1266                 rcu_read_lock();
1267         }
1268         *srcu = srcu_read_lock(&gt->reset.backoff_srcu);
1269         rcu_read_unlock();
1270
1271         return 0;
1272 }
1273
1274 void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
1275 __releases(&gt->reset.backoff_srcu)
1276 {
1277         srcu_read_unlock(&gt->reset.backoff_srcu, tag);
1278 }
1279
1280 int intel_gt_terminally_wedged(struct intel_gt *gt)
1281 {
1282         might_sleep();
1283
1284         if (!intel_gt_is_wedged(gt))
1285                 return 0;
1286
1287         /* Reset still in progress? Maybe we will recover? */
1288         if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
1289                 return -EIO;
1290
1291         if (wait_event_interruptible(gt->reset.queue,
1292                                      !test_bit(I915_RESET_BACKOFF,
1293                                                &gt->reset.flags)))
1294                 return -EINTR;
1295
1296         return intel_gt_is_wedged(gt) ? -EIO : 0;
1297 }
1298
1299 void intel_gt_set_wedged_on_init(struct intel_gt *gt)
1300 {
1301         BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
1302                      I915_WEDGED_ON_INIT);
1303         intel_gt_set_wedged(gt);
1304         set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
1305 }
1306
1307 void intel_gt_init_reset(struct intel_gt *gt)
1308 {
1309         init_waitqueue_head(&gt->reset.queue);
1310         mutex_init(&gt->reset.mutex);
1311         init_srcu_struct(&gt->reset.backoff_srcu);
1312 }
1313
1314 void intel_gt_fini_reset(struct intel_gt *gt)
1315 {
1316         cleanup_srcu_struct(&gt->reset.backoff_srcu);
1317 }
1318
1319 static void intel_wedge_me(struct work_struct *work)
1320 {
1321         struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
1322
1323         dev_err(w->gt->i915->drm.dev,
1324                 "%s timed out, cancelling all in-flight rendering.\n",
1325                 w->name);
1326         intel_gt_set_wedged(w->gt);
1327 }
1328
1329 void __intel_init_wedge(struct intel_wedge_me *w,
1330                         struct intel_gt *gt,
1331                         long timeout,
1332                         const char *name)
1333 {
1334         w->gt = gt;
1335         w->name = name;
1336
1337         INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
1338         schedule_delayed_work(&w->work, timeout);
1339 }
1340
1341 void __intel_fini_wedge(struct intel_wedge_me *w)
1342 {
1343         cancel_delayed_work_sync(&w->work);
1344         destroy_delayed_work_on_stack(&w->work);
1345         w->gt = NULL;
1346 }
1347
1348 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1349 #include "selftest_reset.c"
1350 #endif