drivers/gpu/drm/i915/i915_reset.c (linux-2.6-microblaze.git; merge tag 'drm-intel-next-2019-04-04' into gvt-next)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2008-2018 Intel Corporation
5  */
6
7 #include <linux/sched/mm.h>
8 #include <linux/stop_machine.h>
9
10 #include "i915_drv.h"
11 #include "i915_gpu_error.h"
12 #include "i915_reset.h"
13
14 #include "intel_guc.h"
15
16 #define RESET_MAX_RETRIES 3
17
18 /* XXX How to handle concurrent GGTT updates using tiling registers? */
19 #define RESET_UNDER_STOP_MACHINE 0
20
21 static void engine_skip_context(struct i915_request *rq)
22 {
23         struct intel_engine_cs *engine = rq->engine;
24         struct i915_gem_context *hung_ctx = rq->gem_context;
25
26         lockdep_assert_held(&engine->timeline.lock);
27
28         if (!i915_request_is_active(rq))
29                 return;
30
31         list_for_each_entry_continue(rq, &engine->timeline.requests, link)
32                 if (rq->gem_context == hung_ctx)
33                         i915_request_skip(rq, -EIO);
34 }
35
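/*
 * Bump the per-client ban score when one of its contexts is found guilty of
 * a hang; hanging again in quick succession is penalised more heavily.
 */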
36 static void client_mark_guilty(struct drm_i915_file_private *file_priv,
37                                const struct i915_gem_context *ctx)
38 {
39         unsigned int score;
40         unsigned long prev_hang;
41
42         if (i915_gem_context_is_banned(ctx))
43                 score = I915_CLIENT_SCORE_CONTEXT_BAN;
44         else
45                 score = 0;
46
47         prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
48         if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
49                 score += I915_CLIENT_SCORE_HANG_FAST;
50
51         if (score) {
52                 atomic_add(score, &file_priv->ban_score);
53
54                 DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
55                                  ctx->name, score,
56                                  atomic_read(&file_priv->ban_score));
57         }
58 }
59
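/*
 * Mark a context as the cause of a hang: bump its guilty count, record the
 * hang timestamp and, if the context is unrecoverable or hangs too rapidly,
 * ban it. The blame is also propagated to the owning client, if any.
 */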
60 static bool context_mark_guilty(struct i915_gem_context *ctx)
61 {
62         unsigned long prev_hang;
63         bool banned;
64         int i;
65
66         atomic_inc(&ctx->guilty_count);
67
68         /* Cool contexts are too cool to be banned! (Used for reset testing.) */
69         if (!i915_gem_context_is_bannable(ctx))
70                 return false;
71
72         /* Record the timestamp for the last N hangs */
73         prev_hang = ctx->hang_timestamp[0];
74         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
75                 ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
76         ctx->hang_timestamp[i] = jiffies;
77
78         /* If we have hung N+1 times in rapid succession, we ban the context! */
79         banned = !i915_gem_context_is_recoverable(ctx);
80         if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
81                 banned = true;
82         if (banned) {
83                 DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
84                                  ctx->name, atomic_read(&ctx->guilty_count));
85                 i915_gem_context_set_banned(ctx);
86         }
87
88         if (!IS_ERR_OR_NULL(ctx->file_priv))
89                 client_mark_guilty(ctx->file_priv, ctx);
90
91         return banned;
92 }
93
94 static void context_mark_innocent(struct i915_gem_context *ctx)
95 {
96         atomic_inc(&ctx->active_count);
97 }
98
99 void i915_reset_request(struct i915_request *rq, bool guilty)
100 {
101         GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
102                   rq->engine->name,
103                   rq->fence.context,
104                   rq->fence.seqno,
105                   yesno(guilty));
106
107         lockdep_assert_held(&rq->engine->timeline.lock);
108         GEM_BUG_ON(i915_request_completed(rq));
109
110         if (guilty) {
111                 i915_request_skip(rq, -EIO);
112                 if (context_mark_guilty(rq->gem_context))
113                         engine_skip_context(rq);
114         } else {
115                 dma_fence_set_error(&rq->fence, -EAGAIN);
116                 context_mark_innocent(rq->gem_context);
117         }
118 }
119
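/*
 * Force the ring into a stopped, empty state (head == tail, CTL cleared)
 * before the reset is issued; see the comment in intel_gpu_reset() for why
 * the engines are stopped first.
 */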
120 static void gen3_stop_engine(struct intel_engine_cs *engine)
121 {
122         struct drm_i915_private *dev_priv = engine->i915;
123         const u32 base = engine->mmio_base;
124
125         GEM_TRACE("%s\n", engine->name);
126
127         if (intel_engine_stop_cs(engine))
128                 GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
129
130         I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
131         POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
132
133         I915_WRITE_FW(RING_HEAD(base), 0);
134         I915_WRITE_FW(RING_TAIL(base), 0);
135         POSTING_READ_FW(RING_TAIL(base));
136
137         /* The ring must be empty before it is disabled */
138         I915_WRITE_FW(RING_CTL(base), 0);
139
140         /* Check acts as a post */
141         if (I915_READ_FW(RING_HEAD(base)))
142                 GEM_TRACE("%s: ring head [%x] not parked\n",
143                           engine->name, I915_READ_FW(RING_HEAD(base)));
144 }
145
146 static void i915_stop_engines(struct drm_i915_private *i915,
147                               intel_engine_mask_t engine_mask)
148 {
149         struct intel_engine_cs *engine;
150         intel_engine_mask_t tmp;
151
152         if (INTEL_GEN(i915) < 3)
153                 return;
154
155         for_each_engine_masked(engine, i915, engine_mask, tmp)
156                 gen3_stop_engine(engine);
157 }
158
159 static bool i915_in_reset(struct pci_dev *pdev)
160 {
161         u8 gdrst;
162
163         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
164         return gdrst & GRDOM_RESET_STATUS;
165 }
166
167 static int i915_do_reset(struct drm_i915_private *i915,
168                          intel_engine_mask_t engine_mask,
169                          unsigned int retry)
170 {
171         struct pci_dev *pdev = i915->drm.pdev;
172         int err;
173
174         /* Assert reset for at least 20 usec, and wait for acknowledgement. */
175         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
176         udelay(50);
177         err = wait_for_atomic(i915_in_reset(pdev), 50);
178
179         /* Clear the reset request. */
180         pci_write_config_byte(pdev, I915_GDRST, 0);
181         udelay(50);
182         if (!err)
183                 err = wait_for_atomic(!i915_in_reset(pdev), 50);
184
185         return err;
186 }
187
188 static bool g4x_reset_complete(struct pci_dev *pdev)
189 {
190         u8 gdrst;
191
192         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
193         return (gdrst & GRDOM_RESET_ENABLE) == 0;
194 }
195
196 static int g33_do_reset(struct drm_i915_private *i915,
197                         intel_engine_mask_t engine_mask,
198                         unsigned int retry)
199 {
200         struct pci_dev *pdev = i915->drm.pdev;
201
202         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
203         return wait_for_atomic(g4x_reset_complete(pdev), 50);
204 }
205
206 static int g4x_do_reset(struct drm_i915_private *dev_priv,
207                         intel_engine_mask_t engine_mask,
208                         unsigned int retry)
209 {
210         struct pci_dev *pdev = dev_priv->drm.pdev;
211         int ret;
212
213         /* WaVcpClkGateDisableForMediaReset:ctg,elk */
214         I915_WRITE_FW(VDECCLK_GATE_D,
215                       I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
216         POSTING_READ_FW(VDECCLK_GATE_D);
217
218         pci_write_config_byte(pdev, I915_GDRST,
219                               GRDOM_MEDIA | GRDOM_RESET_ENABLE);
220         ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
221         if (ret) {
222                 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
223                 goto out;
224         }
225
226         pci_write_config_byte(pdev, I915_GDRST,
227                               GRDOM_RENDER | GRDOM_RESET_ENABLE);
228         ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
229         if (ret) {
230                 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
231                 goto out;
232         }
233
234 out:
235         pci_write_config_byte(pdev, I915_GDRST, 0);
236
237         I915_WRITE_FW(VDECCLK_GATE_D,
238                       I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
239         POSTING_READ_FW(VDECCLK_GATE_D);
240
241         return ret;
242 }
243
244 static int ironlake_do_reset(struct drm_i915_private *dev_priv,
245                              intel_engine_mask_t engine_mask,
246                              unsigned int retry)
247 {
248         struct intel_uncore *uncore = &dev_priv->uncore;
249         int ret;
250
251         intel_uncore_write_fw(uncore, ILK_GDSR,
252                               ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
253         ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
254                                            ILK_GRDOM_RESET_ENABLE, 0,
255                                            5000, 0,
256                                            NULL);
257         if (ret) {
258                 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
259                 goto out;
260         }
261
262         intel_uncore_write_fw(uncore, ILK_GDSR,
263                               ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
264         ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
265                                            ILK_GRDOM_RESET_ENABLE, 0,
266                                            5000, 0,
267                                            NULL);
268         if (ret) {
269                 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
270                 goto out;
271         }
272
273 out:
274         intel_uncore_write_fw(uncore, ILK_GDSR, 0);
275         intel_uncore_posting_read_fw(uncore, ILK_GDSR);
276         return ret;
277 }
278
279 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
280 static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
281                                 u32 hw_domain_mask)
282 {
283         struct intel_uncore *uncore = &dev_priv->uncore;
284         int err;
285
286         /*
287          * GEN6_GDRST is not in the gt power well, no need to check
288          * for fifo space for the write or forcewake the chip for
289          * the read
290          */
291         intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
292
293         /* Wait for the device to ack the reset requests */
294         err = __intel_wait_for_register_fw(uncore,
295                                            GEN6_GDRST, hw_domain_mask, 0,
296                                            500, 0,
297                                            NULL);
298         if (err)
299                 DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
300                                  hw_domain_mask);
301
302         return err;
303 }
304
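/* Translate the engine mask into GEN6_GRDOM_* bits and reset those domains. */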
305 static int gen6_reset_engines(struct drm_i915_private *i915,
306                               intel_engine_mask_t engine_mask,
307                               unsigned int retry)
308 {
309         struct intel_engine_cs *engine;
310         const u32 hw_engine_mask[] = {
311                 [RCS0]  = GEN6_GRDOM_RENDER,
312                 [BCS0]  = GEN6_GRDOM_BLT,
313                 [VCS0]  = GEN6_GRDOM_MEDIA,
314                 [VCS1]  = GEN8_GRDOM_MEDIA2,
315                 [VECS0] = GEN6_GRDOM_VECS,
316         };
317         u32 hw_mask;
318
319         if (engine_mask == ALL_ENGINES) {
320                 hw_mask = GEN6_GRDOM_FULL;
321         } else {
322                 intel_engine_mask_t tmp;
323
324                 hw_mask = 0;
325                 for_each_engine_masked(engine, i915, engine_mask, tmp) {
326                         GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
327                         hw_mask |= hw_engine_mask[engine->id];
328                 }
329         }
330
331         return gen6_hw_domain_reset(i915, hw_mask);
332 }
333
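/*
 * If the engine about to be reset currently owns a shared SFC unit, force
 * the SFC lock and return the extra reset-domain bit needed to reset the
 * SFC as well; returns 0 if no SFC reset is required.
 */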
334 static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
335                           struct intel_engine_cs *engine)
336 {
337         struct intel_uncore *uncore = &dev_priv->uncore;
338         u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
339         i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
340         u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
341         i915_reg_t sfc_usage;
342         u32 sfc_usage_bit;
343         u32 sfc_reset_bit;
344
345         switch (engine->class) {
346         case VIDEO_DECODE_CLASS:
347                 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
348                         return 0;
349
350                 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
351                 sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
352
353                 sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
354                 sfc_forced_lock_ack_bit  = GEN11_VCS_SFC_LOCK_ACK_BIT;
355
356                 sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
357                 sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
358                 sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
359                 break;
360
361         case VIDEO_ENHANCEMENT_CLASS:
362                 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
363                 sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
364
365                 sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
366                 sfc_forced_lock_ack_bit  = GEN11_VECS_SFC_LOCK_ACK_BIT;
367
368                 sfc_usage = GEN11_VECS_SFC_USAGE(engine);
369                 sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
370                 sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
371                 break;
372
373         default:
374                 return 0;
375         }
376
377         /*
378          * Tell the engine that a software reset is going to happen. The engine
379          * will then try to force lock the SFC (if currently locked, it will
380          * remain so until we tell the engine it is safe to unlock; if currently
381          * unlocked, it will ignore this and all new lock requests). If SFC
382          * ends up being locked to the engine we want to reset, we have to reset
383          * it as well (we will unlock it once the reset sequence is completed).
384          */
385         intel_uncore_rmw_or_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
386
387         if (__intel_wait_for_register_fw(uncore,
388                                          sfc_forced_lock_ack,
389                                          sfc_forced_lock_ack_bit,
390                                          sfc_forced_lock_ack_bit,
391                                          1000, 0, NULL)) {
392                 DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
393                 return 0;
394         }
395
396         if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
397                 return sfc_reset_bit;
398
399         return 0;
400 }
401
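/* Drop the forced SFC lock taken by gen11_lock_sfc() once the reset is done. */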
402 static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
403                              struct intel_engine_cs *engine)
404 {
405         u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
406         i915_reg_t sfc_forced_lock;
407         u32 sfc_forced_lock_bit;
408
409         switch (engine->class) {
410         case VIDEO_DECODE_CLASS:
411                 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
412                         return;
413
414                 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
415                 sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
416                 break;
417
418         case VIDEO_ENHANCEMENT_CLASS:
419                 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
420                 sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
421                 break;
422
423         default:
424                 return;
425         }
426
427         I915_WRITE_FW(sfc_forced_lock,
428                       I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
429 }
430
431 static int gen11_reset_engines(struct drm_i915_private *i915,
432                                intel_engine_mask_t engine_mask,
433                                unsigned int retry)
434 {
435         const u32 hw_engine_mask[] = {
436                 [RCS0]  = GEN11_GRDOM_RENDER,
437                 [BCS0]  = GEN11_GRDOM_BLT,
438                 [VCS0]  = GEN11_GRDOM_MEDIA,
439                 [VCS1]  = GEN11_GRDOM_MEDIA2,
440                 [VCS2]  = GEN11_GRDOM_MEDIA3,
441                 [VCS3]  = GEN11_GRDOM_MEDIA4,
442                 [VECS0] = GEN11_GRDOM_VECS,
443                 [VECS1] = GEN11_GRDOM_VECS2,
444         };
445         struct intel_engine_cs *engine;
446         intel_engine_mask_t tmp;
447         u32 hw_mask;
448         int ret;
449
450         if (engine_mask == ALL_ENGINES) {
451                 hw_mask = GEN11_GRDOM_FULL;
452         } else {
453                 hw_mask = 0;
454                 for_each_engine_masked(engine, i915, engine_mask, tmp) {
455                         GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
456                         hw_mask |= hw_engine_mask[engine->id];
457                         hw_mask |= gen11_lock_sfc(i915, engine);
458                 }
459         }
460
461         ret = gen6_hw_domain_reset(i915, hw_mask);
462
463         if (engine_mask != ALL_ENGINES)
464                 for_each_engine_masked(engine, i915, engine_mask, tmp)
465                         gen11_unlock_sfc(i915, engine);
466
467         return ret;
468 }
469
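/*
 * Request a reset from the engine and wait for it to acknowledge with
 * READY_TO_RESET before the actual reset is triggered.
 */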
470 static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
471 {
472         struct intel_uncore *uncore = &engine->i915->uncore;
473         int ret;
474
475         intel_uncore_write_fw(uncore, RING_RESET_CTL(engine->mmio_base),
476                               _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
477
478         ret = __intel_wait_for_register_fw(uncore,
479                                            RING_RESET_CTL(engine->mmio_base),
480                                            RESET_CTL_READY_TO_RESET,
481                                            RESET_CTL_READY_TO_RESET,
482                                            700, 0,
483                                            NULL);
484         if (ret)
485                 DRM_ERROR("%s: reset request timeout\n", engine->name);
486
487         return ret;
488 }
489
490 static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
491 {
492         struct drm_i915_private *dev_priv = engine->i915;
493
494         I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
495                       _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
496 }
497
498 static int gen8_reset_engines(struct drm_i915_private *i915,
499                               intel_engine_mask_t engine_mask,
500                               unsigned int retry)
501 {
502         struct intel_engine_cs *engine;
503         const bool reset_non_ready = retry >= 1;
504         intel_engine_mask_t tmp;
505         int ret;
506
507         for_each_engine_masked(engine, i915, engine_mask, tmp) {
508                 ret = gen8_engine_reset_prepare(engine);
509                 if (ret && !reset_non_ready)
510                         goto skip_reset;
511
512                 /*
513                  * If this is not the first failed attempt to prepare,
514                  * we decide to proceed anyway.
515                  *
516                  * By doing so we risk context corruption and with
517                  * some gens (kbl), possible system hang if reset
518                  * happens during active bb execution.
519                  *
520                  * We would rather risk context corruption than a failed
521                  * reset with a wedged driver/gpu. The active bb execution
522                  * case should be covered by the i915_stop_engines() call
523                  * we make before the reset.
524                  */
525         }
526
527         if (INTEL_GEN(i915) >= 11)
528                 ret = gen11_reset_engines(i915, engine_mask, retry);
529         else
530                 ret = gen6_reset_engines(i915, engine_mask, retry);
531
532 skip_reset:
533         for_each_engine_masked(engine, i915, engine_mask, tmp)
534                 gen8_engine_reset_cancel(engine);
535
536         return ret;
537 }
538
539 typedef int (*reset_func)(struct drm_i915_private *,
540                           intel_engine_mask_t engine_mask,
541                           unsigned int retry);
542
543 static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
544 {
545         if (INTEL_GEN(i915) >= 8)
546                 return gen8_reset_engines;
547         else if (INTEL_GEN(i915) >= 6)
548                 return gen6_reset_engines;
549         else if (INTEL_GEN(i915) >= 5)
550                 return ironlake_do_reset;
551         else if (IS_G4X(i915))
552                 return g4x_do_reset;
553         else if (IS_G33(i915) || IS_PINEVIEW(i915))
554                 return g33_do_reset;
555         else if (INTEL_GEN(i915) >= 3)
556                 return i915_do_reset;
557         else
558                 return NULL;
559 }
560
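/*
 * Attempt a hardware reset of the engines in @engine_mask. A full reset
 * (ALL_ENGINES) is retried up to RESET_MAX_RETRIES times, stopping the
 * engines before each retry after the first attempt; forcewake is held and
 * preemption disabled around the register sequence.
 */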
561 int intel_gpu_reset(struct drm_i915_private *i915,
562                     intel_engine_mask_t engine_mask)
563 {
564         const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
565         reset_func reset;
566         int ret = -ETIMEDOUT;
567         int retry;
568
569         reset = intel_get_gpu_reset(i915);
570         if (!reset)
571                 return -ENODEV;
572
573         /*
574          * If the power well sleeps during the reset, the reset
575          * request may be dropped and never completes (causing -EIO).
576          */
577         intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
578         for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
579                 /*
580                  * We stop the engines, otherwise we might get a failed
581                  * reset and a dead gpu (on elk). Even a gpu as modern as
582                  * kbl can suffer a system hang if a batchbuffer is still
583                  * progressing when the reset is issued, regardless of the
584                  * READY_TO_RESET ack. Thus assume it is best to stop the
585                  * engines on all gens where we have a gpu reset.
586                  *
587                  * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
588                  *
589                  * WaMediaResetMainRingCleanup:ctg,elk (presumably)
590                  *
591                  * FIXME: Wa for more modern gens needs to be validated
592                  */
593                 if (retry)
594                         i915_stop_engines(i915, engine_mask);
595
596                 GEM_TRACE("engine_mask=%x\n", engine_mask);
597                 preempt_disable();
598                 ret = reset(i915, engine_mask, retry);
599                 preempt_enable();
600         }
601         intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
602
603         return ret;
604 }
605
606 bool intel_has_gpu_reset(struct drm_i915_private *i915)
607 {
608         if (USES_GUC(i915))
609                 return false;
610
611         if (!i915_modparams.reset)
612                 return false;
613
614         return intel_get_gpu_reset(i915);
615 }
616
617 bool intel_has_reset_engine(struct drm_i915_private *i915)
618 {
619         return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
620 }
621
622 int intel_reset_guc(struct drm_i915_private *i915)
623 {
624         u32 guc_domain =
625                 INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
626         int ret;
627
628         GEM_BUG_ON(!HAS_GUC(i915));
629
630         intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
631         ret = gen6_hw_domain_reset(i915, guc_domain);
632         intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
633
634         return ret;
635 }
636
637 /*
638  * Ensure the irq handler finishes, and is not run again.
639  * Also return the active request so that we only search for it once.
640  */
641 static void reset_prepare_engine(struct intel_engine_cs *engine)
642 {
643         /*
644          * During the reset sequence, we must prevent the engine from
645          * entering RC6. As the context state is undefined until we restart
646          * the engine, if it does enter RC6 during the reset, the state
647          * written to the powercontext is undefined and so we may lose
648          * GPU state upon resume, i.e. fail to restart after a reset.
649          */
650         intel_uncore_forcewake_get(&engine->i915->uncore, FORCEWAKE_ALL);
651         engine->reset.prepare(engine);
652 }
653
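/*
 * Invalidate userspace GGTT mmaps that depend on fence registers so that
 * any access after the reset takes a fresh fault instead of reusing a stale
 * mapping.
 */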
654 static void revoke_mmaps(struct drm_i915_private *i915)
655 {
656         int i;
657
658         for (i = 0; i < i915->num_fence_regs; i++) {
659                 struct drm_vma_offset_node *node;
660                 struct i915_vma *vma;
661                 u64 vma_offset;
662
663                 vma = READ_ONCE(i915->fence_regs[i].vma);
664                 if (!vma)
665                         continue;
666
667                 if (!i915_vma_has_userfault(vma))
668                         continue;
669
670                 GEM_BUG_ON(vma->fence != &i915->fence_regs[i]);
671                 node = &vma->obj->base.vma_node;
672                 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
673                 unmap_mapping_range(i915->drm.anon_inode->i_mapping,
674                                     drm_vma_node_offset_addr(node) + vma_offset,
675                                     vma->size,
676                                     1);
677         }
678 }
679
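/*
 * Quiesce all engines (holding forcewake so they cannot enter rc6) and
 * intel_uc before the reset itself is performed.
 */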
680 static void reset_prepare(struct drm_i915_private *i915)
681 {
682         struct intel_engine_cs *engine;
683         enum intel_engine_id id;
684
685         for_each_engine(engine, i915, id)
686                 reset_prepare_engine(engine);
687
688         intel_uc_reset_prepare(i915);
689 }
690
691 static void gt_revoke(struct drm_i915_private *i915)
692 {
693         revoke_mmaps(i915);
694 }
695
696 static int gt_reset(struct drm_i915_private *i915,
697                     intel_engine_mask_t stalled_mask)
698 {
699         struct intel_engine_cs *engine;
700         enum intel_engine_id id;
701         int err;
702
703         /*
704          * Everything depends on having the GTT running, so we need to start
705          * there.
706          */
707         err = i915_ggtt_enable_hw(i915);
708         if (err)
709                 return err;
710
711         for_each_engine(engine, i915, id)
712                 intel_engine_reset(engine, stalled_mask & engine->mask);
713
714         i915_gem_restore_fences(i915);
715
716         return err;
717 }
718
719 static void reset_finish_engine(struct intel_engine_cs *engine)
720 {
721         engine->reset.finish(engine);
722         intel_uncore_forcewake_put(&engine->i915->uncore, FORCEWAKE_ALL);
723 }
724
725 struct i915_gpu_restart {
726         struct work_struct work;
727         struct drm_i915_private *i915;
728 };
729
730 static void restart_work(struct work_struct *work)
731 {
732         struct i915_gpu_restart *arg = container_of(work, typeof(*arg), work);
733         struct drm_i915_private *i915 = arg->i915;
734         struct intel_engine_cs *engine;
735         enum intel_engine_id id;
736         intel_wakeref_t wakeref;
737
738         wakeref = intel_runtime_pm_get(i915);
739         mutex_lock(&i915->drm.struct_mutex);
740         WRITE_ONCE(i915->gpu_error.restart, NULL);
741
742         for_each_engine(engine, i915, id) {
743                 struct i915_request *rq;
744
745                 /*
746                  * Ostensibly, we always want a context loaded for powersaving,
747                  * so if the engine is idle after the reset, send a request
748                  * to load our scratch kernel_context.
749                  */
750                 if (!intel_engine_is_idle(engine))
751                         continue;
752
753                 rq = i915_request_alloc(engine, i915->kernel_context);
754                 if (!IS_ERR(rq))
755                         i915_request_add(rq);
756         }
757
758         mutex_unlock(&i915->drm.struct_mutex);
759         intel_runtime_pm_put(i915, wakeref);
760
761         kfree(arg);
762 }
763
764 static void reset_finish(struct drm_i915_private *i915)
765 {
766         struct intel_engine_cs *engine;
767         enum intel_engine_id id;
768
769         for_each_engine(engine, i915, id) {
770                 reset_finish_engine(engine);
771                 intel_engine_signal_breadcrumbs(engine);
772         }
773 }
774
775 static void reset_restart(struct drm_i915_private *i915)
776 {
777         struct i915_gpu_restart *arg;
778
779         /*
780          * Following the reset, ensure that we always reload context for
781          * powersaving, and to correct engine->last_retired_context. Since
782          * this requires us to submit a request, queue a worker to do that
783          * task for us to evade any locking here.
784          */
785         if (READ_ONCE(i915->gpu_error.restart))
786                 return;
787
788         arg = kmalloc(sizeof(*arg), GFP_KERNEL);
789         if (arg) {
790                 arg->i915 = i915;
791                 INIT_WORK(&arg->work, restart_work);
792
793                 WRITE_ONCE(i915->gpu_error.restart, arg);
794                 queue_work(i915->wq, &arg->work);
795         }
796 }
797
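/*
 * Submission stub installed once the device is wedged: each request is
 * completed immediately with -EIO instead of being executed.
 */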
798 static void nop_submit_request(struct i915_request *request)
799 {
800         struct intel_engine_cs *engine = request->engine;
801         unsigned long flags;
802
803         GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
804                   engine->name, request->fence.context, request->fence.seqno);
805         dma_fence_set_error(&request->fence, -EIO);
806
807         spin_lock_irqsave(&engine->timeline.lock, flags);
808         __i915_request_submit(request);
809         i915_request_mark_complete(request);
810         spin_unlock_irqrestore(&engine->timeline.lock, flags);
811
812         intel_engine_queue_breadcrumbs(engine);
813 }
814
815 static void __i915_gem_set_wedged(struct drm_i915_private *i915)
816 {
817         struct i915_gpu_error *error = &i915->gpu_error;
818         struct intel_engine_cs *engine;
819         enum intel_engine_id id;
820
821         if (test_bit(I915_WEDGED, &error->flags))
822                 return;
823
824         if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
825                 struct drm_printer p = drm_debug_printer(__func__);
826
827                 for_each_engine(engine, i915, id)
828                         intel_engine_dump(engine, &p, "%s\n", engine->name);
829         }
830
831         GEM_TRACE("start\n");
832
833         /*
834          * First, stop submission to hw, but do not yet complete requests by
835          * rolling the global seqno forward (since this would complete requests
836          * for which we haven't set the fence error to EIO yet).
837          */
838         reset_prepare(i915);
839
840         /* Even if the GPU reset fails, it should still stop the engines */
841         if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
842                 intel_gpu_reset(i915, ALL_ENGINES);
843
844         for_each_engine(engine, i915, id) {
845                 engine->submit_request = nop_submit_request;
846                 engine->schedule = NULL;
847         }
848         i915->caps.scheduler = 0;
849
850         /*
851          * Make sure no request can slip through without getting completed by
852          * either this call here to intel_engine_write_global_seqno, or the one
853          * in nop_submit_request.
854          */
855         synchronize_rcu_expedited();
856
857         /* Mark all executing requests as skipped */
858         for_each_engine(engine, i915, id)
859                 engine->cancel_requests(engine);
860
861         reset_finish(i915);
862
863         smp_mb__before_atomic();
864         set_bit(I915_WEDGED, &error->flags);
865
866         GEM_TRACE("end\n");
867 }
868
869 void i915_gem_set_wedged(struct drm_i915_private *i915)
870 {
871         struct i915_gpu_error *error = &i915->gpu_error;
872         intel_wakeref_t wakeref;
873
874         mutex_lock(&error->wedge_mutex);
875         with_intel_runtime_pm(i915, wakeref)
876                 __i915_gem_set_wedged(i915);
877         mutex_unlock(&error->wedge_mutex);
878 }
879
880 static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
881 {
882         struct i915_gpu_error *error = &i915->gpu_error;
883         struct i915_timeline *tl;
884
885         if (!test_bit(I915_WEDGED, &error->flags))
886                 return true;
887
888         if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
889                 return false;
890
891         GEM_TRACE("start\n");
892
893         /*
894          * Before unwedging, make sure that all pending operations
895          * are flushed and errored out - we may have requests waiting upon
896          * third party fences. We marked all inflight requests as EIO, and
897          * every execbuf since has returned EIO; for consistency we want
898          * all currently pending requests to also be marked as EIO, which
899          * is done inside our nop_submit_request - and so we must wait.
900          *
901          * No more can be submitted until we reset the wedged bit.
902          */
903         mutex_lock(&i915->gt.timelines.mutex);
904         list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
905                 struct i915_request *rq;
906
907                 rq = i915_active_request_get_unlocked(&tl->last_request);
908                 if (!rq)
909                         continue;
910
911                 /*
912                  * All internal dependencies (i915_requests) will have
913                  * been flushed by the set-wedge, but we may be stuck waiting
914                  * for external fences. These should all be capped to 10s
915                  * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
916                  * in the worst case.
917                  */
918                 dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
919                 i915_request_put(rq);
920         }
921         mutex_unlock(&i915->gt.timelines.mutex);
922
923         intel_engines_sanitize(i915, false);
924
925         /*
926          * Undo nop_submit_request. We prevent all new i915 requests from
927          * being queued (by disallowing execbuf whilst wedged) so having
928          * waited for all active requests above, we know the system is idle
929          * and do not have to worry about a thread being inside
930          * engine->submit_request() as we swap over. So unlike installing
931          * the nop_submit_request on reset, we can do this from normal
932          * context and do not require stop_machine().
933          */
934         intel_engines_reset_default_submission(i915);
935
936         GEM_TRACE("end\n");
937
938         smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
939         clear_bit(I915_WEDGED, &i915->gpu_error.flags);
940
941         return true;
942 }
943
944 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
945 {
946         struct i915_gpu_error *error = &i915->gpu_error;
947         bool result;
948
949         mutex_lock(&error->wedge_mutex);
950         result = __i915_gem_unset_wedged(i915);
951         mutex_unlock(&error->wedge_mutex);
952
953         return result;
954 }
955
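/*
 * Revoke userspace mmaps, then attempt the hardware reset (with a short
 * backoff between retries) before rebuilding the GT state in gt_reset().
 */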
956 static int do_reset(struct drm_i915_private *i915,
957                     intel_engine_mask_t stalled_mask)
958 {
959         int err, i;
960
961         gt_revoke(i915);
962
963         err = intel_gpu_reset(i915, ALL_ENGINES);
964         for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
965                 msleep(10 * (i + 1));
966                 err = intel_gpu_reset(i915, ALL_ENGINES);
967         }
968         if (err)
969                 return err;
970
971         return gt_reset(i915, stalled_mask);
972 }
973
974 /**
975  * i915_reset - reset chip after a hang
976  * @i915: #drm_i915_private to reset
977  * @stalled_mask: mask of the stalled engines with the guilty requests
978  * @reason: user error message for why we are resetting
979  *
980  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
981  * on failure.
982  *
983  * Procedure is fairly simple:
984  *   - reset the chip using the reset reg
985  *   - re-init context state
986  *   - re-init hardware status page
987  *   - re-init ring buffer
988  *   - re-init interrupt state
989  *   - re-init display
990  */
991 void i915_reset(struct drm_i915_private *i915,
992                 intel_engine_mask_t stalled_mask,
993                 const char *reason)
994 {
995         struct i915_gpu_error *error = &i915->gpu_error;
996         int ret;
997
998         GEM_TRACE("flags=%lx\n", error->flags);
999
1000         might_sleep();
1001         assert_rpm_wakelock_held(i915);
1002         GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
1003
1004         /* Clear any previous failed attempts at recovery. Time to try again. */
1005         if (!__i915_gem_unset_wedged(i915))
1006                 return;
1007
1008         if (reason)
1009                 dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
1010         error->reset_count++;
1011
1012         reset_prepare(i915);
1013
1014         if (!intel_has_gpu_reset(i915)) {
1015                 if (i915_modparams.reset)
1016                         dev_err(i915->drm.dev, "GPU reset not supported\n");
1017                 else
1018                         DRM_DEBUG_DRIVER("GPU reset disabled\n");
1019                 goto error;
1020         }
1021
1022         if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
1023                 intel_runtime_pm_disable_interrupts(i915);
1024
1025         if (do_reset(i915, stalled_mask)) {
1026                 dev_err(i915->drm.dev, "Failed to reset chip\n");
1027                 goto taint;
1028         }
1029
1030         if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
1031                 intel_runtime_pm_enable_interrupts(i915);
1032
1033         intel_overlay_reset(i915);
1034
1035         /*
1036          * Next we need to restore the context, but we don't use those
1037          * yet either...
1038          *
1039          * Ring buffer needs to be re-initialized in the KMS case, or if X
1040          * was running at the time of the reset (i.e. we weren't VT
1041          * switched away).
1042          */
1043         ret = i915_gem_init_hw(i915);
1044         if (ret) {
1045                 DRM_ERROR("Failed to initialise HW following reset (%d)\n",
1046                           ret);
1047                 goto error;
1048         }
1049
1050         i915_queue_hangcheck(i915);
1051
1052 finish:
1053         reset_finish(i915);
1054         if (!__i915_wedged(error))
1055                 reset_restart(i915);
1056         return;
1057
1058 taint:
1059         /*
1060          * History tells us that if we cannot reset the GPU now, we
1061          * never will. This then impacts everything that is run
1062          * subsequently. On failing the reset, we mark the driver
1063          * as wedged, preventing further execution on the GPU.
1064          * We also want to go one step further and add a taint to the
1065          * kernel so that any subsequent faults can be traced back to
1066          * this failure. This is important for CI, where if the
1067          * GPU/driver fails we would like to reboot and restart testing
1068          * rather than continue on into oblivion. For everyone else,
1069          * the system should still plod along, but they have been warned!
1070          */
1071         add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
1072 error:
1073         __i915_gem_set_wedged(i915);
1074         goto finish;
1075 }
1076
1077 static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
1078                                         struct intel_engine_cs *engine)
1079 {
1080         return intel_gpu_reset(i915, engine->mask);
1081 }
1082
1083 /**
1084  * i915_reset_engine - reset GPU engine to recover from a hang
1085  * @engine: engine to reset
1086  * @msg: reason for GPU reset; or NULL for no dev_notice()
1087  *
1088  * Reset a specific GPU engine. Useful if a hang is detected.
1089  * Returns zero on successful reset or otherwise an error code.
1090  *
1091  * Procedure is:
1092  *  - identify the request that caused the hang and drop it
1093  *  - reset engine (which will force the engine to idle)
1094  *  - re-init/configure engine
1095  */
1096 int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
1097 {
1098         struct i915_gpu_error *error = &engine->i915->gpu_error;
1099         int ret;
1100
1101         GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
1102         GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
1103
1104         reset_prepare_engine(engine);
1105
1106         if (msg)
1107                 dev_notice(engine->i915->drm.dev,
1108                            "Resetting %s for %s\n", engine->name, msg);
1109         error->reset_engine_count[engine->id]++;
1110
1111         if (!engine->i915->guc.execbuf_client)
1112                 ret = intel_gt_reset_engine(engine->i915, engine);
1113         else
1114                 ret = intel_guc_reset_engine(&engine->i915->guc, engine);
1115         if (ret) {
1116                 /* If we fail here, we expect to fallback to a global reset */
1117                 DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
1118                                  engine->i915->guc.execbuf_client ? "GuC " : "",
1119                                  engine->name, ret);
1120                 goto out;
1121         }
1122
1123         /*
1124          * The request that caused the hang is stuck on elsp, we know the
1125          * active request and can drop it, adjust head to skip the offending
1126          * request to resume executing remaining requests in the queue.
1127          */
1128         intel_engine_reset(engine, true);
1129
1130         /*
1131          * The engine and its registers (and workarounds in case of render)
1132          * have been reset to their default values. Follow the init_ring
1133          * process to program RING_MODE, HWSP and re-enable submission.
1134          */
1135         ret = engine->init_hw(engine);
1136         if (ret)
1137                 goto out;
1138
1139 out:
1140         intel_engine_cancel_stop_cs(engine);
1141         reset_finish_engine(engine);
1142         return ret;
1143 }
1144
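/*
 * Full device reset on behalf of i915_handle_error(): emit the reset
 * uevents, flush anyone inside the reset_backoff SRCU section, and use a
 * watchdog to wedge the driver if the reset does not complete in time.
 */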
1145 static void i915_reset_device(struct drm_i915_private *i915,
1146                               u32 engine_mask,
1147                               const char *reason)
1148 {
1149         struct i915_gpu_error *error = &i915->gpu_error;
1150         struct kobject *kobj = &i915->drm.primary->kdev->kobj;
1151         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1152         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1153         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1154         struct i915_wedge_me w;
1155
1156         kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
1157
1158         DRM_DEBUG_DRIVER("resetting chip\n");
1159         kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1160
1161         /* Use a watchdog to ensure that our reset completes */
1162         i915_wedge_on_timeout(&w, i915, 5 * HZ) {
1163                 intel_prepare_reset(i915);
1164
1165                 /* Flush everyone using a resource about to be clobbered */
1166                 synchronize_srcu_expedited(&error->reset_backoff_srcu);
1167
1168                 mutex_lock(&error->wedge_mutex);
1169                 i915_reset(i915, engine_mask, reason);
1170                 mutex_unlock(&error->wedge_mutex);
1171
1172                 intel_finish_reset(i915);
1173         }
1174
1175         if (!test_bit(I915_WEDGED, &error->flags))
1176                 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
1177 }
1178
1179 static void clear_register(struct drm_i915_private *dev_priv, i915_reg_t reg)
1180 {
1181         I915_WRITE(reg, I915_READ(reg));
1182 }
1183
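/*
 * Clear the sticky error registers (PGTBL_ER, IPEIR, EIR and the per-engine
 * fault registers) once the error state has been captured.
 */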
1184 void i915_clear_error_registers(struct drm_i915_private *dev_priv)
1185 {
1186         u32 eir;
1187
1188         if (!IS_GEN(dev_priv, 2))
1189                 clear_register(dev_priv, PGTBL_ER);
1190
1191         if (INTEL_GEN(dev_priv) < 4)
1192                 clear_register(dev_priv, IPEIR(RENDER_RING_BASE));
1193         else
1194                 clear_register(dev_priv, IPEIR_I965);
1195
1196         clear_register(dev_priv, EIR);
1197         eir = I915_READ(EIR);
1198         if (eir) {
1199                 /*
1200                  * some errors might have become stuck,
1201                  * mask them.
1202                  */
1203                 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
1204                 I915_WRITE(EMR, I915_READ(EMR) | eir);
1205                 I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
1206         }
1207
1208         if (INTEL_GEN(dev_priv) >= 8) {
1209                 I915_WRITE(GEN8_RING_FAULT_REG,
1210                            I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
1211                 POSTING_READ(GEN8_RING_FAULT_REG);
1212         } else if (INTEL_GEN(dev_priv) >= 6) {
1213                 struct intel_engine_cs *engine;
1214                 enum intel_engine_id id;
1215
1216                 for_each_engine(engine, dev_priv, id) {
1217                         I915_WRITE(RING_FAULT_REG(engine),
1218                                    I915_READ(RING_FAULT_REG(engine)) &
1219                                    ~RING_FAULT_VALID);
1220                 }
1221                 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS0]));
1222         }
1223 }
1224
1225 /**
1226  * i915_handle_error - handle a gpu error
1227  * @i915: i915 device private
1228  * @engine_mask: mask representing engines that are hung
1229  * @flags: control flags
1230  * @fmt: Error message format string
1231  *
1232  * Do some basic checking of register state at error time and
1233  * dump it to the syslog.  Also call i915_capture_error_state() to make
1234  * sure we get a record and make it available in debugfs.  Fire a uevent
1235  * so userspace knows something bad happened (should trigger collection
1236  * of a ring dump etc.).
1237  */
1238 void i915_handle_error(struct drm_i915_private *i915,
1239                        intel_engine_mask_t engine_mask,
1240                        unsigned long flags,
1241                        const char *fmt, ...)
1242 {
1243         struct i915_gpu_error *error = &i915->gpu_error;
1244         struct intel_engine_cs *engine;
1245         intel_wakeref_t wakeref;
1246         intel_engine_mask_t tmp;
1247         char error_msg[80];
1248         char *msg = NULL;
1249
1250         if (fmt) {
1251                 va_list args;
1252
1253                 va_start(args, fmt);
1254                 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
1255                 va_end(args);
1256
1257                 msg = error_msg;
1258         }
1259
1260         /*
1261          * In most cases it's guaranteed that we get here with an RPM
1262          * reference held, for example because there is a pending GPU
1263          * request that won't finish until the reset is done. This
1264          * isn't the case at least when we get here by doing a
1265          * simulated reset via debugfs, so get an RPM reference.
1266          */
1267         wakeref = intel_runtime_pm_get(i915);
1268
1269         engine_mask &= INTEL_INFO(i915)->engine_mask;
1270
1271         if (flags & I915_ERROR_CAPTURE) {
1272                 i915_capture_error_state(i915, engine_mask, msg);
1273                 i915_clear_error_registers(i915);
1274         }
1275
1276         /*
1277          * Try engine reset when available. We fall back to full reset if
1278          * single reset fails.
1279          */
1280         if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
1281                 for_each_engine_masked(engine, i915, engine_mask, tmp) {
1282                         BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
1283                         if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1284                                              &error->flags))
1285                                 continue;
1286
1287                         if (i915_reset_engine(engine, msg) == 0)
1288                                 engine_mask &= ~engine->mask;
1289
1290                         clear_bit(I915_RESET_ENGINE + engine->id,
1291                                   &error->flags);
1292                         wake_up_bit(&error->flags,
1293                                     I915_RESET_ENGINE + engine->id);
1294                 }
1295         }
1296
1297         if (!engine_mask)
1298                 goto out;
1299
1300         /* Full reset needs the mutex, stop any other user trying to do so. */
1301         if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
1302                 wait_event(error->reset_queue,
1303                            !test_bit(I915_RESET_BACKOFF, &error->flags));
1304                 goto out; /* piggy-back on the other reset */
1305         }
1306
1307         /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
1308         synchronize_rcu_expedited();
1309
1310         /* Prevent any other reset-engine attempt. */
1311         for_each_engine(engine, i915, tmp) {
1312                 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1313                                         &error->flags))
1314                         wait_on_bit(&error->flags,
1315                                     I915_RESET_ENGINE + engine->id,
1316                                     TASK_UNINTERRUPTIBLE);
1317         }
1318
1319         i915_reset_device(i915, engine_mask, msg);
1320
1321         for_each_engine(engine, i915, tmp) {
1322                 clear_bit(I915_RESET_ENGINE + engine->id,
1323                           &error->flags);
1324         }
1325
1326         clear_bit(I915_RESET_BACKOFF, &error->flags);
1327         wake_up_all(&error->reset_queue);
1328
1329 out:
1330         intel_runtime_pm_put(i915, wakeref);
1331 }
1332
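/*
 * Block while a reset is in progress and then enter the reset_backoff SRCU
 * read section. A minimal usage sketch (caller and error handling are
 * illustrative only):
 *
 *	int srcu;
 *
 *	srcu = i915_reset_trylock(i915);
 *	if (srcu < 0)
 *		return srcu;
 *	... access state that must not race with a reset ...
 *	i915_reset_unlock(i915, srcu);
 */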
1333 int i915_reset_trylock(struct drm_i915_private *i915)
1334 {
1335         struct i915_gpu_error *error = &i915->gpu_error;
1336         int srcu;
1337
1338         might_lock(&error->reset_backoff_srcu);
1339         might_sleep();
1340
1341         rcu_read_lock();
1342         while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
1343                 rcu_read_unlock();
1344
1345                 if (wait_event_interruptible(error->reset_queue,
1346                                              !test_bit(I915_RESET_BACKOFF,
1347                                                        &error->flags)))
1348                         return -EINTR;
1349
1350                 rcu_read_lock();
1351         }
1352         srcu = srcu_read_lock(&error->reset_backoff_srcu);
1353         rcu_read_unlock();
1354
1355         return srcu;
1356 }
1357
1358 void i915_reset_unlock(struct drm_i915_private *i915, int tag)
1359 __releases(&i915->gpu_error.reset_backoff_srcu)
1360 {
1361         struct i915_gpu_error *error = &i915->gpu_error;
1362
1363         srcu_read_unlock(&error->reset_backoff_srcu, tag);
1364 }
1365
1366 int i915_terminally_wedged(struct drm_i915_private *i915)
1367 {
1368         struct i915_gpu_error *error = &i915->gpu_error;
1369
1370         might_sleep();
1371
1372         if (!__i915_wedged(error))
1373                 return 0;
1374
1375         /* Reset still in progress? Maybe we will recover? */
1376         if (!test_bit(I915_RESET_BACKOFF, &error->flags))
1377                 return -EIO;
1378
1379         /* XXX intel_reset_finish() still takes struct_mutex!!! */
1380         if (mutex_is_locked(&i915->drm.struct_mutex))
1381                 return -EAGAIN;
1382
1383         if (wait_event_interruptible(error->reset_queue,
1384                                      !test_bit(I915_RESET_BACKOFF,
1385                                                &error->flags)))
1386                 return -EINTR;
1387
1388         return __i915_wedged(error) ? -EIO : 0;
1389 }
1390
1391 bool i915_reset_flush(struct drm_i915_private *i915)
1392 {
1393         int err;
1394
1395         cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
1396
1397         flush_workqueue(i915->wq);
1398         GEM_BUG_ON(READ_ONCE(i915->gpu_error.restart));
1399
1400         mutex_lock(&i915->drm.struct_mutex);
1401         err = i915_gem_wait_for_idle(i915,
1402                                      I915_WAIT_LOCKED |
1403                                      I915_WAIT_FOR_IDLE_BOOST,
1404                                      MAX_SCHEDULE_TIMEOUT);
1405         mutex_unlock(&i915->drm.struct_mutex);
1406
1407         return !err;
1408 }
1409
1410 static void i915_wedge_me(struct work_struct *work)
1411 {
1412         struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
1413
1414         dev_err(w->i915->drm.dev,
1415                 "%s timed out, cancelling all in-flight rendering.\n",
1416                 w->name);
1417         i915_gem_set_wedged(w->i915);
1418 }
1419
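/*
 * Arm an on-stack watchdog that declares the GPU wedged if the caller does
 * not reach __i915_fini_wedge() within @timeout; see i915_wedge_on_timeout().
 */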
1420 void __i915_init_wedge(struct i915_wedge_me *w,
1421                        struct drm_i915_private *i915,
1422                        long timeout,
1423                        const char *name)
1424 {
1425         w->i915 = i915;
1426         w->name = name;
1427
1428         INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
1429         schedule_delayed_work(&w->work, timeout);
1430 }
1431
1432 void __i915_fini_wedge(struct i915_wedge_me *w)
1433 {
1434         cancel_delayed_work_sync(&w->work);
1435         destroy_delayed_work_on_stack(&w->work);
1436         w->i915 = NULL;
1437 }