2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "intel_reset.h"
/*
 * Hangcheck state fields. NOTE(review): the struct's opening line is not
 * visible in this chunk — presumably the per-engine hangcheck state that
 * later code accesses as engine->hangcheck; confirm against the full file.
 */
/* Last classified hangcheck action for this engine. */
32 enum intel_engine_hangcheck_action action;
/* jiffies stamp of the last action change; compared against the
 * DEAD/WEDGED timeouts in hangcheck_accumulate_sample(). */
33 unsigned long action_timestamp;
/* Accumulated INSTDONE bits, used to filter unstable subunit state. */
35 struct intel_instdone instdone;
/*
 * instdone_unchanged - compare one INSTDONE register against its accumulator.
 *
 * Returns true when the current sample sets no bits beyond those already
 * recorded in *old_instdone, i.e. this subunit shows no new progress since
 * the last hangcheck iteration.
 *
 * NOTE(review): lines are missing from this chunk — the accumulation of
 * @tmp back into *old_instdone and the return statement are not visible;
 * confirm against the full file.
 */
40 static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
/* Union of the fresh sample with everything seen so far. */
42 u32 tmp = current_instdone | *old_instdone;
/* If the union equals the old value, no new bits appeared. */
45 unchanged = tmp == *old_instdone;
/*
 * subunits_stuck - check whether all tracked render subunits stopped moving.
 *
 * Samples the INSTDONE registers and compares each against the accumulated
 * state in engine->hangcheck.instdone via instdone_unchanged(). Only
 * meaningful for the render engine (RCS0); see the early bail-out below.
 *
 * NOTE(review): lines are missing from this chunk (braces, the non-render
 * return value, loop variable declarations, final return) — confirm
 * against the full file.
 */
51 static bool subunits_stuck(struct intel_engine_cs *engine)
53 struct drm_i915_private *dev_priv = engine->i915;
/* Fresh INSTDONE sample read from the hardware below. */
54 struct intel_instdone instdone;
/* Accumulator of bits seen across hangcheck iterations. */
55 struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
/* Subunit progress tracking only applies to the render engine. */
60 if (engine->id != RCS0)
63 intel_engine_get_instdone(engine, &instdone);
65 /* There might be unstable subunit states even when
66 * actual head is not moving. Filter out the unstable ones by
67 * accumulating the undone -> done transitions and only
68 * consider those as progress.
/* Stuck only if *every* tracked register shows no new bits. */
70 stuck = instdone_unchanged(instdone.instdone,
71 &accu_instdone->instdone);
72 stuck &= instdone_unchanged(instdone.slice_common,
73 &accu_instdone->slice_common);
/* Sampler and row units are tracked per slice/subslice. */
75 for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
76 stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
77 &accu_instdone->sampler[slice][subslice]);
78 stuck &= instdone_unchanged(instdone.row[slice][subslice],
79 &accu_instdone->row[slice][subslice]);
/*
 * head_stuck - classify progress of the engine's active head pointer.
 *
 * Returns ENGINE_ACTIVE_HEAD when ACTHD moved since the last sample (and
 * resets the accumulated subunit state), ENGINE_ACTIVE_SUBUNITS when the
 * head is static but subunits still show progress.
 *
 * NOTE(review): the final return for the fully-stuck case is not visible
 * in this chunk (presumably ENGINE_DEAD, given the caller's comparison) —
 * confirm against the full file.
 */
85 static enum intel_engine_hangcheck_action
86 head_stuck(struct intel_engine_cs *engine, u64 acthd)
88 if (acthd != engine->hangcheck.acthd) {
90 /* Clear subunit states on head movement */
91 memset(&engine->hangcheck.instdone, 0,
92 sizeof(engine->hangcheck.instdone));
94 return ENGINE_ACTIVE_HEAD;
/* Head is static: fall back to fine-grained subunit tracking. */
97 if (!subunits_stuck(engine))
98 return ENGINE_ACTIVE_SUBUNITS;
/*
 * engine_stuck - second-stage classification for an engine that stopped
 * advancing its seqno.
 *
 * First defers to head_stuck(); only a fully dead verdict proceeds to the
 * WAIT_FOR_EVENT recovery attempt below. Writing RING_CTL back with the
 * RING_WAIT bit set kicks the engine out of the wait.
 *
 * NOTE(review): lines are missing from this chunk (braces, the returns
 * after the head_stuck() and IS_GEN checks, the declaration of @tmp, and
 * the final return) — confirm against the full file.
 */
103 static enum intel_engine_hangcheck_action
104 engine_stuck(struct intel_engine_cs *engine, u64 acthd)
106 struct drm_i915_private *dev_priv = engine->i915;
107 enum intel_engine_hangcheck_action ha;
110 ha = head_stuck(engine, acthd);
/* Anything other than ENGINE_DEAD is reported as-is (return not visible). */
111 if (ha != ENGINE_DEAD)
/* Gen2 cannot use the RB_WAIT kick (see comment below). */
114 if (IS_GEN(dev_priv, 2))
117 /* Is the chip hanging on a WAIT_FOR_EVENT?
118 * If so we can simply poke the RB_WAIT bit
119 * and break the hang. This should work on
120 * all but the second generation chipsets.
122 tmp = ENGINE_READ(engine, RING_CTL);
123 if (tmp & RING_WAIT) {
/* Record the error (no capture flags) before kicking the ring. */
124 i915_handle_error(dev_priv, engine->mask, 0,
125 "stuck wait on %s", engine->name);
126 ENGINE_WRITE(engine, RING_CTL, tmp);
127 return ENGINE_WAIT_KICK;
133 static void hangcheck_load_sample(struct intel_engine_cs *engine,
134 struct hangcheck *hc)
136 hc->acthd = intel_engine_get_active_head(engine);
137 hc->ring = ENGINE_READ(engine, RING_START);
138 hc->head = ENGINE_READ(engine, RING_HEAD);
141 static void hangcheck_store_sample(struct intel_engine_cs *engine,
142 const struct hangcheck *hc)
144 engine->hangcheck.acthd = hc->acthd;
145 engine->hangcheck.last_ring = hc->ring;
146 engine->hangcheck.last_head = hc->head;
/*
 * hangcheck_get_action - classify the engine's state from the new sample.
 *
 * Any movement of RING_START or RING_HEAD since the stored sample counts
 * as seqno-level activity; otherwise defer to engine_stuck() for the
 * finer-grained ACTHD/subunit checks.
 *
 * NOTE(review): the return value for the idle case is not visible in this
 * chunk — confirm against the full file.
 */
149 static enum intel_engine_hangcheck_action
150 hangcheck_get_action(struct intel_engine_cs *engine,
151 const struct hangcheck *hc)
153 if (intel_engine_is_idle(engine))
/* A ring base change means a different context is executing. */
156 if (engine->hangcheck.last_ring != hc->ring)
157 return ENGINE_ACTIVE_SEQNO;
/* Head movement means requests are being consumed. */
159 if (engine->hangcheck.last_head != hc->head)
160 return ENGINE_ACTIVE_SEQNO;
162 return engine_stuck(engine, hc->acthd);
/*
 * hangcheck_accumulate_sample - fold the new sample into the engine's
 * hangcheck state and compute the stall/wedge verdicts.
 *
 * Picks a timeout based on the classified action (dead engines get
 * I915_ENGINE_DEAD_TIMEOUT; engines whose head/subunits still move get the
 * more lenient I915_SEQNO_DEAD_TIMEOUT), then sets hc->stalled/hc->wedged
 * by comparing jiffies against action_timestamp plus that timeout.
 *
 * NOTE(review): lines are missing from this chunk (break statements, some
 * case labels, the default label before MISSING_CASE) — confirm against
 * the full file.
 */
165 static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
166 struct hangcheck *hc)
168 unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
170 hc->action = hangcheck_get_action(engine, hc);
172 /* We always increment the progress
173 * if the engine is busy and still processing
174 * the same request, so that no single request
175 * can run indefinitely (such as a chain of
176 * batches). The only time we do not increment
177 * the hangcheck score on this ring, if this
178 * engine is in a legitimate wait for another
179 * engine. In that case the waiting engine is a
180 * victim and we want to be sure we catch the
181 * right culprit. Then every time we do kick
182 * the ring, make it as a progress as the seqno
183 * advancement might ensure and if not, it
184 * will catch the hanging engine.
187 switch (hc->action) {
189 case ENGINE_ACTIVE_SEQNO:
190 /* Clear head and subunit states on seqno movement */
193 memset(&engine->hangcheck.instdone, 0,
194 sizeof(engine->hangcheck.instdone));
196 /* Intentional fall through */
197 case ENGINE_WAIT_KICK:
/* Progress was made (or forced): restart the dead-engine clock. */
199 engine->hangcheck.action_timestamp = jiffies;
202 case ENGINE_ACTIVE_HEAD:
203 case ENGINE_ACTIVE_SUBUNITS:
205 * Seqno stuck with still active engine gets leeway,
206 * in hopes that it is just a long shader.
208 timeout = I915_SEQNO_DEAD_TIMEOUT;
/* Unknown action values are a programming error. */
215 MISSING_CASE(hc->action);
/* Stalled: no progress for longer than the chosen timeout. */
218 hc->stalled = time_after(jiffies,
219 engine->hangcheck.action_timestamp + timeout);
/* Wedged: even a reset attempt has not helped within the hard limit. */
220 hc->wedged = time_after(jiffies,
221 engine->hangcheck.action_timestamp +
222 I915_ENGINE_WEDGED_TIMEOUT);
/*
 * hangcheck_declare_hang - report a GPU hang covering the hung engines.
 *
 * Builds a one-line synopsis naming each hung engine, then hands off to
 * i915_handle_error() with capture enabled to record and recover.
 *
 * NOTE(review): lines are missing from this chunk — the remaining
 * parameters (the hung/stuck engine masks, by their use below), the msg/len
 * declarations, and the mask adjustment hinted at by the comment are not
 * visible; confirm against the full file. The "return" of a void call from
 * a void function is legal C.
 */
225 static void hangcheck_declare_hang(struct drm_i915_private *i915,
229 struct intel_engine_cs *engine;
230 intel_engine_mask_t tmp;
234 /* If some rings hung but others were still busy, only
235 * blame the hanging rings in the synopsis.
239 len = scnprintf(msg, sizeof(msg),
240 "%s on ", stuck == hung ? "no progress" : "hang");
241 for_each_engine_masked(engine, i915, hung, tmp)
242 len += scnprintf(msg + len, sizeof(msg) - len,
243 "%s, ", engine->name);
246 return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg);
250 * This is called when the chip hasn't reported back with completed
251 * batchbuffers in a long time. We keep track of per-ring seqno progress and
252 * if there is no progress, the hangcheck score for that ring is increased.
253 * Further, acthd is inspected to see if the ring is stuck. On stuck case
254 * we kick the ring. If we see no progress on three subsequent calls
255 * we assume chip is wedged and try to fix it by resetting the chip.
/*
 * i915_hangcheck_elapsed - periodic hangcheck worker.
 *
 * For each engine: load a fresh sample, classify it, store it back, and
 * collect hung/stuck/wedged masks. Hangs are then reported via
 * hangcheck_declare_hang(); a wedge-level timeout gives up and wedges the
 * whole device. Finally the work re-queues itself.
 *
 * NOTE(review): lines are missing from this chunk (braces, early returns
 * after the enable/awake/wedged checks, the check guarding the wakeref,
 * the declaration of hc, and the conditions guarding the hung/wedged
 * accumulation and set_wedged paths) — confirm against the full file.
 */
257 static void i915_hangcheck_elapsed(struct work_struct *work)
259 struct drm_i915_private *dev_priv =
260 container_of(work, typeof(*dev_priv),
261 gpu_error.hangcheck_work.work);
262 struct intel_engine_cs *engine;
263 enum intel_engine_id id;
264 unsigned int hung = 0, stuck = 0, wedged = 0;
265 intel_wakeref_t wakeref;
/* Module parameter can disable hangcheck entirely. */
267 if (!i915_modparams.enable_hangcheck)
/* Nothing to check while the GT is asleep. */
270 if (!READ_ONCE(dev_priv->gt.awake))
/* Device already declared unrecoverable. */
273 if (i915_terminally_wedged(dev_priv))
/* Only run if the device is already runtime-active; do not wake it. */
276 wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
280 /* As enabling the GPU requires fairly extensive mmio access,
281 * periodically arm the mmio checker to see if we are triggering
282 * any invalid access.
284 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
286 for_each_engine(engine, dev_priv, id) {
/* Flush completed requests before sampling progress. */
289 intel_engine_signal_breadcrumbs(engine);
291 hangcheck_load_sample(engine, &hc);
292 hangcheck_accumulate_sample(engine, &hc);
293 hangcheck_store_sample(engine, &hc);
/* Accumulation conditions for these masks are not visible here. */
296 hung |= engine->mask;
297 if (hc.action != ENGINE_DEAD)
298 stuck |= engine->mask;
302 wedged |= engine->mask;
/* On debug builds, dump the state of every non-idle engine. */
305 if (GEM_SHOW_DEBUG() && (hung | stuck)) {
306 struct drm_printer p = drm_debug_printer("hangcheck");
308 for_each_engine(engine, dev_priv, id) {
309 if (intel_engine_is_idle(engine))
312 intel_engine_dump(engine, &p, "%s\n", engine->name);
/* Recovery itself timed out: give up and wedge the device. */
317 dev_err(dev_priv->drm.dev,
318 "GPU recovery timed out,"
319 " cancelling all in-flight rendering.\n");
321 i915_gem_set_wedged(dev_priv);
325 hangcheck_declare_hang(dev_priv, hung, stuck);
327 intel_runtime_pm_put(dev_priv, wakeref);
329 /* Reset timer in case GPU hangs without another request being added */
330 i915_queue_hangcheck(dev_priv);
333 void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
335 memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
336 engine->hangcheck.action_timestamp = jiffies;
339 void intel_hangcheck_init(struct drm_i915_private *i915)
341 INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
342 i915_hangcheck_elapsed);
345 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
346 #include "selftest_hangcheck.c"