/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"

#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)

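/*
 * Wake up the current bottom-half waiter, if any. Returns a mask of
 * ENGINE_WAKEUP_WAITER if a waiter was found (and woken) and
 * ENGINE_WAKEUP_ASLEEP if that waiter appeared to be asleep beforehand,
 * i.e. it never noticed the interrupt. Caller must hold b->irq_lock.
 */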
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
	struct intel_wait *wait;
	unsigned int result = 0;

	lockdep_assert_held(&b->irq_lock);

	wait = b->irq_wait;
	if (wait) {
		/*
		 * N.B. Since task_asleep() and ttwu are not atomic, the
		 * waiter may actually go to sleep after the check, causing
		 * us to suppress a valid wakeup. We prefer to reduce the
		 * number of false positive missed_breadcrumb() warnings
		 * at the expense of a few false negatives, as it is easy
		 * to trigger a false positive under heavy load. Enough
		 * signal should remain from genuine missed_breadcrumb()
		 * for us to detect in CI.
		 */
		bool was_asleep = task_asleep(wait->tsk);

		result = ENGINE_WAKEUP_WAITER;
		if (wake_up_process(wait->tsk) && was_asleep)
			result |= ENGINE_WAKEUP_ASLEEP;
	}

	return result;
}

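/*
 * Irqsave wrapper around __intel_breadcrumbs_wakeup() for callers, such as
 * the hangcheck timer, that do not already hold b->irq_lock.
 */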
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;
	unsigned int result;

	spin_lock_irqsave(&b->irq_lock, flags);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irqrestore(&b->irq_lock, flags);

	return result;
}

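/* Time of the next hangcheck, rounded up to a jiffy boundary to batch wakeups */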
static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

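/*
 * The waiter slept through its wakeup: dump the engine state for debugging
 * and flag the engine so that we fall back to fake-irq polling.
 */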
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s missed breadcrumb at %pS\n",
				  engine->name, __builtin_return_address(0));
	}

	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

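/*
 * Timer callback armed alongside the irq: if no interrupt has arrived within
 * DRM_I915_HANGCHECK_JIFFIES and the waiter is found asleep, report a missed
 * breadcrumb and start the fake-irq poll timer.
 */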
static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
	struct intel_engine_cs *engine =
		from_timer(engine, t, breadcrumbs.hangcheck);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned int irq_count;

	if (!b->irq_armed)
		return;

	irq_count = READ_ONCE(b->irq_count);
	if (b->hangcheck_interrupts != irq_count) {
		b->hangcheck_interrupts = irq_count;
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g. a low priority task on a
	 * loaded system) and wait until it sleeps before declaring a missed
	 * interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt as the GPU has stopped advancing
	 * but we still have a waiter. Assuming all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		missed_breadcrumb(engine);
		mod_timer(&b->fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
}

static void intel_breadcrumbs_fake_irq(struct timer_list *t)
{
	struct intel_engine_cs *engine =
		from_timer(engine, t, breadcrumbs.fake_irq);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/*
	 * The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
	 * Here the worker will wake up every jiffie in order to kick the
	 * oldest waiter to do the coherent seqno check.
	 */

	spin_lock_irq(&b->irq_lock);
	if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irq(&b->irq_lock);
	if (!b->irq_armed)
		return;

	/* If the user has disabled the fake-irq, restore the hangchecking */
	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) {
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	mod_timer(&b->fake_irq, jiffies + 1);
}

static void irq_enable(struct intel_engine_cs *engine)
{
	/*
	 * FIXME: Ideally we want this on the API boundary, but for the
	 * sake of testing with mock breadcrumbs (no HW so unable to
	 * enable irqs) we place it deep within the bowels, at the point
	 * of no return.
	 */
	GEM_BUG_ON(!intel_irqs_enabled(engine->i915));

	/* Caller disables interrupts */
	if (engine->irq_enable) {
		spin_lock(&engine->i915->irq_lock);
		engine->irq_enable(engine);
		spin_unlock(&engine->i915->irq_lock);
	}
}

static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	if (engine->irq_disable) {
		spin_lock(&engine->i915->irq_lock);
		engine->irq_disable(engine);
		spin_unlock(&engine->i915->irq_lock);
	}
}

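/*
 * Drop the irq reference taken when the breadcrumbs were armed. Caller must
 * hold b->irq_lock and must already have detached the bottom-half waiter.
 */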
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->irq_lock);
	GEM_BUG_ON(b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);

	GEM_BUG_ON(!b->irq_enabled);
	if (!--b->irq_enabled)
		irq_disable(engine);

	b->irq_armed = false;
}

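/*
 * intel_engine_pin_breadcrumbs_irq() and intel_engine_unpin_breadcrumbs_irq()
 * take and release a reference on b->irq_enabled, keeping the user interrupt
 * enabled even when no waiter is present.
 */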
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	if (!b->irq_enabled++)
		irq_enable(engine);
	GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
	spin_unlock_irq(&b->irq_lock);
}

void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
	if (!--b->irq_enabled)
		irq_disable(engine);
	spin_unlock_irq(&b->irq_lock);
}

void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait, *n;

	if (!b->irq_armed)
		return;

	/*
	 * We only disarm the irq when we are idle (all requests completed),
	 * so if the bottom-half remains asleep, it missed the request
	 * completion.
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
		missed_breadcrumb(engine);

	spin_lock_irq(&b->rb_lock);

	spin_lock(&b->irq_lock);
	b->irq_wait = NULL;
	if (b->irq_armed)
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock(&b->irq_lock);

	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
		GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
		RB_CLEAR_NODE(&wait->node);
		wake_up_process(wait->tsk);
	}
	b->waiters = RB_ROOT;

	spin_unlock_irq(&b->rb_lock);
}

static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/*
	 * Only start with the heavy weight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
}

static void enable_fake_irq(struct intel_breadcrumbs *b)
{
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
}

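/*
 * Arm the breadcrumbs: enable the user interrupt (or schedule the fake-irq
 * fallback) on behalf of the first waiter. Returns true if the interrupt was
 * freshly enabled, in which case the caller should recheck the seqno in case
 * the enabling raced with the request completing.
 */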
static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;
	bool enabled;

	lockdep_assert_held(&b->irq_lock);
	if (b->irq_armed)
		return false;

	/* The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt.
		 */
		return true;
	}

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked
	 * by i915->gt.awake, so we can forgo holding our own wakref
	 * for the interrupt as before i915->gt.awake is released (when
	 * the driver is idle) we disarm the breadcrumbs.
	 */

	/* No interrupts? Kick the waiter every jiffie! */
	enabled = false;
	if (!b->irq_enabled++ &&
	    !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
		irq_enable(engine);
		enabled = true;
	}

	enable_fake_irq(b);
	return enabled;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	lockdep_assert_held(&b->rb_lock);
	GEM_BUG_ON(b->irq_wait == wait);

	/*
	 * This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task. N.B. when the
	 * task wakes up, it will find the empty rb_node, discern that it
	 * has already been removed from the tree and skip the serialisation
	 * of the b->rb_lock and b->irq_lock. This means that the destruction
	 * of the intel_wait is not serialised with the interrupt handler
	 * by the waiter - it must instead be serialised by the caller.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	if (wait->tsk->state != TASK_RUNNING)
		wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
					    struct rb_node *next)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(!b->irq_wait);
	b->irq_wait = to_wait(next);
	spin_unlock(&b->irq_lock);

	/* We always wake up the next waiter that takes over as the bottom-half
	 * as we may delegate not only the irq-seqno barrier to the next waiter
	 * but also the task of waking up concurrent waiters.
	 */
	if (next)
		wake_up_process(to_wait(next)->tsk);
}

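/*
 * Waiters are kept in an rbtree ordered by seqno, using i915_seqno_passed()
 * so that u32 wraparound is handled correctly. Ties on the same seqno are
 * broken by task priority, making the highest priority waiter the
 * bottom-half for that group.
 */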
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first, armed;
	u32 seqno;

	GEM_BUG_ON(!wait->seqno);

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold
	 * the spinlock, we know that the first_waiter must be delayed, so
	 * we can reduce some of the sequential wake up latency if we take
	 * action ourselves and wake up the completed tasks in parallel.
	 * Also, by removing stale elements in the tree, we may be able to
	 * reduce the ping-pong between the old bottom-half and ourselves
	 * as first-waiter.
	 */
	armed = false;
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);

	if (first) {
		spin_lock(&b->irq_lock);
		b->irq_wait = wait;
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		armed = __intel_breadcrumbs_enable_irq(b);
		spin_unlock(&b->irq_lock);
	}

	if (completed) {
		/* Advance the bottom-half (b->irq_wait) before we wake up
		 * the waiters who may scribble over their intel_wait
		 * just as the interrupt handler is dereferencing it via
		 * b->irq_wait.
		 */
		if (!first) {
			struct rb_node *next = rb_next(completed);
			GEM_BUG_ON(next == &wait->node);
			__intel_breadcrumbs_next(engine, next);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	GEM_BUG_ON(!b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);

	return armed;
}

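/*
 * Returns true if the caller should recheck for completion: either the irq
 * was (re)armed for this waiter, or the request has already started.
 */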
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool armed;

	spin_lock_irq(&b->rb_lock);
	armed = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
	if (armed)
		return armed;

	/* Make the caller recheck if its request has already started. */
	return intel_engine_has_started(engine, wait->seqno);
}

static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->rb_lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->irq_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		__intel_breadcrumbs_next(engine, next);
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

out:
	GEM_BUG_ON(b->irq_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->irq_wait ? &b->irq_wait->node : NULL));
}

void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node)) {
		GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
		return;
	}

	spin_lock_irq(&b->rb_lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
}

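/*
 * Run the signaler at the lowest realtime priority: it preempts normal
 * tasks for low-latency fence signaling but yields to other RT work.
 */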
static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

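/*
 * Main loop for the dedicated signaler kthread: woken either by the user
 * interrupt (as a waiter in the rbtree) or by a client adding a new signal,
 * it fires dma_fence_signal() for every completed request on b->signals in
 * seqno order.
 */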
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct i915_request *rq, *n;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		bool do_schedule = true;
		LIST_HEAD(list);
		u32 seqno;

		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&b->signals))
			goto sleep;

		/*
		 * We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		seqno = intel_engine_get_seqno(engine);

		spin_lock_irq(&b->rb_lock);
		list_for_each_entry_safe(rq, n, &b->signals, signaling.link) {
			u32 this = rq->signaling.wait.seqno;

			GEM_BUG_ON(!rq->signaling.wait.seqno);

			if (!i915_seqno_passed(seqno, this))
				break;

			if (likely(this == i915_request_global_seqno(rq))) {
				__intel_engine_remove_wait(engine,
							   &rq->signaling.wait);

				rq->signaling.wait.seqno = 0;
				__list_del_entry(&rq->signaling.link);

				if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
					      &rq->fence.flags)) {
					list_add_tail(&rq->signaling.link,
						      &list);
					i915_request_get(rq);
				}
			}
		}
		spin_unlock_irq(&b->rb_lock);

		if (!list_empty(&list)) {
			local_bh_disable();
			list_for_each_entry_safe(rq, n, &list, signaling.link) {
				dma_fence_signal(&rq->fence);
				GEM_BUG_ON(!i915_request_completed(rq));
				i915_request_put(rq);
			}
			local_bh_enable(); /* kick start the tasklets */

			/*
			 * If the engine is saturated we may be continually
			 * processing completed requests. This angers the
			 * NMI watchdog if we never let anything else
			 * have access to the CPU. Let's pretend to be nice
			 * and relinquish the CPU if we burn through the
			 * entire RT timeslice!
			 */
			do_schedule = need_resched();
		}

		if (unlikely(do_schedule)) {
sleep:
			if (kthread_should_park())
				kthread_parkme();

			if (unlikely(kthread_should_stop()))
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}

static void insert_signal(struct intel_breadcrumbs *b,
			  struct i915_request *request,
			  const u32 seqno)
{
	struct i915_request *iter;

	lockdep_assert_held(&b->rb_lock);

	/*
	 * A reasonable assumption is that we are called to add signals
	 * in sequence, as the requests are submitted for execution and
	 * assigned a global_seqno. This will be the case for the majority
	 * of internally generated signals (inter-engine signaling).
	 *
	 * Out of order waiters triggering random signaling enabling will
	 * be more problematic, but hopefully rare enough and the list
	 * small enough that the O(N) insertion sort is not an issue.
	 */
	list_for_each_entry_reverse(iter, &b->signals, signaling.link)
		if (i915_seqno_passed(seqno, iter->signaling.wait.seqno))
			break;

	list_add(&request->signaling.link, &iter->signaling.link);
}

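/*
 * Hook this request's completion up to dma_fence_signal() by inserting the
 * signaler kthread as a waiter on its seqno. Returns false only if the wait
 * is already complete and no interrupt-driven signal will be delivered.
 */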
bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait = &request->signaling.wait;
	u32 seqno;

	/*
	 * Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->rb_lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	seqno = i915_request_global_seqno(request);
	if (!seqno) /* will be enabled later upon execution */
		return true;

	GEM_BUG_ON(wait->seqno);
	wait->tsk = b->signaler;
	wait->request = request;
	wait->seqno = seqno;

	/*
	 * Add ourselves into the list of waiters, registering our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	spin_lock(&b->rb_lock);
	insert_signal(b, request, seqno);
	wakeup &= __intel_engine_add_wait(engine, wait);
	spin_unlock(&b->rb_lock);

	if (wakeup)
		wake_up_process(b->signaler);
	return !intel_wait_complete(wait);
}

void intel_engine_cancel_signaling(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	if (!READ_ONCE(request->signaling.wait.seqno))
		return;

	spin_lock(&b->rb_lock);
	__intel_engine_remove_wait(engine, &request->signaling.wait);
	if (fetch_and_zero(&request->signaling.wait.seqno))
		__list_del_entry(&request->signaling.link);
	spin_unlock(&b->rb_lock);
}

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->rb_lock);
	spin_lock_init(&b->irq_lock);

	timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
	timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);

	INIT_LIST_HEAD(&b->signals);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}

static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */
	del_timer_sync(&b->hangcheck);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;

	spin_lock_irqsave(&b->irq_lock, flags);

	/*
	 * Leave the fake_irq timer enabled (if it is running), but clear the
	 * bit so that it turns itself off on its next wake up and goes back
	 * to the long hangcheck interval if still required.
	 */
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	spin_unlock_irqrestore(&b->irq_lock, flags);
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->irq_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(!list_empty(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif