/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"

#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)

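/*
 * A breadcrumb is the seqno the GPU writes to the engine's hardware status
 * page when a request completes, followed by a user interrupt. The code
 * below tracks which requests want their dma_fence signalled, keeps the
 * user interrupt enabled only while such requests are outstanding, and
 * turns each interrupt into the corresponding dma_fence_signal() calls.
 */
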
static void irq_enable(struct intel_engine_cs *engine)
{
	if (!engine->irq_enable)
		return;

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
	if (!engine->irq_disable)
		return;

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
	lockdep_assert_held(&b->irq_lock);

	GEM_BUG_ON(!b->irq_enabled);
	if (!--b->irq_enabled)
		irq_disable(container_of(b,
					 struct intel_engine_cs,
					 breadcrumbs));

	b->irq_armed = false;
}

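/*
 * Disarm the breadcrumb interrupt for this engine if it is currently armed.
 * The irq_armed check is made under b->irq_lock so that we do not race with
 * a new signaler arming the interrupt concurrently.
 */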
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	if (b->irq_armed)
		__intel_breadcrumbs_disarm_irq(b);
	spin_unlock_irq(&b->irq_lock);
}

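/*
 * A request is complete once the breadcrumb seqno the GPU has written back
 * to the hardware status page has caught up with (passed) the seqno
 * assigned to the request's fence.
 */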
static inline bool __request_completed(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

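/*
 * intel_engine_breadcrumbs_irq() walks every context on the engine's
 * signalers list, collects the requests that have completed onto a local
 * list under b->irq_lock, and then calls dma_fence_signal() on each of them
 * only after the lock has been dropped, since the fence callbacks may add
 * new signalers. Returns true if any fence was signalled. Callers run with
 * interrupts disabled (see intel_engine_signal_breadcrumbs()), which is why
 * a plain spin_lock() suffices here.
 */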
bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_context *ce, *cn;
	struct list_head *pos, *next;
	LIST_HEAD(signal);

	spin_lock(&b->irq_lock);

	if (b->irq_armed && list_empty(&b->signalers))
		__intel_breadcrumbs_disarm_irq(b);

	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
		GEM_BUG_ON(list_empty(&ce->signals));

		list_for_each_safe(pos, next, &ce->signals) {
			struct i915_request *rq =
				list_entry(pos, typeof(*rq), signal_link);

			if (!__request_completed(rq))
				break;

			GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
					     &rq->fence.flags));

			/*
			 * Queue for execution after dropping the signaling
			 * spinlock as the callback chain may end up adding
			 * more signalers to the same context or engine.
			 */
			i915_request_get(rq);

			/*
			 * We may race with direct invocation of
			 * dma_fence_signal(), e.g. i915_request_retire(),
			 * so we need to acquire our reference to the request
			 * before we cancel the breadcrumb.
			 */
			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
			list_add_tail(&rq->signal_link, &signal);
		}

		/*
		 * We process the list deletion in bulk, only using a list_add
		 * (not list_move) above but keeping the status of
		 * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit.
		 */
		if (!list_is_first(pos, &ce->signals)) {
			/* Advance the list to the first incomplete request */
			__list_del_many(&ce->signals, pos);
			if (&ce->signals == pos) /* now empty */
				list_del_init(&ce->signal_link);
		}
	}

	spin_unlock(&b->irq_lock);

	list_for_each_safe(pos, next, &signal) {
		struct i915_request *rq =
			list_entry(pos, typeof(*rq), signal_link);

		dma_fence_signal(&rq->fence);
		i915_request_put(rq);
	}

	return !list_empty(&signal);
}

bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
	bool result;

	local_irq_disable();
	result = intel_engine_breadcrumbs_irq(engine);
	local_irq_enable();

	return result;
}

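/*
 * irq_work callback for b->irq_work: lets callers kick the breadcrumb
 * signaler asynchronously via irq_work_queue() rather than invoking
 * intel_engine_breadcrumbs_irq() themselves.
 */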
static void signal_irq_work(struct irq_work *work)
{
	struct intel_engine_cs *engine =
		container_of(work, typeof(*engine), breadcrumbs.irq_work);

	intel_engine_breadcrumbs_irq(engine);
}

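/*
 * Pinning the breadcrumb interrupt keeps the user interrupt enabled
 * regardless of whether any signalers are queued; b->irq_enabled is the
 * nesting count shared with __intel_breadcrumbs_arm_irq().
 */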
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	if (!b->irq_enabled++)
		irq_enable(engine);
	GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
	spin_unlock_irq(&b->irq_lock);
}

void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
	if (!--b->irq_enabled)
		irq_disable(engine);
	spin_unlock_irq(&b->irq_lock);
}

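/*
 * Arming the breadcrumbs takes a reference on the user interrupt the first
 * time a signaler is added; the interrupt is dropped again by
 * __intel_breadcrumbs_disarm_irq() once the signalers list has drained.
 */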
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	lockdep_assert_held(&b->irq_lock);
	if (b->irq_armed)
		return;

	/*
	 * The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;

	/*
	 * Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked by
	 * i915->gt.awake, so we can forgo holding our own wakeref for the
	 * interrupt: before i915->gt.awake is released (when the driver is
	 * idle), we disarm the breadcrumbs.
	 */
	if (!b->irq_enabled++)
		irq_enable(engine);
}

void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_init(&b->irq_lock);
	INIT_LIST_HEAD(&b->signalers);

	init_irq_work(&b->irq_work, signal_irq_work);
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;

	spin_lock_irqsave(&b->irq_lock, flags);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	spin_unlock_irqrestore(&b->irq_lock, flags);
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
}

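/*
 * i915_request_enable_breadcrumb() queues @rq for signaling on completion:
 * it inserts the request into its context's signal list in seqno order,
 * arms the breadcrumb interrupt and marks the request with
 * I915_FENCE_FLAG_SIGNAL. Returns false if the request had already
 * completed by the time it was queued.
 */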
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
	struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;

	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));

	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
		return true;

	spin_lock(&b->irq_lock);
	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
	    !__request_completed(rq)) {
		struct intel_context *ce = rq->hw_context;
		struct list_head *pos;

		__intel_breadcrumbs_arm_irq(b);

		/*
		 * We keep the seqno in retirement order, so we can break
		 * inside intel_engine_breadcrumbs_irq as soon as we've passed
		 * the last completed request (or seen a request that hasn't
		 * even started). We could iterate the timeline->requests list,
		 * but keeping a separate signalers list has the advantage of
		 * hopefully being much smaller than the full list and so
		 * provides faster iteration and detection when there are no
		 * more interrupts required for this context.
		 *
		 * We typically expect to add new signalers in order, so we
		 * start looking for our insertion point from the tail of
		 * the list.
		 */
		list_for_each_prev(pos, &ce->signals) {
			struct i915_request *it =
				list_entry(pos, typeof(*it), signal_link);

			if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))
				break;
		}
		list_add(&rq->signal_link, pos);
		if (pos == &ce->signals) /* catch transitions from empty list */
			list_move_tail(&ce->signal_link, &b->signalers);

		set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
	}
	spin_unlock(&b->irq_lock);

	return !__request_completed(rq);
}

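/*
 * i915_request_cancel_breadcrumb() undoes i915_request_enable_breadcrumb():
 * it removes the request from its context's signal list (and the context
 * from the engine's signalers list if that was its last entry) and clears
 * I915_FENCE_FLAG_SIGNAL.
 */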
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
	struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;

	if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
		return;

	spin_lock(&b->irq_lock);
	if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
		struct intel_context *ce = rq->hw_context;

		list_del(&rq->signal_link);
		if (list_empty(&ce->signals))
			list_del_init(&ce->signal_link);

		clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
	}
	spin_unlock(&b->irq_lock);
}

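/*
 * Debug helper: dump every request still waiting for a breadcrumb on
 * @engine to the provided drm_printer, marking completed requests with "!"
 * and started-but-incomplete requests with "*".
 */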
void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
				    struct drm_printer *p)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_context *ce;
	struct i915_request *rq;

	if (list_empty(&b->signalers))
		return;

	drm_printf(p, "Signals:\n");

	spin_lock_irq(&b->irq_lock);
	list_for_each_entry(ce, &b->signalers, signal_link) {
		list_for_each_entry(rq, &ce->signals, signal_link) {
			drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
				   rq->fence.context, rq->fence.seqno,
				   i915_request_completed(rq) ? "!" :
				   i915_request_started(rq) ? "*" :
				   "",
				   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
		}
	}
	spin_unlock_irq(&b->irq_lock);
}