/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_request.h"
#include "i915_scheduler.h"

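/*
 * schedule_lock serialises updates to the shared dependency graph
 * (signalers_list/waiters_list) and priority propagation across engines.
 */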
static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}

void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);
	node->attr.priority = I915_PRIORITY_INVALID;
}

static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
		     struct i915_dependency *dep)
{
	kmem_cache_free(i915->dependencies, dep);
}

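/*
 * Record @signal as a prerequisite of @node. Returns true if the
 * dependency was tracked; false if @signal has already completed, in
 * which case the caller retains ownership of @dep.
 */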
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		list_add(&dep->wait_link, &signal->waiters_list);
		list_add(&dep->signal_link, &node->signalers_list);
		dep->signaler = signal;
		dep->flags = flags;

		ret = true;
	}

	spin_unlock(&schedule_lock);

	return ret;
}

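/*
 * Allocate a dependency link for @node waiting upon @signal. The
 * I915_DEPENDENCY_ALLOC flag marks it to be returned to the slab cache
 * in i915_sched_node_fini(); if the signaler has already completed, the
 * link is not needed and is freed immediately.
 */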
int i915_sched_node_add_dependency(struct drm_i915_private *i915,
				   struct i915_sched_node *node,
				   struct i915_sched_node *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      I915_DEPENDENCY_ALLOC))
		i915_dependency_free(i915, dep);

	return 0;
}

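/*
 * Called on retirement: detach @node from both sides of the dependency
 * graph and release any links that were allocated on its behalf.
 */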
void i915_sched_node_fini(struct drm_i915_private *i915,
			  struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	GEM_BUG_ON(!list_empty(&node->link));

	spin_lock(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!node_signaled(dep->signaler));
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	spin_unlock(&schedule_lock);
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

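/*
 * Debug-only consistency check: the cached rbtree of priolists must be
 * sorted in descending priority order, and each priolist's used bitmask
 * must have a bit set for every non-empty request bucket.
 */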
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
	struct rb_node *rb;
	int last_prio, i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
		   rb_first(&execlists->queue.rb_root));

	last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority >= last_prio);
		last_prio = p->priority;

		GEM_BUG_ON(!p->used);
		for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
			if (list_empty(&p->requests[i]))
				continue;

			GEM_BUG_ON(!(p->used & BIT(i)));
		}
	}
}

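/*
 * Find (or create) the priolist for @prio on @engine, returning the list
 * head of the bucket into which requests of that priority are queued.
 * Caller must hold the engine's timeline lock.
 */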
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;
	int idx, i;

	lockdep_assert_held(&engine->timeline.lock);
	assert_priolists(execlists);

	/* buckets sorted from highest [in slot 0] to lowest priority */
	idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
	prio >>= I915_USER_PRIORITY_SHIFT;
	if (unlikely(execlists->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
	/* most positive priority is scheduled first, equal priorities fifo */
	rb = NULL;
	parent = &execlists->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			goto out;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &execlists->default_priolist;
	} else {
		p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
		/* Convert an allocation failure to a priority bump */
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

			/* To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in fifo, and schedule
			 * will ensure that dependencies are emitted in fifo.
			 * There will still be some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */
			execlists->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
		INIT_LIST_HEAD(&p->requests[i]);
	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &execlists->queue, first);
	p->used = 0;

out:
	p->used |= BIT(idx);
	return &p->requests[idx];
}

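/*
 * The dependency graph may span engines. As we walk it, swap the
 * currently held engine timeline lock for the one belonging to @node's
 * engine.
 */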
static struct intel_engine_cs *
sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
{
	struct intel_engine_cs *engine = node_to_request(node)->engine;

	GEM_BUG_ON(!locked);

	if (engine != locked) {
		spin_unlock(&locked->timeline.lock);
		spin_lock(&engine->timeline.lock);
	}

	return engine;
}

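/*
 * Report whether @rq's context is the one currently executing in the
 * first execlists port of @engine, i.e. whether bumping its priority
 * could only preempt ourselves.
 */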
static bool inflight(const struct i915_request *rq,
		     const struct intel_engine_cs *engine)
{
	const struct i915_request *active;

	if (!i915_request_is_active(rq))
		return false;

	active = port_request(engine->execlists.port);
	return active->hw_context == rq->hw_context;
}

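/*
 * Raise the effective priority of @rq and, transitively, of every
 * request it depends upon, to at least @attr->priority. Caller must
 * hold schedule_lock.
 */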
static void __i915_schedule(struct i915_request *rq,
			    const struct i915_sched_attr *attr)
{
	struct list_head *uninitialized_var(pl);
	struct intel_engine_cs *engine, *last;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	const int prio = attr->priority;
	LIST_HEAD(dfs);

	/* Needed in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (i915_request_completed(rq))
		return;

	if (prio <= READ_ONCE(rq->sched.attr.priority))
		return;

	stack.signaler = &rq->sched;
	list_add(&stack.dfs_link, &dfs);

	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its dependencies
	 * to the end of the list (this may include an already visited
	 * request) and continue to walk onwards onto the new dependencies. The
	 * end result is a topological list of requests in reverse order, the
	 * last element in the list is the request we must execute first.
	 */
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */
	if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&rq->sched.link));
		rq->sched.attr = *attr;

		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	last = NULL;
	engine = rq->engine;
	spin_lock_irq(&engine->timeline.lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		INIT_LIST_HEAD(&dep->dfs_link);

		engine = sched_lock_engine(node, engine);
		lockdep_assert_held(&engine->timeline.lock);

		/* Recheck after acquiring the engine->timeline.lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		node->attr.priority = prio;
		if (!list_empty(&node->link)) {
			if (last != engine) {
				pl = i915_sched_lookup_priolist(engine, prio);
				last = engine;
			}
			list_move_tail(&node->link, pl);
		} else {
			/*
			 * If the request is not in the priolist queue because
			 * it is not yet runnable, then it doesn't contribute
			 * to our preemption decisions. On the other hand,
			 * if the request is on the HW, it too is not in the
			 * queue; but in that case we may still need to reorder
			 * the inflight requests.
			 */
			if (!i915_sw_fence_done(&node_to_request(node)->submit))
				continue;
		}

		if (prio <= engine->execlists.queue_priority_hint)
			continue;

		engine->execlists.queue_priority_hint = prio;

		/*
		 * If we are already the currently executing context, don't
		 * bother evaluating if we should preempt ourselves.
		 */
		if (inflight(node_to_request(node), engine))
			continue;

		/* Defer (tasklet) submission until after all of our updates. */
		tasklet_hi_schedule(&engine->execlists.tasklet);
	}

	spin_unlock_irq(&engine->timeline.lock);
}

void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock(&schedule_lock);
	__i915_schedule(rq, attr);
	spin_unlock(&schedule_lock);
}

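/*
 * Apply an internal priority boost to an already-submitted request by
 * OR-ing @bump into the low-order bits below the user priority level.
 */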
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
	struct i915_sched_attr attr;

	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);

	if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
		return;

	spin_lock_bh(&schedule_lock);

	attr = rq->sched.attr;
	attr.priority |= bump;
	__i915_schedule(rq, &attr);

	spin_unlock_bh(&schedule_lock);
}