/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct i915_global_scheduler {
        struct i915_global base;
        struct kmem_cache *slab_dependencies;
        struct kmem_cache *slab_priorities;
} global;

static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
        return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
        return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
        return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
        return rb_entry(rb, struct i915_priolist, node);
}

static void assert_priolists(struct intel_engine_execlists * const execlists)
{
        struct rb_node *rb;
        long last_prio, i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;

        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));

        last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);

                GEM_BUG_ON(p->priority >= last_prio);
                last_prio = p->priority;

                GEM_BUG_ON(!p->used);
                for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
                        if (list_empty(&p->requests[i]))
                                continue;

                        GEM_BUG_ON(!(p->used & BIT(i)));
                }
        }
}
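
/*
 * A request's priority is a compound value: the high bits
 * (prio >> I915_USER_PRIORITY_SHIFT) give the user priority level and select
 * a struct i915_priolist in the execlists rbtree, while the low bits
 * (prio & I915_PRIORITY_MASK) are internal bumps selecting a bucket within
 * that priolist. A larger internal bump maps to a smaller bucket index and
 * so is dequeued first among requests of the same user priority.
 */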
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
        int idx, i;

        lockdep_assert_held(&engine->active.lock);
        assert_priolists(execlists);

        /* buckets sorted from highest [in slot 0] to lowest priority */
        idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
        prio >>= I915_USER_PRIORITY_SHIFT;
        if (unlikely(execlists->no_priolist))
                prio = I915_PRIORITY_NORMAL;

find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
        parent = &execlists->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = to_priolist(rb);
                if (prio > p->priority) {
                        parent = &rb->rb_left;
                } else if (prio < p->priority) {
                        parent = &rb->rb_right;
                        first = false;
                } else {
                        goto out;
                }
        }

        if (prio == I915_PRIORITY_NORMAL) {
                p = &execlists->default_priolist;
        } else {
                p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */

                        /* To maintain ordering with all rendering, after an
                         * allocation failure we have to disable all scheduling.
                         * Requests will then be executed in fifo, and schedule
                         * will ensure that dependencies are emitted in fifo.
                         * There will still be some reordering with existing
                         * requests, so if userspace lied about their
                         * dependencies that reordering may be visible.
                         */
                        execlists->no_priolist = true;
                        goto find_priolist;
                }
        }

        p->priority = prio;
        for (i = 0; i < ARRAY_SIZE(p->requests); i++)
                INIT_LIST_HEAD(&p->requests[i]);
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, &execlists->queue, first);
        p->used = 0;

out:
        p->used |= BIT(idx);
        return &p->requests[idx];
}

void __i915_priolist_free(struct i915_priolist *p)
{
        kmem_cache_free(global.slab_priorities, p);
}
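
/*
 * Scratch state carried across the engine-locked walk in __i915_schedule():
 * it caches the priolist looked up for the current engine and is cleared by
 * sched_lock_engine() whenever the walk hops to a different engine.
 */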
struct sched_cache {
        struct list_head *priolist;
};

static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
                  struct intel_engine_cs *locked,
                  struct sched_cache *cache)
{
        const struct i915_request *rq = node_to_request(node);
        struct intel_engine_cs *engine;

        GEM_BUG_ON(!locked);

        /*
         * Virtual engines complicate acquiring the engine timeline lock,
         * as their rq->engine pointer is not stable until under that
         * engine lock. The simple ploy we use is to take the lock then
         * check that the rq still belongs to the newly locked engine.
         */
        while (locked != (engine = READ_ONCE(rq->engine))) {
                spin_unlock(&locked->active.lock);
                memset(cache, 0, sizeof(*cache));
                spin_lock(&engine->active.lock);
                locked = engine;
        }

        GEM_BUG_ON(locked != engine);
        return engine;
}
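
/*
 * For preemption decisions, treat an inflight request as if it already
 * carried the __NO_PREEMPTION bump, so that a waiter whose only advantage
 * is that bump cannot preempt work that is already executing on the HW.
 */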
static inline int rq_prio(const struct i915_request *rq)
{
        return rq->sched.attr.priority | __NO_PREEMPTION;
}

static void kick_submission(struct intel_engine_cs *engine, int prio)
{
        const struct i915_request *inflight =
                port_request(engine->execlists.port);

        /*
         * If we are already the currently executing context, don't
         * bother evaluating if we should preempt ourselves, or if
         * we expect nothing to change as a result of running the
         * tasklet, i.e. we have not changed the priority queue
         * sufficiently to oust the running context.
         */
        if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
                return;

        tasklet_hi_schedule(&engine->execlists.tasklet);
}
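
/*
 * Propagate a priority change through the dependency graph. The caller must
 * hold schedule_lock (needed for the temporary dfs_link inside each
 * i915_dependency); each engine's active.lock is acquired as the walk
 * visits that engine via sched_lock_engine().
 */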
static void __i915_schedule(struct i915_sched_node *node,
                            const struct i915_sched_attr *attr)
{
        struct intel_engine_cs *engine;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        const int prio = attr->priority;
        struct sched_cache cache;
        LIST_HEAD(dfs);

        /* Needed in order to use the temporary link inside i915_dependency */
        lockdep_assert_held(&schedule_lock);
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

        if (prio <= READ_ONCE(node->attr.priority))
                return;

        if (node_signaled(node))
                return;

        stack.signaler = node;
        list_add(&stack.dfs_link, &dfs);

        /*
         * Recursively bump all dependent priorities to match the new request.
         *
         * A naive approach would be to use recursion:
         * static void update_priorities(struct i915_sched_node *node, prio) {
         *      list_for_each_entry(dep, &node->signalers_list, signal_link)
         *              update_priorities(dep->signal, prio)
         *      queue_request(node);
         * }
         * but that may have unlimited recursion depth and so runs a very
         * real risk of overrunning the kernel stack. Instead, we build
         * a flat list of all dependencies starting with the current request.
         * As we walk the list of dependencies, we add all of its dependencies
         * to the end of the list (this may include an already visited
         * request) and continue to walk onwards onto the new dependencies. The
         * end result is a topological list of requests in reverse order, the
         * last element in the list is the request we must execute first.
         */
        list_for_each_entry(dep, &dfs, dfs_link) {
                struct i915_sched_node *node = dep->signaler;

                /* If we are already flying, we know we have no signalers */
                if (node_started(node))
                        continue;

                /*
                 * Within an engine, there can be no cycle, but we may
                 * refer to the same dependency chain multiple times
                 * (redundant dependencies are not eliminated) and across
                 * engines.
                 */
                list_for_each_entry(p, &node->signalers_list, signal_link) {
                        GEM_BUG_ON(p == dep); /* no cycles! */

                        if (node_signaled(p->signaler))
                                continue;

                        if (prio > READ_ONCE(p->signaler->attr.priority))
                                list_move_tail(&p->dfs_link, &dfs);
                }
        }

        /*
         * If we didn't need to bump any existing priorities, and we haven't
         * yet submitted this request (i.e. there is no potential race with
         * execlists_submit_request()), we can set our own priority and skip
         * acquiring the engine locks.
         */
        if (node->attr.priority == I915_PRIORITY_INVALID) {
                GEM_BUG_ON(!list_empty(&node->link));
                node->attr = *attr;

                if (stack.dfs_link.next == stack.dfs_link.prev)
                        return;

                __list_del_entry(&stack.dfs_link);
        }

        memset(&cache, 0, sizeof(cache));
        engine = node_to_request(node)->engine;
        spin_lock(&engine->active.lock);

        /* Fifo and depth-first replacement ensure our deps execute before us */
        engine = sched_lock_engine(node, engine, &cache);
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
                INIT_LIST_HEAD(&dep->dfs_link);

                node = dep->signaler;
                engine = sched_lock_engine(node, engine, &cache);
                lockdep_assert_held(&engine->active.lock);

                /* Recheck after acquiring the engine->timeline.lock */
                if (prio <= node->attr.priority || node_signaled(node))
                        continue;

                GEM_BUG_ON(node_to_request(node)->engine != engine);

                node->attr.priority = prio;

                if (list_empty(&node->link)) {
                        /*
                         * If the request is not in the priolist queue because
                         * it is not yet runnable, then it doesn't contribute
                         * to our preemption decisions. On the other hand,
                         * if the request is on the HW, it too is not in the
                         * queue; but in that case we may still need to reorder
                         * the inflight requests.
                         */
                        continue;
                }

                if (!intel_engine_is_virtual(engine) &&
                    !i915_request_is_active(node_to_request(node))) {
                        if (!cache.priolist)
                                cache.priolist =
                                        i915_sched_lookup_priolist(engine,
                                                                   prio);
                        list_move_tail(&node->link, cache.priolist);
                }

                if (prio <= engine->execlists.queue_priority_hint)
                        continue;

                engine->execlists.queue_priority_hint = prio;

                /* Defer (tasklet) submission until after all of our updates. */
                kick_submission(engine, prio);
        }

        spin_unlock(&engine->active.lock);
}

void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
        spin_lock_irq(&schedule_lock);
        __i915_schedule(&rq->sched, attr);
        spin_unlock_irq(&schedule_lock);
}
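
/*
 * A "bump" is one of the internal priority flags below
 * I915_USER_PRIORITY_SHIFT (e.g. __NO_PREEMPTION): OR-ing it in raises the
 * request within its current user priority level without changing that level.
 */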
static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
        struct i915_sched_attr attr = node->attr;

        attr.priority |= bump;
        __i915_schedule(node, &attr);
}

void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
        unsigned long flags;

        GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
        if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
                return;

        spin_lock_irqsave(&schedule_lock, flags);
        __bump_priority(&rq->sched, bump);
        spin_unlock_irqrestore(&schedule_lock, flags);
}

void i915_sched_node_init(struct i915_sched_node *node)
{
        INIT_LIST_HEAD(&node->signalers_list);
        INIT_LIST_HEAD(&node->waiters_list);
        INIT_LIST_HEAD(&node->link);
        node->attr.priority = I915_PRIORITY_INVALID;
        node->semaphores = 0;
        node->flags = 0;
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
        return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
        kmem_cache_free(global.slab_dependencies, dep);
}
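
/*
 * Returns true if the dependency was recorded, or false if @signal has
 * already completed and the dependency is redundant; in the latter case the
 * caller may immediately free @dep (see i915_sched_node_add_dependency()).
 */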
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                                      struct i915_sched_node *signal,
                                      struct i915_dependency *dep,
                                      unsigned long flags)
{
        bool ret = false;

        spin_lock_irq(&schedule_lock);

        if (!node_signaled(signal)) {
                INIT_LIST_HEAD(&dep->dfs_link);
                list_add(&dep->wait_link, &signal->waiters_list);
                list_add(&dep->signal_link, &node->signalers_list);
                dep->signaler = signal;
                dep->flags = flags;

                /* Keep track of whether anyone on this chain has a semaphore */
                if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
                    !node_started(signal))
                        node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;

                /*
                 * As we do not allow WAIT to preempt inflight requests,
                 * once we have executed a request, along with triggering
                 * any execution callbacks, we must preserve its ordering
                 * within the non-preemptible FIFO.
                 */
                BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
                if (flags & I915_DEPENDENCY_EXTERNAL)
                        __bump_priority(signal, __NO_PREEMPTION);

                ret = true;
        }

        spin_unlock_irq(&schedule_lock);

        return ret;
}

int i915_sched_node_add_dependency(struct i915_sched_node *node,
                                   struct i915_sched_node *signal)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc();
        if (!dep)
                return -ENOMEM;

        if (!__i915_sched_node_add_dependency(node, signal, dep,
                                              I915_DEPENDENCY_EXTERNAL |
                                              I915_DEPENDENCY_ALLOC))
                i915_dependency_free(dep);

        return 0;
}

void i915_sched_node_fini(struct i915_sched_node *node)
{
        struct i915_dependency *dep, *tmp;

        spin_lock_irq(&schedule_lock);

        /*
         * Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
                GEM_BUG_ON(!node_signaled(dep->signaler));
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
                GEM_BUG_ON(dep->signaler != node);
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }

        spin_unlock_irq(&schedule_lock);
}
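
/*
 * The dependency and priolist slab caches are registered with the
 * i915_globals interface below: the shrink callback releases unused slab
 * pages back to the system, and the exit callback destroys the caches.
 */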
static void i915_global_scheduler_shrink(void)
{
        kmem_cache_shrink(global.slab_dependencies);
        kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
        kmem_cache_destroy(global.slab_dependencies);
        kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
        .shrink = i915_global_scheduler_shrink,
        .exit = i915_global_scheduler_exit,
} };

int __init i915_global_scheduler_init(void)
{
        global.slab_dependencies = KMEM_CACHE(i915_dependency,
                                              SLAB_HWCACHE_ALIGN);
        if (!global.slab_dependencies)
                return -ENOMEM;

        global.slab_priorities = KMEM_CACHE(i915_priolist,
                                            SLAB_HWCACHE_ALIGN);
        if (!global.slab_priorities)
                goto err_priorities;

        i915_global_register(&global.base);
        return 0;

err_priorities:
        /* slab_priorities failed; release the dependencies cache we created */
        kmem_cache_destroy(global.slab_dependencies);
        return -ENOMEM;
}