/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
static struct i915_global_scheduler {
	struct i915_global base;
	struct kmem_cache *slab_dependencies;
	struct kmem_cache *slab_priorities;
} global;

static DEFINE_SPINLOCK(schedule_lock);
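/*
 * Each request embeds its scheduling state as an i915_sched_node; these
 * helpers map a node back to its owning request and query that request.
 */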
static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}
static inline bool node_started(const struct i915_sched_node *node)
{
	return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
	struct rb_node *rb;
	long last_prio;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
		   rb_first(&execlists->queue.rb_root));

	last_prio = INT_MAX;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority > last_prio);
		last_prio = p->priority;
	}
}
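/*
 * Find (or create) the i915_priolist for @prio in the engine's execlists
 * queue and return its request list. New priority levels are inserted into
 * the rbtree; the caller must hold engine->active.lock.
 */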
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;

	lockdep_assert_held(&engine->active.lock);
	assert_priolists(execlists);

	if (unlikely(execlists->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
	/* most positive priority is scheduled first, equal priorities fifo */
	rb = NULL;
	parent = &execlists->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			return &p->requests;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &execlists->default_priolist;
	} else {
		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
		/* Convert an allocation failure to a priority bump */
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

			/* To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in fifo, and schedule
			 * will ensure that dependencies are emitted in fifo.
			 * There will still be some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */
			execlists->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	INIT_LIST_HEAD(&p->requests);

	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &execlists->queue, first);

	return &p->requests;
}
void __i915_priolist_free(struct i915_priolist *p)
{
	kmem_cache_free(global.slab_priorities, p);
}
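/*
 * While walking the dependency lists below, remember the last priolist we
 * looked up so that consecutive nodes of the same priority on the same
 * engine do not repeat the rbtree search.
 */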
struct sched_cache {
	struct list_head *priolist;
};
static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
		  struct intel_engine_cs *locked,
		  struct sched_cache *cache)
{
	const struct i915_request *rq = node_to_request(node);
	struct intel_engine_cs *engine;

	GEM_BUG_ON(!locked);

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	while (locked != (engine = READ_ONCE(rq->engine))) {
		spin_unlock(&locked->active.lock);
		memset(cache, 0, sizeof(*cache));
		spin_lock(&engine->active.lock);
		locked = engine;
	}

	GEM_BUG_ON(locked != engine);
	return engine;
}
static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority;
}
static inline bool need_preempt(int prio, int active)
{
	/*
	 * Allow preemption of low -> normal -> high, but we do
	 * not allow low priority tasks to preempt other low priority
	 * tasks under the impression that latency for low priority
	 * tasks does not matter (as much as background throughput),
	 * so only preempt when the new priority is at least normal
	 * and no lower than the currently active priority.
	 */
	return prio >= max(I915_PRIORITY_NORMAL, active);
}
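/*
 * A newly scheduled request of higher priority than the engine's current
 * queue_priority_hint may warrant rescheduling: bump the hint and, if the
 * in-flight context should be preempted, kick the execlists tasklet.
 */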
static void kick_submission(struct intel_engine_cs *engine,
			    const struct i915_request *rq,
			    int prio)
{
	const struct i915_request *inflight;

	/*
	 * We only need to kick the tasklet once for the high priority
	 * new context we add into the queue.
	 */
	if (prio <= engine->execlists.queue_priority_hint)
		return;

	rcu_read_lock();

	/* Nothing currently active? We're overdue for a submission! */
	inflight = execlists_active(&engine->execlists);
	if (!inflight)
		goto unlock;

	/*
	 * If we are already the currently executing context, don't
	 * bother evaluating if we should preempt ourselves.
	 */
	if (inflight->context == rq->context)
		goto unlock;

	ENGINE_TRACE(engine,
		     "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
		     prio,
		     rq->fence.context, rq->fence.seqno,
		     inflight->fence.context, inflight->fence.seqno,
		     inflight->sched.attr.priority);

	engine->execlists.queue_priority_hint = prio;
	if (need_preempt(prio, rq_prio(inflight)))
		tasklet_hi_schedule(&engine->execlists.tasklet);

unlock:
	rcu_read_unlock();
}
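/*
 * Propagate a priority bump: raise @node and every incomplete signaler it
 * (transitively) depends upon to at least the requested priority, re-sort
 * any requests already sitting in a priority queue, and kick the engines
 * that may now want to preempt. Caller holds schedule_lock.
 */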
static void __i915_schedule(struct i915_sched_node *node,
			    const struct i915_sched_attr *attr)
{
	const int prio = max(attr->priority, node->attr.priority);
	struct intel_engine_cs *engine;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	struct sched_cache cache;
	LIST_HEAD(dfs);

	/* Needed in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (node_signaled(node))
		return;

	stack.signaler = node;
	list_add(&stack.dfs_link, &dfs);

	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its dependencies
	 * to the end of the list (this may include an already visited
	 * request) and continue to walk onwards onto the new dependencies. The
	 * end result is a topological list of requests in reverse order, the
	 * last element in the list is the request we must execute first.
	 */
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		/* If we are already flying, we know we have no signalers */
		if (node_started(node))
			continue;

		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */
	if (node->attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&node->link));
		node->attr = *attr;

		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	memset(&cache, 0, sizeof(cache));
	engine = node_to_request(node)->engine;
	spin_lock(&engine->active.lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	engine = sched_lock_engine(node, engine, &cache);
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		INIT_LIST_HEAD(&dep->dfs_link);

		node = dep->signaler;
		engine = sched_lock_engine(node, engine, &cache);
		lockdep_assert_held(&engine->active.lock);

		/* Recheck after acquiring the engine->active.lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		GEM_BUG_ON(node_to_request(node)->engine != engine);

		WRITE_ONCE(node->attr.priority, prio);

		/*
		 * Once the request is ready, it will be placed into the
		 * priority lists and then onto the HW runlist. Before the
		 * request is ready, it does not contribute to our preemption
		 * decisions and we can safely ignore it, as it, and any
		 * preemption it requires, will be dealt with upon submission.
		 * See engine->submit_request()
		 */
		if (list_empty(&node->link))
			continue;

		if (i915_request_in_priority_queue(node_to_request(node))) {
			if (!cache.priolist)
				cache.priolist =
					i915_sched_lookup_priolist(engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
		}

		/* Defer (tasklet) submission until after all of our updates. */
		kick_submission(engine, node_to_request(node), prio);
	}

	spin_unlock(&engine->active.lock);
}
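/*
 * Apply new scheduling attributes to @rq, raising its effective priority
 * (and that of its signalers) under the global schedule_lock.
 */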
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock_irq(&schedule_lock);
	__i915_schedule(&rq->sched, attr);
	spin_unlock_irq(&schedule_lock);
}
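/*
 * i915_sched_node_init() sets up the list heads once when the node is first
 * embedded in a request; i915_sched_node_reinit() clears the per-use state
 * so the node can be reused when its request is recycled.
 */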
void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);

	i915_sched_node_reinit(node);
}
void i915_sched_node_reinit(struct i915_sched_node *node)
{
	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;

	GEM_BUG_ON(!list_empty(&node->signalers_list));
	GEM_BUG_ON(!list_empty(&node->waiters_list));
	GEM_BUG_ON(!list_empty(&node->link));
}
static struct i915_dependency *
i915_dependency_alloc(void)
{
	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
	kmem_cache_free(global.slab_dependencies, dep);
}
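/*
 * Record that @node must wait upon @signal, using the caller-provided @dep.
 * Returns true if the dependency was published, false if @signal has already
 * completed (in which case the caller should release @dep).
 */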
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock_irq(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		dep->signaler = signal;
		dep->waiter = node;
		dep->flags = flags;

		/* All set, now publish. Beware the lockless walkers. */
		list_add_rcu(&dep->signal_link, &node->signalers_list);
		list_add_rcu(&dep->wait_link, &signal->waiters_list);

		/* Propagate the chains */
		node->flags |= signal->flags;
		ret = true;
	}

	spin_unlock_irq(&schedule_lock);
	return ret;
}
int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal,
				   unsigned long flags)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc();
	if (!dep)
		return -ENOMEM;

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      flags | I915_DEPENDENCY_ALLOC))
		i915_dependency_free(dep);

	return 0;
}
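/*
 * Detach @node from both its signalers and its waiters, freeing any
 * i915_dependency we allocated on its behalf (I915_DEPENDENCY_ALLOC).
 */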
void i915_sched_node_fini(struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	spin_lock_irq(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->signalers_list);

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->waiters_list);

	spin_unlock_irq(&schedule_lock);
}
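/*
 * Debug helper: print @rq and, while it is still in flight, each of its
 * incomplete signalers from other timelines, indented beneath it.
 */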
void i915_request_show_with_schedule(struct drm_printer *m,
				     const struct i915_request *rq,
				     const char *prefix,
				     int indent)
{
	struct i915_dependency *dep;

	i915_request_show(m, rq, prefix, indent);
	if (i915_request_completed(rq))
		return;

	rcu_read_lock();
	for_each_signaler(dep, rq) {
		const struct i915_request *signaler =
			node_to_request(dep->signaler);

		/* Dependencies along the same timeline are expected. */
		if (signaler->timeline == rq->timeline)
			continue;

		if (__i915_request_is_complete(signaler))
			continue;

		i915_request_show(m, signaler, prefix, indent + 2);
	}
	rcu_read_unlock();
}
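/*
 * The dependency and priolist nodes come from global slab caches, hooked
 * into i915_globals so they can be shrunk under memory pressure and
 * destroyed with the rest of the driver's global state.
 */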
static void i915_global_scheduler_shrink(void)
{
	kmem_cache_shrink(global.slab_dependencies);
	kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
	kmem_cache_destroy(global.slab_dependencies);
	kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
	.shrink = i915_global_scheduler_shrink,
	.exit = i915_global_scheduler_exit,
} };
int __init i915_global_scheduler_init(void)
{
	global.slab_dependencies = KMEM_CACHE(i915_dependency,
					      SLAB_HWCACHE_ALIGN |
					      SLAB_TYPESAFE_BY_RCU);
	if (!global.slab_dependencies)
		return -ENOMEM;

	global.slab_priorities = KMEM_CACHE(i915_priolist, 0);
	if (!global.slab_priorities)
		goto err_priorities;

	i915_global_register(&global.base);
	return 0;

err_priorities:
	/* The priolist cache failed; unwind the dependency cache. */
	kmem_cache_destroy(global.slab_dependencies);
	return -ENOMEM;
}