drivers/gpu/drm/i915/i915_scheduler.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct i915_global_scheduler {
        struct i915_global base;
        struct kmem_cache *slab_dependencies;
        struct kmem_cache *slab_priorities;
} global;

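/*
 * schedule_lock serialises updates to the dependency graph shared by all
 * engines (the signalers/waiters lists of each i915_sched_node) and guards
 * the temporary dfs_link used while propagating priorities.
 */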
static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
        return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
        return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
        return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
        return rb_entry(rb, struct i915_priolist, node);
}

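/*
 * Sanity check (debug builds only) that the execlists queue is a valid
 * rbtree: sorted by descending priority, with the cached leftmost node in
 * sync with the root, and with each priolist's used bitmask matching its
 * non-empty request lists.
 */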
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
        struct rb_node *rb;
        long last_prio, i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;

        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));

        last_prio = INT_MAX;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);

                GEM_BUG_ON(p->priority > last_prio);
                last_prio = p->priority;

                GEM_BUG_ON(!p->used);
                for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
                        if (list_empty(&p->requests[i]))
                                continue;

                        GEM_BUG_ON(!(p->used & BIT(i)));
                }
        }
}

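/*
 * Return the request list for the given priority, inserting a new
 * i915_priolist into the engine's queue if one does not already exist.
 * The priority is split into a coarse level used to sort the rbtree and
 * a subpriority index selecting one of the fifo buckets within a node.
 * Must be called with engine->active.lock held.
 */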
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
        int idx, i;

        lockdep_assert_held(&engine->active.lock);
        assert_priolists(execlists);

        /* buckets sorted from highest [in slot 0] to lowest priority */
        idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
        prio >>= I915_USER_PRIORITY_SHIFT;
        if (unlikely(execlists->no_priolist))
                prio = I915_PRIORITY_NORMAL;

find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
        parent = &execlists->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = to_priolist(rb);
                if (prio > p->priority) {
                        parent = &rb->rb_left;
                } else if (prio < p->priority) {
                        parent = &rb->rb_right;
                        first = false;
                } else {
                        goto out;
                }
        }

        if (prio == I915_PRIORITY_NORMAL) {
                p = &execlists->default_priolist;
        } else {
                p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */

                        /*
                         * To maintain ordering with all rendering, after an
                         * allocation failure we have to disable all scheduling.
                         * Requests will then be executed in fifo, and schedule
                         * will ensure that dependencies are emitted in fifo.
                         * There will still be some reordering with existing
                         * requests, so if userspace lied about their
                         * dependencies that reordering may be visible.
                         */
                        execlists->no_priolist = true;
                        goto find_priolist;
                }
        }

        p->priority = prio;
        for (i = 0; i < ARRAY_SIZE(p->requests); i++)
                INIT_LIST_HEAD(&p->requests[i]);
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, &execlists->queue, first);
        p->used = 0;

out:
        p->used |= BIT(idx);
        return &p->requests[idx];
}

void __i915_priolist_free(struct i915_priolist *p)
{
        kmem_cache_free(global.slab_priorities, p);
}

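/*
 * Cache the priolist lookup while walking a chain of dependencies on a
 * single engine; the cache is reset whenever we switch engine locks.
 */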
struct sched_cache {
        struct list_head *priolist;
};

static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
                  struct intel_engine_cs *locked,
                  struct sched_cache *cache)
{
        const struct i915_request *rq = node_to_request(node);
        struct intel_engine_cs *engine;

        GEM_BUG_ON(!locked);

        /*
         * Virtual engines complicate acquiring the engine timeline lock,
         * as their rq->engine pointer is not stable until under that
         * engine lock. The simple ploy we use is to take the lock then
         * check that the rq still belongs to the newly locked engine.
         */
        while (locked != (engine = READ_ONCE(rq->engine))) {
                spin_unlock(&locked->active.lock);
                memset(cache, 0, sizeof(*cache));
                spin_lock(&engine->active.lock);
                locked = engine;
        }

        GEM_BUG_ON(locked != engine);
        return locked;
}

static inline int rq_prio(const struct i915_request *rq)
{
        return rq->sched.attr.priority;
}

static inline bool need_preempt(int prio, int active)
{
        /*
         * Allow preemption of low -> normal -> high, but we do
         * not allow low priority tasks to preempt other low priority
         * tasks under the impression that latency for low priority
         * tasks does not matter (as much as background throughput),
         * so keep it simple (KISS).
         */
        return prio >= max(I915_PRIORITY_NORMAL, active);
}

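/*
 * A request has been bumped to prio; raise the engine's
 * queue_priority_hint and, if the request now outranks the one currently
 * executing, schedule the submission tasklet to evaluate preemption.
 */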
static void kick_submission(struct intel_engine_cs *engine,
                            const struct i915_request *rq,
                            int prio)
{
        const struct i915_request *inflight;

        /*
         * We only need to kick the tasklet once for the high priority
         * new context we add into the queue.
         */
        if (prio <= engine->execlists.queue_priority_hint)
                return;

        rcu_read_lock();

        /* Nothing currently active? We're overdue for a submission! */
        inflight = execlists_active(&engine->execlists);
        if (!inflight)
                goto unlock;

        /*
         * If we are already the currently executing context, don't
         * bother evaluating if we should preempt ourselves.
         */
        if (inflight->context == rq->context)
                goto unlock;

        ENGINE_TRACE(engine,
                     "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
                     prio,
                     rq->fence.context, rq->fence.seqno,
                     inflight->fence.context, inflight->fence.seqno,
                     inflight->sched.attr.priority);

        engine->execlists.queue_priority_hint = prio;
        if (need_preempt(prio, rq_prio(inflight)))
                tasklet_hi_schedule(&engine->execlists.tasklet);

unlock:
        rcu_read_unlock();
}

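/*
 * Raise the effective priority of node to at least attr->priority and
 * propagate that priority to every request it depends upon, requeueing
 * any ready requests whose priority changed. Called with schedule_lock
 * held.
 */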
static void __i915_schedule(struct i915_sched_node *node,
                            const struct i915_sched_attr *attr)
{
        const int prio = max(attr->priority, node->attr.priority);
        struct intel_engine_cs *engine;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        struct sched_cache cache;
        LIST_HEAD(dfs);

        /* Needed in order to use the temporary link inside i915_dependency */
        lockdep_assert_held(&schedule_lock);
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

        if (node_signaled(node))
                return;

        stack.signaler = node;
        list_add(&stack.dfs_link, &dfs);

        /*
         * Recursively bump all dependent priorities to match the new request.
         *
         * A naive approach would be to use recursion:
         * static void update_priorities(struct i915_sched_node *node, prio) {
         *      list_for_each_entry(dep, &node->signalers_list, signal_link)
         *              update_priorities(dep->signal, prio)
         *      queue_request(node);
         * }
         * but that may have unlimited recursion depth and so runs a very
         * real risk of overrunning the kernel stack. Instead, we build
         * a flat list of all dependencies starting with the current request.
         * As we walk the list of dependencies, we add all of its dependencies
         * to the end of the list (this may include an already visited
         * request) and continue to walk onwards onto the new dependencies.
         * The end result is a topological list of requests in reverse order;
         * the last element in the list is the request we must execute first.
         */
        list_for_each_entry(dep, &dfs, dfs_link) {
                struct i915_sched_node *node = dep->signaler;

                /* If we are already flying, we know we have no signalers */
                if (node_started(node))
                        continue;

                /*
                 * Within an engine, there can be no cycle, but we may
                 * refer to the same dependency chain multiple times
                 * (redundant dependencies are not eliminated) and across
                 * engines.
                 */
                list_for_each_entry(p, &node->signalers_list, signal_link) {
                        GEM_BUG_ON(p == dep); /* no cycles! */

                        if (node_signaled(p->signaler))
                                continue;

                        if (prio > READ_ONCE(p->signaler->attr.priority))
                                list_move_tail(&p->dfs_link, &dfs);
                }
        }

        /*
         * If we didn't need to bump any existing priorities, and we haven't
         * yet submitted this request (i.e. there is no potential race with
         * execlists_submit_request()), we can set our own priority and skip
         * acquiring the engine locks.
         */
        if (node->attr.priority == I915_PRIORITY_INVALID) {
                GEM_BUG_ON(!list_empty(&node->link));
                node->attr = *attr;

                if (stack.dfs_link.next == stack.dfs_link.prev)
                        return;

                __list_del_entry(&stack.dfs_link);
        }

        memset(&cache, 0, sizeof(cache));
        engine = node_to_request(node)->engine;
        spin_lock(&engine->active.lock);

        /* Fifo and depth-first replacement ensure our deps execute before us */
        engine = sched_lock_engine(node, engine, &cache);
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
                INIT_LIST_HEAD(&dep->dfs_link);

                node = dep->signaler;
                engine = sched_lock_engine(node, engine, &cache);
                lockdep_assert_held(&engine->active.lock);

                /* Recheck after acquiring the engine->active.lock */
                if (prio <= node->attr.priority || node_signaled(node))
                        continue;

                GEM_BUG_ON(node_to_request(node)->engine != engine);

                WRITE_ONCE(node->attr.priority, prio);

                /*
                 * Once the request is ready, it will be placed into the
                 * priority lists and then onto the HW runlist. Before the
                 * request is ready, it does not contribute to our preemption
                 * decisions and we can safely ignore it; it, and any
                 * preemption required, will be dealt with upon submission.
                 * See engine->submit_request()
                 */
                if (list_empty(&node->link))
                        continue;

                if (i915_request_in_priority_queue(node_to_request(node))) {
                        if (!cache.priolist)
                                cache.priolist =
                                        i915_sched_lookup_priolist(engine,
                                                                   prio);
                        list_move_tail(&node->link, cache.priolist);
                }

                /* Defer (tasklet) submission until after all of our updates. */
                kick_submission(engine, node_to_request(node), prio);
        }

        spin_unlock(&engine->active.lock);
}

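/* Apply a new scheduling attribute to a request and its dependencies. */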
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
        spin_lock_irq(&schedule_lock);
        __i915_schedule(&rq->sched, attr);
        spin_unlock_irq(&schedule_lock);
}

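/*
 * Priority bumps are internal flag bits OR'ed into the low bits of the
 * priority value (below I915_USER_PRIORITY_SHIFT), so a bump that has
 * already been applied can be detected with a simple mask before taking
 * any locks.
 */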
static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
        struct i915_sched_attr attr = node->attr;

        if (attr.priority & bump)
                return;

        attr.priority |= bump;
        __i915_schedule(node, &attr);
}

void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
        unsigned long flags;

        GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
        if (READ_ONCE(rq->sched.attr.priority) & bump)
                return;

        spin_lock_irqsave(&schedule_lock, flags);
        __bump_priority(&rq->sched, bump);
        spin_unlock_irqrestore(&schedule_lock, flags);
}

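/*
 * One-time initialisation of the sched node embedded in a request;
 * i915_sched_node_reinit() is used when the request is recycled and
 * asserts that the node was fully disconnected by i915_sched_node_fini().
 */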
void i915_sched_node_init(struct i915_sched_node *node)
{
        INIT_LIST_HEAD(&node->signalers_list);
        INIT_LIST_HEAD(&node->waiters_list);
        INIT_LIST_HEAD(&node->link);

        i915_sched_node_reinit(node);
}

void i915_sched_node_reinit(struct i915_sched_node *node)
{
        node->attr.priority = I915_PRIORITY_INVALID;
        node->semaphores = 0;
        node->flags = 0;

        GEM_BUG_ON(!list_empty(&node->signalers_list));
        GEM_BUG_ON(!list_empty(&node->waiters_list));
        GEM_BUG_ON(!list_empty(&node->link));
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
        return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
        kmem_cache_free(global.slab_dependencies, dep);
}

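/*
 * Link node as a waiter on signal using the caller-provided dep.
 * Returns true if the dependency was added, or false if signal has
 * already completed and the dependency is redundant (in which case the
 * caller retains ownership of dep).
 */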
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                                      struct i915_sched_node *signal,
                                      struct i915_dependency *dep,
                                      unsigned long flags)
{
        bool ret = false;

        spin_lock_irq(&schedule_lock);

        if (!node_signaled(signal)) {
                INIT_LIST_HEAD(&dep->dfs_link);
                dep->signaler = signal;
                dep->waiter = node;
                dep->flags = flags;

                /* All set, now publish. Beware the lockless walkers. */
                list_add_rcu(&dep->signal_link, &node->signalers_list);
                list_add_rcu(&dep->wait_link, &signal->waiters_list);

                /* Propagate the chains */
                node->flags |= signal->flags;
                ret = true;
        }

        spin_unlock_irq(&schedule_lock);

        return ret;
}

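/*
 * As above, but allocate the i915_dependency here; a redundant
 * dependency is freed again immediately.
 */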
int i915_sched_node_add_dependency(struct i915_sched_node *node,
                                   struct i915_sched_node *signal,
                                   unsigned long flags)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc();
        if (!dep)
                return -ENOMEM;

        if (!__i915_sched_node_add_dependency(node, signal, dep,
                                              flags | I915_DEPENDENCY_ALLOC))
                i915_dependency_free(dep);

        return 0;
}

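/*
 * Disconnect node from the dependency graph, releasing any
 * i915_dependency we allocated on its behalf.
 */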
void i915_sched_node_fini(struct i915_sched_node *node)
{
        struct i915_dependency *dep, *tmp;

        spin_lock_irq(&schedule_lock);

        /*
         * Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del_rcu(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }
        INIT_LIST_HEAD(&node->signalers_list);

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
                GEM_BUG_ON(dep->signaler != node);
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del_rcu(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }
        INIT_LIST_HEAD(&node->waiters_list);

        spin_unlock_irq(&schedule_lock);
}

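/*
 * Debug helper: print a request and, if it is still in flight, each of
 * its incomplete cross-timeline signalers, indented beneath it.
 */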
void i915_request_show_with_schedule(struct drm_printer *m,
                                     const struct i915_request *rq,
                                     const char *prefix,
                                     int indent)
{
        struct i915_dependency *dep;

        i915_request_show(m, rq, prefix, indent);
        if (i915_request_completed(rq))
                return;

        rcu_read_lock();
        for_each_signaler(dep, rq) {
                const struct i915_request *signaler =
                        node_to_request(dep->signaler);

                /* Dependencies along the same timeline are expected. */
                if (signaler->timeline == rq->timeline)
                        continue;

                if (__i915_request_is_complete(signaler))
                        continue;

                i915_request_show(m, signaler, prefix, indent + 2);
        }
        rcu_read_unlock();
}

static void i915_global_scheduler_shrink(void)
{
        kmem_cache_shrink(global.slab_dependencies);
        kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
        kmem_cache_destroy(global.slab_dependencies);
        kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
        .shrink = i915_global_scheduler_shrink,
        .exit = i915_global_scheduler_exit,
} };

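/*
 * The dependency slab is SLAB_TYPESAFE_BY_RCU as dependencies are
 * published with list_add_rcu() and may still be inspected by lockless
 * walkers after being freed.
 */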
int __init i915_global_scheduler_init(void)
{
        global.slab_dependencies = KMEM_CACHE(i915_dependency,
                                              SLAB_HWCACHE_ALIGN |
                                              SLAB_TYPESAFE_BY_RCU);
        if (!global.slab_dependencies)
                return -ENOMEM;

        global.slab_priorities = KMEM_CACHE(i915_priolist,
                                            SLAB_HWCACHE_ALIGN);
        if (!global.slab_priorities)
                goto err_priorities;

        i915_global_register(&global.base);
        return 0;

err_priorities:
        /* The priorities cache failed, so unwind the dependencies cache. */
        kmem_cache_destroy(global.slab_dependencies);
        return -ENOMEM;
}