/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct i915_global_scheduler {
	struct i915_global base;
	struct kmem_cache *slab_dependencies;
	struct kmem_cache *slab_priorities;
} global;

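/*
 * schedule_lock serialises updates to the global dependency graph and
 * guards the temporary dfs_link inside each i915_dependency used by
 * __i915_schedule() while walking that graph.
 */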
static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
	return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

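/*
 * Debug-only (CONFIG_DRM_I915_DEBUG_GEM) sanity check: the execlists
 * queue must remain sorted by strictly descending priority, and each
 * priolist's used bitmask must cover every non-empty request bucket.
 */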
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
	struct rb_node *rb;
	long last_prio, i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
		   rb_first(&execlists->queue.rb_root));

	last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority >= last_prio);
		last_prio = p->priority;

		GEM_BUG_ON(!p->used);
		for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
			if (list_empty(&p->requests[i]))
				continue;

			GEM_BUG_ON(!(p->used & BIT(i)));
		}
	}
}

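/*
 * Return the request list for @prio on @engine, inserting a new
 * i915_priolist into the execlists queue rb-tree if required. The low
 * bits of @prio (I915_PRIORITY_MASK) select the bucket within the
 * priolist, while the remaining bits form the user priority that sorts
 * the tree. Must be called with engine->timeline.lock held.
 */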
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;
	int idx, i;

	lockdep_assert_held(&engine->timeline.lock);
	assert_priolists(execlists);

	/* buckets sorted from highest [in slot 0] to lowest priority */
	idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
	prio >>= I915_USER_PRIORITY_SHIFT;
	if (unlikely(execlists->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
	/* most positive priority is scheduled first, equal priorities fifo */
	rb = NULL;
	parent = &execlists->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			goto out;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &execlists->default_priolist;
	} else {
		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
		/* Convert an allocation failure to a priority bump */
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

			/* To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in fifo, and schedule
			 * will ensure that dependencies are emitted in fifo.
			 * There will still be some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */
			execlists->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
		INIT_LIST_HEAD(&p->requests[i]);
	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &execlists->queue, first);
	p->used = 0;

out:
	p->used |= BIT(idx);
	return &p->requests[idx];
}

void __i915_priolist_free(struct i915_priolist *p)
{
	kmem_cache_free(global.slab_priorities, p);
}

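/*
 * Cache of the most recent priolist lookup made while walking the
 * dependency list in __i915_schedule(). The cached pointer is only
 * valid under the current engine->timeline.lock and is discarded
 * whenever we switch engines (see sched_lock_engine()).
 */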
struct sched_cache {
	struct list_head *priolist;
};

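/*
 * Swap from holding @locked's timeline lock to that of the engine on
 * which @node's request will execute, resetting the priolist cache as
 * its contents were only valid under the previous lock.
 */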
static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
		  struct intel_engine_cs *locked,
		  struct sched_cache *cache)
{
	struct intel_engine_cs *engine = node_to_request(node)->engine;

	GEM_BUG_ON(!locked);

	if (engine != locked) {
		spin_unlock(&locked->timeline.lock);
		memset(cache, 0, sizeof(*cache));
		spin_lock(&engine->timeline.lock);
	}

	return engine;
}

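/*
 * A request is considered inflight here only if its context matches the
 * context of the request occupying the first execlists port, i.e. it is
 * (or shares) the currently executing context on @engine.
 */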
static bool inflight(const struct i915_request *rq,
		     const struct intel_engine_cs *engine)
{
	const struct i915_request *active;

	if (!i915_request_is_active(rq))
		return false;

	active = port_request(engine->execlists.port);
	return active->hw_context == rq->hw_context;
}

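/*
 * Raise the priority of @node and of every incomplete request it
 * depends upon to at least @attr->priority, resorting any requests that
 * are already queued and kicking the execlists tasklet when the new
 * priority may warrant preemption. Called with schedule_lock held and
 * interrupts disabled.
 */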
static void __i915_schedule(struct i915_sched_node *node,
			    const struct i915_sched_attr *attr)
{
	struct intel_engine_cs *engine;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	const int prio = attr->priority;
	struct sched_cache cache;
	LIST_HEAD(dfs);

	/* Needed in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (node_signaled(node))
		return;

	if (prio <= READ_ONCE(node->attr.priority))
		return;

	stack.signaler = node;
	list_add(&stack.dfs_link, &dfs);

	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its dependencies
	 * to the end of the list (this may include an already visited
	 * request) and continue to walk onwards onto the new dependencies. The
	 * end result is a topological list of requests in reverse order; the
	 * last element in the list is the request we must execute first.
	 */
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		/* If we are already flying, we know we have no signalers */
		if (node_started(node))
			continue;

		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */
	if (node->attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&node->link));
		node->attr = *attr;

		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	memset(&cache, 0, sizeof(cache));
	engine = node_to_request(node)->engine;
	spin_lock(&engine->timeline.lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		INIT_LIST_HEAD(&dep->dfs_link);

		node = dep->signaler;
		engine = sched_lock_engine(node, engine, &cache);
		lockdep_assert_held(&engine->timeline.lock);

		/* Recheck after acquiring the engine->timeline.lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		node->attr.priority = prio;
		if (!list_empty(&node->link)) {
			if (!cache.priolist)
				cache.priolist =
					i915_sched_lookup_priolist(engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
		} else {
			/*
			 * If the request is not in the priolist queue because
			 * it is not yet runnable, then it doesn't contribute
			 * to our preemption decisions. On the other hand,
			 * if the request is on the HW, it too is not in the
			 * queue; but in that case we may still need to reorder
			 * the inflight requests.
			 */
			if (!i915_sw_fence_done(&node_to_request(node)->submit))
				continue;
		}

		if (prio <= engine->execlists.queue_priority_hint)
			continue;

		engine->execlists.queue_priority_hint = prio;

		/*
		 * If we are already the currently executing context, don't
		 * bother evaluating if we should preempt ourselves.
		 */
		if (inflight(node_to_request(node), engine))
			continue;

		/* Defer (tasklet) submission until after all of our updates. */
		tasklet_hi_schedule(&engine->execlists.tasklet);
	}

	spin_unlock(&engine->timeline.lock);
}

void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock_irq(&schedule_lock);
	__i915_schedule(&rq->sched, attr);
	spin_unlock_irq(&schedule_lock);
}

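/*
 * OR an internal priority bonus (a value within I915_PRIORITY_MASK,
 * below the user priority bits, e.g. __NO_PREEMPTION) into the node's
 * current attributes and propagate it via __i915_schedule().
 */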
static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
	struct i915_sched_attr attr = node->attr;

	attr.priority |= bump;
	__i915_schedule(node, &attr);
}

void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
	unsigned long flags;

	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);

	if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
		return;

	spin_lock_irqsave(&schedule_lock, flags);
	__bump_priority(&rq->sched, bump);
	spin_unlock_irqrestore(&schedule_lock, flags);
}

void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);
	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
	kmem_cache_free(global.slab_dependencies, dep);
}

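/*
 * Record that @node must wait upon @signal, using the caller-supplied
 * @dep. Returns true if the dependency was added, or false if @signal
 * has already completed, in which case the caller retains ownership of
 * @dep and must free it.
 */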
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock_irq(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		list_add(&dep->wait_link, &signal->waiters_list);
		list_add(&dep->signal_link, &node->signalers_list);
		dep->signaler = signal;
		dep->flags = flags;

		/* Keep track of whether anyone on this chain has a semaphore */
		if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
		    !node_started(signal))
			node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;

		/*
		 * As we do not allow WAIT to preempt inflight requests,
		 * once we have executed a request, along with triggering
		 * any execution callbacks, we must preserve its ordering
		 * within the non-preemptible FIFO.
		 */
		BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
		if (flags & I915_DEPENDENCY_EXTERNAL)
			__bump_priority(signal, __NO_PREEMPTION);

		ret = true;
	}

	spin_unlock_irq(&schedule_lock);

	return ret;
}

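/*
 * As above, but allocate the i915_dependency here. A @signal that has
 * already completed is not an error: the unused dependency is simply
 * released and we still return 0. Only allocation failure returns
 * -ENOMEM.
 */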
int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc();
	if (!dep)
		return -ENOMEM;

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      I915_DEPENDENCY_EXTERNAL |
					      I915_DEPENDENCY_ALLOC))
		i915_dependency_free(dep);

	return 0;
}

void i915_sched_node_fini(struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	GEM_BUG_ON(!list_empty(&node->link));

	spin_lock_irq(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!node_signaled(dep->signaler));
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}

	spin_unlock_irq(&schedule_lock);
}

static void i915_global_scheduler_shrink(void)
{
	kmem_cache_shrink(global.slab_dependencies);
	kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
	kmem_cache_destroy(global.slab_dependencies);
	kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
	.shrink = i915_global_scheduler_shrink,
	.exit = i915_global_scheduler_exit,
} };

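/*
 * Create the slab caches for i915_dependency and i915_priolist and
 * register them with the i915_globals infrastructure so they can be
 * shrunk under memory pressure and destroyed on unload.
 */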
int __init i915_global_scheduler_init(void)
{
	global.slab_dependencies = KMEM_CACHE(i915_dependency,
					      SLAB_HWCACHE_ALIGN);
	if (!global.slab_dependencies)
		return -ENOMEM;

	global.slab_priorities = KMEM_CACHE(i915_priolist,
					    SLAB_HWCACHE_ALIGN);
	if (!global.slab_priorities)
		goto err_priorities;

	i915_global_register(&global.base);
	return 0;

err_priorities:
	/* slab_priorities failed to allocate; undo the dependencies cache */
	kmem_cache_destroy(global.slab_dependencies);
	return -ENOMEM;
}