/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_request.h"
#include "i915_scheduler.h"

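/*
 * schedule_lock serialises all updates to the dependency graph (each
 * node's signalers_list and waiters_list): __i915_schedule() may walk a
 * chain of dependencies that spans engines, so a single global lock is
 * used rather than a per-engine one.
 */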
static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
        return container_of(node, const struct i915_request, sched);
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
        return i915_request_completed(node_to_request(node));
}

void i915_sched_node_init(struct i915_sched_node *node)
{
        INIT_LIST_HEAD(&node->signalers_list);
        INIT_LIST_HEAD(&node->waiters_list);
        INIT_LIST_HEAD(&node->link);
        node->attr.priority = I915_PRIORITY_INVALID;
}

static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
        return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
                     struct i915_dependency *dep)
{
        kmem_cache_free(i915->dependencies, dep);
}

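/*
 * __i915_sched_node_add_dependency() records that @node must wait upon
 * @signal, using the caller-provided @dep; it returns false (leaving @dep
 * with the caller) if @signal has already completed. The plain
 * i915_sched_node_add_dependency() below allocates the i915_dependency
 * itself and marks it I915_DEPENDENCY_ALLOC so that it is freed again in
 * i915_sched_node_fini().
 *
 * Illustrative sketch only (the real callers live outside this file, e.g.
 * in the request code); names such as rq/prev are placeholders:
 *
 *      err = i915_sched_node_add_dependency(i915, &rq->sched, &prev->sched);
 *      if (err)
 *              return err;
 */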
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                                      struct i915_sched_node *signal,
                                      struct i915_dependency *dep,
                                      unsigned long flags)
{
        bool ret = false;

        spin_lock(&schedule_lock);

        if (!node_signaled(signal)) {
                INIT_LIST_HEAD(&dep->dfs_link);
                list_add(&dep->wait_link, &signal->waiters_list);
                list_add(&dep->signal_link, &node->signalers_list);
                dep->signaler = signal;
                dep->flags = flags;

                ret = true;
        }

        spin_unlock(&schedule_lock);

        return ret;
}

int i915_sched_node_add_dependency(struct drm_i915_private *i915,
                                   struct i915_sched_node *node,
                                   struct i915_sched_node *signal)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc(i915);
        if (!dep)
                return -ENOMEM;

        if (!__i915_sched_node_add_dependency(node, signal, dep,
                                              I915_DEPENDENCY_ALLOC))
                i915_dependency_free(i915, dep);

        return 0;
}

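/*
 * i915_sched_node_fini() tears the node out of the dependency graph in
 * both directions, freeing any i915_dependency that was allocated on its
 * behalf (I915_DEPENDENCY_ALLOC).
 */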
void i915_sched_node_fini(struct drm_i915_private *i915,
                          struct i915_sched_node *node)
{
        struct i915_dependency *dep, *tmp;

        GEM_BUG_ON(!list_empty(&node->link));

        spin_lock(&schedule_lock);

        /*
         * Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
                GEM_BUG_ON(!node_signaled(dep->signaler));
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
                GEM_BUG_ON(dep->signaler != node);
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }

        spin_unlock(&schedule_lock);
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
        return rb_entry(rb, struct i915_priolist, node);
}

static void assert_priolists(struct intel_engine_execlists * const execlists)
{
        struct rb_node *rb;
        long last_prio, i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;

        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));

        last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);

                GEM_BUG_ON(p->priority >= last_prio);
                last_prio = p->priority;

                GEM_BUG_ON(!p->used);
                for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
                        if (list_empty(&p->requests[i]))
                                continue;

                        GEM_BUG_ON(!(p->used & BIT(i)));
                }
        }
}

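/*
 * The execlists queue is an rbtree of i915_priolist buckets, sorted with
 * the most positive user priority first. Each bucket holds one list per
 * internal priority level (the low I915_PRIORITY_MASK bits of the full
 * priority), and p->used tracks which of those lists are non-empty.
 * Requests of equal priority execute in FIFO order within their list.
 */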
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
        int idx, i;

        lockdep_assert_held(&engine->timeline.lock);
        assert_priolists(execlists);

        /* buckets sorted from highest [in slot 0] to lowest priority */
        idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
        prio >>= I915_USER_PRIORITY_SHIFT;
        if (unlikely(execlists->no_priolist))
                prio = I915_PRIORITY_NORMAL;

find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
        parent = &execlists->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = to_priolist(rb);
                if (prio > p->priority) {
                        parent = &rb->rb_left;
                } else if (prio < p->priority) {
                        parent = &rb->rb_right;
                        first = false;
                } else {
                        goto out;
                }
        }

        if (prio == I915_PRIORITY_NORMAL) {
                p = &execlists->default_priolist;
        } else {
                p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */

                        /*
                         * To maintain ordering with all rendering, after an
                         * allocation failure we have to disable all
                         * scheduling. Requests will then be executed in
                         * fifo, and schedule will ensure that dependencies
                         * are emitted in fifo. There will still be some
                         * reordering with existing requests, so if userspace
                         * lied about their dependencies that reordering may
                         * be visible.
                         */
                        execlists->no_priolist = true;
                        goto find_priolist;
                }
        }

        p->priority = prio;
        for (i = 0; i < ARRAY_SIZE(p->requests); i++)
                INIT_LIST_HEAD(&p->requests[i]);
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, &execlists->queue, first);
        p->used = 0;

out:
        p->used |= BIT(idx);
        return &p->requests[idx];
}
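
/*
 * Illustrative sketch only (the submission backends are the real callers;
 * this file only uses the result in __i915_schedule() below): with
 * engine->timeline.lock held, a ready request is queued with something
 * like
 *
 *      list_add_tail(&rq->sched.link,
 *                    i915_sched_lookup_priolist(engine, prio));
 *
 * where prio combines the user priority with the internal bits as above.
 */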

static struct intel_engine_cs *
sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
{
        struct intel_engine_cs *engine = node_to_request(node)->engine;

        GEM_BUG_ON(!locked);

        if (engine != locked) {
                spin_unlock(&locked->timeline.lock);
                spin_lock(&engine->timeline.lock);
        }

        return engine;
}

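/*
 * inflight() reports whether @rq's context is the one currently occupying
 * the first execlists port of @engine, i.e. whether it is what the
 * hardware is executing right now.
 */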
static bool inflight(const struct i915_request *rq,
                     const struct intel_engine_cs *engine)
{
        const struct i915_request *active;

        if (!i915_request_is_active(rq))
                return false;

        active = port_request(engine->execlists.port);
        return active->hw_context == rq->hw_context;
}

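/*
 * __i915_schedule() raises the effective priority of @rq and, transitively,
 * of every request @rq depends upon, so that nothing it waits for is left
 * queued at a lower priority than the one now requested. Priorities only
 * ever increase here. The walk may cross engines, with sched_lock_engine()
 * handing over the single engine->timeline.lock we hold as it goes.
 */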
static void __i915_schedule(struct i915_request *rq,
                            const struct i915_sched_attr *attr)
{
        struct list_head *uninitialized_var(pl);
        struct intel_engine_cs *engine, *last;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        const int prio = attr->priority;
        LIST_HEAD(dfs);

        /* Needed in order to use the temporary link inside i915_dependency */
        lockdep_assert_held(&schedule_lock);
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

        if (i915_request_completed(rq))
                return;

        if (prio <= READ_ONCE(rq->sched.attr.priority))
                return;

        stack.signaler = &rq->sched;
        list_add(&stack.dfs_link, &dfs);

        /*
         * Recursively bump all dependent priorities to match the new request.
         *
         * A naive approach would be to use recursion:
         * static void update_priorities(struct i915_sched_node *node, prio) {
         *      list_for_each_entry(dep, &node->signalers_list, signal_link)
         *              update_priorities(dep->signal, prio)
         *      queue_request(node);
         * }
         * but that may have unlimited recursion depth and so runs a very
         * real risk of overrunning the kernel stack. Instead, we build
         * a flat list of all dependencies starting with the current request.
         * As we walk the list of dependencies, we add all of its dependencies
         * to the end of the list (this may include an already visited
         * request) and continue to walk onwards onto the new dependencies.
         * The end result is a topological list of requests in reverse order;
         * the last element in the list is the request we must execute first.
         */
        list_for_each_entry(dep, &dfs, dfs_link) {
                struct i915_sched_node *node = dep->signaler;

                /*
                 * Within an engine, there can be no cycle, but we may
                 * refer to the same dependency chain multiple times
                 * (redundant dependencies are not eliminated) and across
                 * engines.
                 */
                list_for_each_entry(p, &node->signalers_list, signal_link) {
                        GEM_BUG_ON(p == dep); /* no cycles! */

                        if (node_signaled(p->signaler))
                                continue;

                        GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
                        if (prio > READ_ONCE(p->signaler->attr.priority))
                                list_move_tail(&p->dfs_link, &dfs);
                }
        }

        /*
         * If we didn't need to bump any existing priorities, and we haven't
         * yet submitted this request (i.e. there is no potential race with
         * execlists_submit_request()), we can set our own priority and skip
         * acquiring the engine locks.
         */
        if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
                GEM_BUG_ON(!list_empty(&rq->sched.link));
                rq->sched.attr = *attr;

                if (stack.dfs_link.next == stack.dfs_link.prev)
                        return;

                __list_del_entry(&stack.dfs_link);
        }

        last = NULL;
        engine = rq->engine;
        spin_lock_irq(&engine->timeline.lock);

        /* Fifo and depth-first replacement ensure our deps execute before us */
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
                struct i915_sched_node *node = dep->signaler;

                INIT_LIST_HEAD(&dep->dfs_link);

                engine = sched_lock_engine(node, engine);
                lockdep_assert_held(&engine->timeline.lock);

                /* Recheck after acquiring the engine->timeline.lock */
                if (prio <= node->attr.priority || node_signaled(node))
                        continue;

                node->attr.priority = prio;
                if (!list_empty(&node->link)) {
                        if (last != engine) {
                                pl = i915_sched_lookup_priolist(engine, prio);
                                last = engine;
                        }
                        list_move_tail(&node->link, pl);
                } else {
                        /*
                         * If the request is not in the priolist queue because
                         * it is not yet runnable, then it doesn't contribute
                         * to our preemption decisions. On the other hand,
                         * if the request is on the HW, it too is not in the
                         * queue; but in that case we may still need to reorder
                         * the inflight requests.
                         */
                        if (!i915_sw_fence_done(&node_to_request(node)->submit))
                                continue;
                }

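                /*
                 * If the bumped priority does not beat the current
                 * queue_priority_hint, the choice of what to run next is
                 * unchanged and there is no need to kick the tasklet.
                 */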
                if (prio <= engine->execlists.queue_priority_hint)
                        continue;

                engine->execlists.queue_priority_hint = prio;

                /*
                 * If we are already the currently executing context, don't
                 * bother evaluating if we should preempt ourselves.
                 */
                if (inflight(node_to_request(node), engine))
                        continue;

                /* Defer (tasklet) submission until after all of our updates. */
                tasklet_hi_schedule(&engine->execlists.tasklet);
        }

        spin_unlock_irq(&engine->timeline.lock);
}

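/*
 * i915_schedule() is the external entry point: it takes schedule_lock and
 * applies @attr to @rq and everything @rq depends upon.
 */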
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
        spin_lock(&schedule_lock);
        __i915_schedule(rq, attr);
        spin_unlock(&schedule_lock);
}

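/*
 * i915_schedule_bump_priority() ORs extra internal priority bits (bump
 * must stay within I915_PRIORITY_MASK, i.e. below the user priority
 * shift) into a request that has already been scheduled; requests whose
 * priority is still INVALID are left untouched.
 */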
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
        struct i915_sched_attr attr;

        GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);

        if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
                return;

        spin_lock_bh(&schedule_lock);

        attr = rq->sched.attr;
        attr.priority |= bump;
        __i915_schedule(rq, &attr);

        spin_unlock_bh(&schedule_lock);
}