drivers/gpu/drm/i915/i915_active.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2019 Intel Corporation
5  */
6
7 #include <linux/debugobjects.h>
8
9 #include "gt/intel_context.h"
10 #include "gt/intel_engine_heartbeat.h"
11 #include "gt/intel_engine_pm.h"
12 #include "gt/intel_ring.h"
13
14 #include "i915_drv.h"
15 #include "i915_active.h"
16 #include "i915_globals.h"
17
18 /*
19  * Active refs memory management
20  *
21  * To be more economical with memory, we reap all the i915_active trees as
22  * they idle (when we know the active requests are inactive) and allocate the
23  * nodes from a local slab cache to hopefully reduce the fragmentation.
24  */
25 static struct i915_global_active {
26         struct i915_global base;
27         struct kmem_cache *slab_cache;
28 } global;
29
30 struct active_node {
31         struct rb_node node;
32         struct i915_active_fence base;
33         struct i915_active *ref;
34         u64 timeline;
35 };
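/*
 * Each active_node tracks the last fence recorded on one timeline (keyed by
 * idx, typically a timeline's fence_context) and lives in ref->tree. Nodes
 * are carved out of global.slab_cache by active_instance() and returned to
 * it in __active_retire(), with at most one MRU node kept in ref->cache.
 */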
36
37 #define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)
38
39 static inline struct active_node *
40 node_from_active(struct i915_active_fence *active)
41 {
42         return container_of(active, struct active_node, base);
43 }
44
45 #define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
46
47 static inline bool is_barrier(const struct i915_active_fence *active)
48 {
49         return IS_ERR(rcu_access_pointer(active->fence));
50 }
51
52 static inline struct llist_node *barrier_to_ll(struct active_node *node)
53 {
54         GEM_BUG_ON(!is_barrier(&node->base));
55         return (struct llist_node *)&node->base.cb.node;
56 }
57
58 static inline struct intel_engine_cs *
59 __barrier_to_engine(struct active_node *node)
60 {
61         return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
62 }
63
64 static inline struct intel_engine_cs *
65 barrier_to_engine(struct active_node *node)
66 {
67         GEM_BUG_ON(!is_barrier(&node->base));
68         return __barrier_to_engine(node);
69 }
70
71 static inline struct active_node *barrier_from_ll(struct llist_node *x)
72 {
73         return container_of((struct list_head *)x,
74                             struct active_node, base.cb.node);
75 }
76
77 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
78
79 static void *active_debug_hint(void *addr)
80 {
81         struct i915_active *ref = addr;
82
83         return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
84 }
85
86 static const struct debug_obj_descr active_debug_desc = {
87         .name = "i915_active",
88         .debug_hint = active_debug_hint,
89 };
90
91 static void debug_active_init(struct i915_active *ref)
92 {
93         debug_object_init(ref, &active_debug_desc);
94 }
95
96 static void debug_active_activate(struct i915_active *ref)
97 {
98         lockdep_assert_held(&ref->tree_lock);
99         if (!atomic_read(&ref->count)) /* before the first inc */
100                 debug_object_activate(ref, &active_debug_desc);
101 }
102
103 static void debug_active_deactivate(struct i915_active *ref)
104 {
105         lockdep_assert_held(&ref->tree_lock);
106         if (!atomic_read(&ref->count)) /* after the last dec */
107                 debug_object_deactivate(ref, &active_debug_desc);
108 }
109
110 static void debug_active_fini(struct i915_active *ref)
111 {
112         debug_object_free(ref, &active_debug_desc);
113 }
114
115 static void debug_active_assert(struct i915_active *ref)
116 {
117         debug_object_assert_init(ref, &active_debug_desc);
118 }
119
120 #else
121
122 static inline void debug_active_init(struct i915_active *ref) { }
123 static inline void debug_active_activate(struct i915_active *ref) { }
124 static inline void debug_active_deactivate(struct i915_active *ref) { }
125 static inline void debug_active_fini(struct i915_active *ref) { }
126 static inline void debug_active_assert(struct i915_active *ref) { }
127
128 #endif
129
130 static void
131 __active_retire(struct i915_active *ref)
132 {
133         struct rb_root root = RB_ROOT;
134         struct active_node *it, *n;
135         unsigned long flags;
136
137         GEM_BUG_ON(i915_active_is_idle(ref));
138
139         /* return the unused nodes to our slabcache -- flushing the allocator */
140         if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
141                 return;
142
143         GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
144         debug_active_deactivate(ref);
145
146         /* Even if we have not used the cache, we may still have a barrier */
147         if (!ref->cache)
148                 ref->cache = fetch_node(ref->tree.rb_node);
149
150         /* Keep the MRU cached node for reuse */
151         if (ref->cache) {
152                 /* Discard all other nodes in the tree */
153                 rb_erase(&ref->cache->node, &ref->tree);
154                 root = ref->tree;
155
156                 /* Rebuild the tree with only the cached node */
157                 rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
158                 rb_insert_color(&ref->cache->node, &ref->tree);
159                 GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
160
161                 /* Make the cached node available for reuse with any timeline */
162                 if (IS_ENABLED(CONFIG_64BIT))
163                         ref->cache->timeline = 0; /* needs cmpxchg(u64) */
164         }
165
166         spin_unlock_irqrestore(&ref->tree_lock, flags);
167
168         /* After the final retire, the entire struct may be freed */
169         if (ref->retire)
170                 ref->retire(ref);
171
172         /* ... except if you wait on it, you must manage your own references! */
173         wake_up_var(ref);
174
175         /* Finally free the discarded timeline tree  */
176         rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
177                 GEM_BUG_ON(i915_active_fence_isset(&it->base));
178                 kmem_cache_free(global.slab_cache, it);
179         }
180 }
181
182 static void
183 active_work(struct work_struct *wrk)
184 {
185         struct i915_active *ref = container_of(wrk, typeof(*ref), work);
186
187         GEM_BUG_ON(!atomic_read(&ref->count));
188         if (atomic_add_unless(&ref->count, -1, 1))
189                 return;
190
191         __active_retire(ref);
192 }
193
194 static void
195 active_retire(struct i915_active *ref)
196 {
197         GEM_BUG_ON(!atomic_read(&ref->count));
198         if (atomic_add_unless(&ref->count, -1, 1))
199                 return;
200
201         if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
202                 queue_work(system_unbound_wq, &ref->work);
203                 return;
204         }
205
206         __active_retire(ref);
207 }
208
209 static inline struct dma_fence **
210 __active_fence_slot(struct i915_active_fence *active)
211 {
212         return (struct dma_fence ** __force)&active->fence;
213 }
214
215 static inline bool
216 active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
217 {
218         struct i915_active_fence *active =
219                 container_of(cb, typeof(*active), cb);
220
221         return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
222 }
223
224 static void
225 node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
226 {
227         if (active_fence_cb(fence, cb))
228                 active_retire(container_of(cb, struct active_node, base.cb)->ref);
229 }
230
231 static void
232 excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
233 {
234         if (active_fence_cb(fence, cb))
235                 active_retire(container_of(cb, struct i915_active, excl.cb));
236 }
237
238 static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
239 {
240         struct active_node *it;
241
242         GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */
243
244         /*
245          * We track the most recently used timeline to skip an rbtree search
246          * for the common case; under typical loads we never need the rbtree
247          * at all. We can reuse the last slot if it is empty, that is,
248          * after the previous activity has been retired, or if it matches the
249          * current timeline.
250          */
251         it = READ_ONCE(ref->cache);
252         if (it) {
253                 u64 cached = READ_ONCE(it->timeline);
254
255                 /* Once claimed, this slot will only belong to this idx */
256                 if (cached == idx)
257                         return it;
258
259 #ifdef CONFIG_64BIT /* for cmpxchg(u64) */
260                 /*
261                  * An unclaimed cache [.timeline=0] can only be claimed once.
262                  *
263                  * If the value is already non-zero, some other thread has
264                  * claimed the cache and we know that it does not match our
265                  * idx. If, and only if, the timeline is currently zero is it
266                  * worth competing to claim it atomically for ourselves (for
267                  * only the winner of that race will cmpxchg return the old
268                  * value of 0).
269                  */
270                 if (!cached && !cmpxchg(&it->timeline, 0, idx))
271                         return it;
272 #endif
273         }
274
275         BUILD_BUG_ON(offsetof(typeof(*it), node));
276
277         /* While active, the tree can only be built; not destroyed */
278         GEM_BUG_ON(i915_active_is_idle(ref));
279
280         it = fetch_node(ref->tree.rb_node);
281         while (it) {
282                 if (it->timeline < idx) {
283                         it = fetch_node(it->node.rb_right);
284                 } else if (it->timeline > idx) {
285                         it = fetch_node(it->node.rb_left);
286                 } else {
287                         WRITE_ONCE(ref->cache, it);
288                         break;
289                 }
290         }
291
292         /* NB: If the tree rotated beneath us, we may miss our target. */
293         return it;
294 }
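/*
 * A miss from the unlocked walk above is benign: active_instance() falls
 * back to a locked search-and-insert under ref->tree_lock, and
 * __active_fence() simply retries the lookup while holding the lock.
 */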
295
296 static struct i915_active_fence *
297 active_instance(struct i915_active *ref, u64 idx)
298 {
299         struct active_node *node, *prealloc;
300         struct rb_node **p, *parent;
301
302         node = __active_lookup(ref, idx);
303         if (likely(node))
304                 return &node->base;
305
306         /* Preallocate a replacement, just in case */
307         prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
308         if (!prealloc)
309                 return NULL;
310
311         spin_lock_irq(&ref->tree_lock);
312         GEM_BUG_ON(i915_active_is_idle(ref));
313
314         parent = NULL;
315         p = &ref->tree.rb_node;
316         while (*p) {
317                 parent = *p;
318
319                 node = rb_entry(parent, struct active_node, node);
320                 if (node->timeline == idx) {
321                         kmem_cache_free(global.slab_cache, prealloc);
322                         goto out;
323                 }
324
325                 if (node->timeline < idx)
326                         p = &parent->rb_right;
327                 else
328                         p = &parent->rb_left;
329         }
330
331         node = prealloc;
332         __i915_active_fence_init(&node->base, NULL, node_retire);
333         node->ref = ref;
334         node->timeline = idx;
335
336         rb_link_node(&node->node, parent, p);
337         rb_insert_color(&node->node, &ref->tree);
338
339 out:
340         WRITE_ONCE(ref->cache, node);
341         spin_unlock_irq(&ref->tree_lock);
342
343         return &node->base;
344 }
345
346 void __i915_active_init(struct i915_active *ref,
347                         int (*active)(struct i915_active *ref),
348                         void (*retire)(struct i915_active *ref),
349                         struct lock_class_key *mkey,
350                         struct lock_class_key *wkey)
351 {
352         unsigned long bits;
353
354         debug_active_init(ref);
355
356         ref->flags = 0;
357         ref->active = active;
358         ref->retire = ptr_unpack_bits(retire, &bits, 2);
359         if (bits & I915_ACTIVE_MAY_SLEEP)
360                 ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
361
362         spin_lock_init(&ref->tree_lock);
363         ref->tree = RB_ROOT;
364         ref->cache = NULL;
365
366         init_llist_head(&ref->preallocated_barriers);
367         atomic_set(&ref->count, 0);
368         __mutex_init(&ref->mutex, "i915_active", mkey);
369         __i915_active_fence_init(&ref->excl, NULL, excl_retire);
370         INIT_WORK(&ref->work, active_work);
371 #if IS_ENABLED(CONFIG_LOCKDEP)
372         lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
373 #endif
374 }
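/*
 * Typical usage is via the i915_active_init() macro (i915_active.h), which
 * supplies the static lock classes for us; see the call in
 * i915_active_create() at the bottom of this file. A minimal sketch, where
 * struct foo and the two callbacks are hypothetical:
 *
 *	struct foo {
 *		struct i915_active active;
 *	};
 *
 *	static int foo_active(struct i915_active *ref)
 *	{
 *		return 0;	// e.g. take a reference on the owner
 *	}
 *
 *	static void foo_retire(struct i915_active *ref)
 *	{
 *		// e.g. drop the reference taken in foo_active()
 *	}
 *
 *	i915_active_init(&foo->active, foo_active, foo_retire);
 */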
375
376 static bool ____active_del_barrier(struct i915_active *ref,
377                                    struct active_node *node,
378                                    struct intel_engine_cs *engine)
379
380 {
381         struct llist_node *head = NULL, *tail = NULL;
382         struct llist_node *pos, *next;
383
384         GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
385
386         /*
387          * Rebuild the llist excluding our node. We may perform this
388          * outside of the kernel_context timeline mutex and so someone
389          * else may be manipulating the engine->barrier_tasks, in
390          * which case either we or they will be upset :)
391          *
392          * A second __active_del_barrier() will report failure to claim
393          * the active_node and the caller will just shrug and know not to
394          * claim ownership of its node.
395          *
396          * A concurrent i915_request_add_active_barriers() will miss adding
397          * any of the tasks, but we will try again on the next pass -- and since
398          * we are actively using the barrier, we know that there will be
399          * at least another opportunity when we idle.
400          */
401         llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
402                 if (node == barrier_from_ll(pos)) {
403                         node = NULL;
404                         continue;
405                 }
406
407                 pos->next = head;
408                 head = pos;
409                 if (!tail)
410                         tail = pos;
411         }
412         if (head)
413                 llist_add_batch(head, tail, &engine->barrier_tasks);
414
415         return !node;
416 }
417
418 static bool
419 __active_del_barrier(struct i915_active *ref, struct active_node *node)
420 {
421         return ____active_del_barrier(ref, node, barrier_to_engine(node));
422 }
423
424 static bool
425 replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
426 {
427         if (!is_barrier(active)) /* proto-node used by our idle barrier? */
428                 return false;
429
430         /*
431          * This request is on the kernel_context timeline, and so
432          * we can use it to substitute for the pending idle-barrier
433          * request that we want to emit on the kernel_context.
434          */
435         __active_del_barrier(ref, node_from_active(active));
436         return true;
437 }
438
439 int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
440 {
441         struct i915_active_fence *active;
442         int err;
443
444         /* Prevent reaping in case we malloc/wait while building the tree */
445         err = i915_active_acquire(ref);
446         if (err)
447                 return err;
448
449         active = active_instance(ref, idx);
450         if (!active) {
451                 err = -ENOMEM;
452                 goto out;
453         }
454
455         if (replace_barrier(ref, active)) {
456                 RCU_INIT_POINTER(active->fence, NULL);
457                 atomic_dec(&ref->count);
458         }
459         if (!__i915_active_fence_set(active, fence))
460                 __i915_active_acquire(ref);
461
462 out:
463         i915_active_release(ref);
464         return err;
465 }
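/*
 * Illustrative call (a sketch; obj->active is a hypothetical embedded
 * tracker and error handling is elided):
 *
 *	err = i915_active_ref(&obj->active,
 *			      i915_request_timeline(rq)->fence_context,
 *			      &rq->fence);
 *
 * The idx is normally the fence context of the request's timeline, so each
 * timeline owns a single slot in the rbtree and only the most recent fence
 * per timeline is retained.
 */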
466
467 static struct dma_fence *
468 __i915_active_set_fence(struct i915_active *ref,
469                         struct i915_active_fence *active,
470                         struct dma_fence *fence)
471 {
472         struct dma_fence *prev;
473
474         if (replace_barrier(ref, active)) {
475                 RCU_INIT_POINTER(active->fence, fence);
476                 return NULL;
477         }
478
479         rcu_read_lock();
480         prev = __i915_active_fence_set(active, fence);
481         if (prev)
482                 prev = dma_fence_get_rcu(prev);
483         else
484                 __i915_active_acquire(ref);
485         rcu_read_unlock();
486
487         return prev;
488 }
489
490 static struct i915_active_fence *
491 __active_fence(struct i915_active *ref, u64 idx)
492 {
493         struct active_node *it;
494
495         it = __active_lookup(ref, idx);
496         if (unlikely(!it)) { /* Contention with parallel tree builders! */
497                 spin_lock_irq(&ref->tree_lock);
498                 it = __active_lookup(ref, idx);
499                 spin_unlock_irq(&ref->tree_lock);
500         }
501         GEM_BUG_ON(!it); /* slot must be preallocated */
502
503         return &it->base;
504 }
505
506 struct dma_fence *
507 __i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
508 {
509         /* Only valid while active, see i915_active_acquire_for_context() */
510         return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
511 }
512
513 struct dma_fence *
514 i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
515 {
516         /* We expect the caller to manage the exclusive timeline ordering */
517         return __i915_active_set_fence(ref, &ref->excl, f);
518 }
519
520 bool i915_active_acquire_if_busy(struct i915_active *ref)
521 {
522         debug_active_assert(ref);
523         return atomic_add_unless(&ref->count, 1, 0);
524 }
525
526 static void __i915_active_activate(struct i915_active *ref)
527 {
528         spin_lock_irq(&ref->tree_lock); /* __active_retire() */
529         if (!atomic_fetch_inc(&ref->count))
530                 debug_active_activate(ref);
531         spin_unlock_irq(&ref->tree_lock);
532 }
533
534 int i915_active_acquire(struct i915_active *ref)
535 {
536         int err;
537
538         if (i915_active_acquire_if_busy(ref))
539                 return 0;
540
541         if (!ref->active) {
542                 __i915_active_activate(ref);
543                 return 0;
544         }
545
546         err = mutex_lock_interruptible(&ref->mutex);
547         if (err)
548                 return err;
549
550         if (likely(!i915_active_acquire_if_busy(ref))) {
551                 err = ref->active(ref);
552                 if (!err)
553                         __i915_active_activate(ref);
554         }
555
556         mutex_unlock(&ref->mutex);
557
558         return err;
559 }
560
561 int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
562 {
563         struct i915_active_fence *active;
564         int err;
565
566         err = i915_active_acquire(ref);
567         if (err)
568                 return err;
569
570         active = active_instance(ref, idx);
571         if (!active) {
572                 i915_active_release(ref);
573                 return -ENOMEM;
574         }
575
576         return 0; /* return with active ref */
577 }
578
579 void i915_active_release(struct i915_active *ref)
580 {
581         debug_active_assert(ref);
582         active_retire(ref);
583 }
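/*
 * Acquire/release bracket a busy phase, e.g. (sketch only):
 *
 *	err = i915_active_acquire(ref);
 *	if (err)
 *		return err;
 *	... record fences with i915_active_ref() or i915_active_set_exclusive() ...
 *	i915_active_release(ref);
 *
 * The first acquire invokes ref->active() (if provided) under ref->mutex;
 * ref->retire() runs once the last reference, whether an acquire or a
 * still-tracked fence, has been dropped.
 */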
584
585 static void enable_signaling(struct i915_active_fence *active)
586 {
587         struct dma_fence *fence;
588
589         if (unlikely(is_barrier(active)))
590                 return;
591
592         fence = i915_active_fence_get(active);
593         if (!fence)
594                 return;
595
596         dma_fence_enable_sw_signaling(fence);
597         dma_fence_put(fence);
598 }
599
600 static int flush_barrier(struct active_node *it)
601 {
602         struct intel_engine_cs *engine;
603
604         if (likely(!is_barrier(&it->base)))
605                 return 0;
606
607         engine = __barrier_to_engine(it);
608         smp_rmb(); /* serialise with add_active_barriers */
609         if (!is_barrier(&it->base))
610                 return 0;
611
612         return intel_engine_flush_barriers(engine);
613 }
614
615 static int flush_lazy_signals(struct i915_active *ref)
616 {
617         struct active_node *it, *n;
618         int err = 0;
619
620         enable_signaling(&ref->excl);
621         rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
622                 err = flush_barrier(it); /* unconnected idle barrier? */
623                 if (err)
624                         break;
625
626                 enable_signaling(&it->base);
627         }
628
629         return err;
630 }
631
632 int __i915_active_wait(struct i915_active *ref, int state)
633 {
634         int err;
635
636         might_sleep();
637
638         if (!i915_active_acquire_if_busy(ref))
639                 return 0;
640
641         /* Any fence added after the wait begins will not be auto-signaled */
642         err = flush_lazy_signals(ref);
643         i915_active_release(ref);
644         if (err)
645                 return err;
646
647         if (!i915_active_is_idle(ref) &&
648             ___wait_var_event(ref, i915_active_is_idle(ref),
649                               state, 0, 0, schedule()))
650                 return -EINTR;
651
652         flush_work(&ref->work);
653         return 0;
654 }
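/*
 * Most callers use the i915_active_wait() wrapper from i915_active.h, which
 * (at the time of writing) passes TASK_INTERRUPTIBLE for @state.
 */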
655
656 static int __await_active(struct i915_active_fence *active,
657                           int (*fn)(void *arg, struct dma_fence *fence),
658                           void *arg)
659 {
660         struct dma_fence *fence;
661
662         if (is_barrier(active)) /* XXX flush the barrier? */
663                 return 0;
664
665         fence = i915_active_fence_get(active);
666         if (fence) {
667                 int err;
668
669                 err = fn(arg, fence);
670                 dma_fence_put(fence);
671                 if (err < 0)
672                         return err;
673         }
674
675         return 0;
676 }
677
678 struct wait_barrier {
679         struct wait_queue_entry base;
680         struct i915_active *ref;
681 };
682
683 static int
684 barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
685 {
686         struct wait_barrier *wb = container_of(wq, typeof(*wb), base);
687
688         if (i915_active_is_idle(wb->ref)) {
689                 list_del(&wq->entry);
690                 i915_sw_fence_complete(wq->private);
691                 kfree(wq);
692         }
693
694         return 0;
695 }
696
697 static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
698 {
699         struct wait_barrier *wb;
700
701         wb = kmalloc(sizeof(*wb), GFP_KERNEL);
702         if (unlikely(!wb))
703                 return -ENOMEM;
704
705         GEM_BUG_ON(i915_active_is_idle(ref));
706         if (!i915_sw_fence_await(fence)) {
707                 kfree(wb);
708                 return -EINVAL;
709         }
710
711         wb->base.flags = 0;
712         wb->base.func = barrier_wake;
713         wb->base.private = fence;
714         wb->ref = ref;
715
716         add_wait_queue(__var_waitqueue(ref), &wb->base);
717         return 0;
718 }
719
720 static int await_active(struct i915_active *ref,
721                         unsigned int flags,
722                         int (*fn)(void *arg, struct dma_fence *fence),
723                         void *arg, struct i915_sw_fence *barrier)
724 {
725         int err = 0;
726
727         if (!i915_active_acquire_if_busy(ref))
728                 return 0;
729
730         if (flags & I915_ACTIVE_AWAIT_EXCL &&
731             rcu_access_pointer(ref->excl.fence)) {
732                 err = __await_active(&ref->excl, fn, arg);
733                 if (err)
734                         goto out;
735         }
736
737         if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
738                 struct active_node *it, *n;
739
740                 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
741                         err = __await_active(&it->base, fn, arg);
742                         if (err)
743                                 goto out;
744                 }
745         }
746
747         if (flags & I915_ACTIVE_AWAIT_BARRIER) {
748                 err = flush_lazy_signals(ref);
749                 if (err)
750                         goto out;
751
752                 err = __await_barrier(ref, barrier);
753                 if (err)
754                         goto out;
755         }
756
757 out:
758         i915_active_release(ref);
759         return err;
760 }
761
762 static int rq_await_fence(void *arg, struct dma_fence *fence)
763 {
764         return i915_request_await_dma_fence(arg, fence);
765 }
766
767 int i915_request_await_active(struct i915_request *rq,
768                               struct i915_active *ref,
769                               unsigned int flags)
770 {
771         return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
772 }
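/*
 * For example, to make a request wait upon everything currently tracked by
 * ref (sketch; combine the flags as required):
 *
 *	err = i915_request_await_active(rq, ref,
 *					I915_ACTIVE_AWAIT_EXCL |
 *					I915_ACTIVE_AWAIT_ACTIVE);
 */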
773
774 static int sw_await_fence(void *arg, struct dma_fence *fence)
775 {
776         return i915_sw_fence_await_dma_fence(arg, fence, 0,
777                                              GFP_NOWAIT | __GFP_NOWARN);
778 }
779
780 int i915_sw_fence_await_active(struct i915_sw_fence *fence,
781                                struct i915_active *ref,
782                                unsigned int flags)
783 {
784         return await_active(ref, flags, sw_await_fence, fence, fence);
785 }
786
787 void i915_active_fini(struct i915_active *ref)
788 {
789         debug_active_fini(ref);
790         GEM_BUG_ON(atomic_read(&ref->count));
791         GEM_BUG_ON(work_pending(&ref->work));
792         mutex_destroy(&ref->mutex);
793
794         if (ref->cache)
795                 kmem_cache_free(global.slab_cache, ref->cache);
796 }
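/*
 * Idle barriers, an overview of the flow implemented below:
 *
 * 1. i915_active_acquire_preallocate_barrier() allocates (or reuses) one
 *    proto-node per physical engine, marking its fence slot with
 *    ERR_PTR(-EAGAIN) and stashing the engine pointer in cb.node.prev.
 * 2. i915_active_acquire_barrier() inserts those proto-nodes into ref->tree
 *    and queues them on engine->barrier_tasks.
 * 3. i915_request_add_active_barriers() attaches the queued barriers to the
 *    next kernel_context request, so node_retire() fires, dropping the
 *    i915_active reference, when that request completes.
 */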
797
798 static inline bool is_idle_barrier(struct active_node *node, u64 idx)
799 {
800         return node->timeline == idx && !i915_active_fence_isset(&node->base);
801 }
802
803 static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
804 {
805         struct rb_node *prev, *p;
806
807         if (RB_EMPTY_ROOT(&ref->tree))
808                 return NULL;
809
810         GEM_BUG_ON(i915_active_is_idle(ref));
811
812         /*
813          * Try to reuse any existing barrier nodes already allocated for this
814          * i915_active, due to overlapping active phases there is likely a
815          * node kept alive (as we reuse before parking). We prefer to reuse
816          * completely idle barriers (less hassle in manipulating the llists),
817          * but otherwise any will do.
818          */
819         if (ref->cache && is_idle_barrier(ref->cache, idx)) {
820                 p = &ref->cache->node;
821                 goto match;
822         }
823
824         prev = NULL;
825         p = ref->tree.rb_node;
826         while (p) {
827                 struct active_node *node =
828                         rb_entry(p, struct active_node, node);
829
830                 if (is_idle_barrier(node, idx))
831                         goto match;
832
833                 prev = p;
834                 if (node->timeline < idx)
835                         p = READ_ONCE(p->rb_right);
836                 else
837                         p = READ_ONCE(p->rb_left);
838         }
839
840         /*
841          * No quick match, but we did find the leftmost rb_node for the
842          * kernel_context. Walk the rb_tree in-order to see if there were
843          * any idle-barriers on this timeline that we missed, or just use
844          * the first pending barrier.
845          */
846         for (p = prev; p; p = rb_next(p)) {
847                 struct active_node *node =
848                         rb_entry(p, struct active_node, node);
849                 struct intel_engine_cs *engine;
850
851                 if (node->timeline > idx)
852                         break;
853
854                 if (node->timeline < idx)
855                         continue;
856
857                 if (is_idle_barrier(node, idx))
858                         goto match;
859
860                 /*
861                  * The list of pending barriers is protected by the
862                  * kernel_context timeline, which notably we do not hold
863                  * here. i915_request_add_active_barriers() may consume
864                  * the barrier before we claim it, so we have to check
865                  * for success.
866                  */
867                 engine = __barrier_to_engine(node);
868                 smp_rmb(); /* serialise with add_active_barriers */
869                 if (is_barrier(&node->base) &&
870                     ____active_del_barrier(ref, node, engine))
871                         goto match;
872         }
873
874         return NULL;
875
876 match:
877         spin_lock_irq(&ref->tree_lock);
878         rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
879         if (p == &ref->cache->node)
880                 WRITE_ONCE(ref->cache, NULL);
881         spin_unlock_irq(&ref->tree_lock);
882
883         return rb_entry(p, struct active_node, node);
884 }
885
886 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
887                                             struct intel_engine_cs *engine)
888 {
889         intel_engine_mask_t tmp, mask = engine->mask;
890         struct llist_node *first = NULL, *last = NULL;
891         struct intel_gt *gt = engine->gt;
892
893         GEM_BUG_ON(i915_active_is_idle(ref));
894
895         /* Wait until the previous preallocation is completed */
896         while (!llist_empty(&ref->preallocated_barriers))
897                 cond_resched();
898
899         /*
900          * Preallocate a node for each physical engine supporting the target
901          * engine (remember virtual engines have more than one sibling).
902          * We can then use the preallocated nodes in
903          * i915_active_acquire_barrier()
904          */
905         GEM_BUG_ON(!mask);
906         for_each_engine_masked(engine, gt, mask, tmp) {
907                 u64 idx = engine->kernel_context->timeline->fence_context;
908                 struct llist_node *prev = first;
909                 struct active_node *node;
910
911                 rcu_read_lock();
912                 node = reuse_idle_barrier(ref, idx);
913                 rcu_read_unlock();
914                 if (!node) {
915                         node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
916                         if (!node)
917                                 goto unwind;
918
919                         RCU_INIT_POINTER(node->base.fence, NULL);
920                         node->base.cb.func = node_retire;
921                         node->timeline = idx;
922                         node->ref = ref;
923                 }
924
925                 if (!i915_active_fence_isset(&node->base)) {
926                         /*
927                          * Mark this as being *our* unconnected proto-node.
928                          *
929                          * Since this node is not in any list, and we have
930                          * decoupled it from the rbtree, we can reuse the
931                          * request to indicate this is an idle-barrier node
932                          * and then we can use the rb_node and list pointers
933                          * for our tracking of the pending barrier.
934                          */
935                         RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
936                         node->base.cb.node.prev = (void *)engine;
937                         __i915_active_acquire(ref);
938                 }
939                 GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
940
941                 GEM_BUG_ON(barrier_to_engine(node) != engine);
942                 first = barrier_to_ll(node);
943                 first->next = prev;
944                 if (!last)
945                         last = first;
946                 intel_engine_pm_get(engine);
947         }
948
949         GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
950         llist_add_batch(first, last, &ref->preallocated_barriers);
951
952         return 0;
953
954 unwind:
955         while (first) {
956                 struct active_node *node = barrier_from_ll(first);
957
958                 first = first->next;
959
960                 atomic_dec(&ref->count);
961                 intel_engine_pm_put(barrier_to_engine(node));
962
963                 kmem_cache_free(global.slab_cache, node);
964         }
965         return -ENOMEM;
966 }
967
968 void i915_active_acquire_barrier(struct i915_active *ref)
969 {
970         struct llist_node *pos, *next;
971         unsigned long flags;
972
973         GEM_BUG_ON(i915_active_is_idle(ref));
974
975         /*
976          * Transfer the list of preallocated barriers into the
977          * i915_active rbtree, but only as proto-nodes. They will be
978          * populated by i915_request_add_active_barriers() to point to the
979          * request that will eventually release them.
980          */
981         llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
982                 struct active_node *node = barrier_from_ll(pos);
983                 struct intel_engine_cs *engine = barrier_to_engine(node);
984                 struct rb_node **p, *parent;
985
986                 spin_lock_irqsave_nested(&ref->tree_lock, flags,
987                                          SINGLE_DEPTH_NESTING);
988                 parent = NULL;
989                 p = &ref->tree.rb_node;
990                 while (*p) {
991                         struct active_node *it;
992
993                         parent = *p;
994
995                         it = rb_entry(parent, struct active_node, node);
996                         if (it->timeline < node->timeline)
997                                 p = &parent->rb_right;
998                         else
999                                 p = &parent->rb_left;
1000                 }
1001                 rb_link_node(&node->node, parent, p);
1002                 rb_insert_color(&node->node, &ref->tree);
1003                 spin_unlock_irqrestore(&ref->tree_lock, flags);
1004
1005                 GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
1006                 llist_add(barrier_to_ll(node), &engine->barrier_tasks);
1007                 intel_engine_pm_put_delay(engine, 1);
1008         }
1009 }
1010
1011 static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
1012 {
1013         return __active_fence_slot(&barrier_from_ll(node)->base);
1014 }
1015
1016 void i915_request_add_active_barriers(struct i915_request *rq)
1017 {
1018         struct intel_engine_cs *engine = rq->engine;
1019         struct llist_node *node, *next;
1020         unsigned long flags;
1021
1022         GEM_BUG_ON(!intel_context_is_barrier(rq->context));
1023         GEM_BUG_ON(intel_engine_is_virtual(engine));
1024         GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
1025
1026         node = llist_del_all(&engine->barrier_tasks);
1027         if (!node)
1028                 return;
1029         /*
1030          * Attach the list of proto-fences to the in-flight request such
1031          * that the parent i915_active will be released when this request
1032          * is retired.
1033          */
1034         spin_lock_irqsave(&rq->lock, flags);
1035         llist_for_each_safe(node, next, node) {
1036                 /* serialise with reuse_idle_barrier */
1037                 smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
1038                 list_add_tail((struct list_head *)node, &rq->fence.cb_list);
1039         }
1040         spin_unlock_irqrestore(&rq->lock, flags);
1041 }
1042
1043 /**
1044  * __i915_active_fence_set: Update the last active fence along its timeline
1045  * @active: the active tracker
1046  * @fence: the new fence (under construction)
1047  *
1048  * Records the new @fence as the last active fence along its timeline in
1049  * this active tracker, moving the tracking callbacks from the previous
1050  * fence onto this one. Returns the previous fence (if not already completed),
1051  * which the caller must ensure is executed before the new fence. To ensure
1052  * that the order of fences within the timeline of the i915_active_fence is
1053  * understood, it should be locked by the caller.
1054  */
1055 struct dma_fence *
1056 __i915_active_fence_set(struct i915_active_fence *active,
1057                         struct dma_fence *fence)
1058 {
1059         struct dma_fence *prev;
1060         unsigned long flags;
1061
1062         if (fence == rcu_access_pointer(active->fence))
1063                 return fence;
1064
1065         GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
1066
1067         /*
1068          * Consider that we have two threads arriving (A and B), with
1069          * C already resident as the active->fence.
1070          *
1071          * A does the xchg first, and so it sees C or NULL depending
1072          * on the timing of the interrupt handler. If it is NULL, the
1073          * previous fence must have been signaled and we know that
1074          * we are first on the timeline. If it is still present,
1075          * we acquire the lock on that fence and serialise with the interrupt
1076          * handler, in the process removing it from any future interrupt
1077          * callback. A will then wait on C before executing (if present).
1078          *
1079          * As B is second, it sees A as the previous fence and so waits for
1080          * it to complete its transition and takes over the occupancy for
1081          * itself -- remembering that it needs to wait on A before executing.
1082          *
1083          * Note the strong ordering of the timeline also provides consistent
1084          * nesting rules for the fence->lock; the inner lock is always the
1085          * older lock.
1086          */
1087         spin_lock_irqsave(fence->lock, flags);
1088         prev = xchg(__active_fence_slot(active), fence);
1089         if (prev) {
1090                 GEM_BUG_ON(prev == fence);
1091                 spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
1092                 __list_del_entry(&active->cb.node);
1093                 spin_unlock(prev->lock); /* serialise with prev->cb_list */
1094         }
1095         list_add_tail(&active->cb.node, &fence->cb_list);
1096         spin_unlock_irqrestore(fence->lock, flags);
1097
1098         return prev;
1099 }
1100
1101 int i915_active_fence_set(struct i915_active_fence *active,
1102                           struct i915_request *rq)
1103 {
1104         struct dma_fence *fence;
1105         int err = 0;
1106
1107         /* Must maintain timeline ordering wrt previous active requests */
1108         rcu_read_lock();
1109         fence = __i915_active_fence_set(active, &rq->fence);
1110         if (fence) /* but the previous fence may not belong to that timeline! */
1111                 fence = dma_fence_get_rcu(fence);
1112         rcu_read_unlock();
1113         if (fence) {
1114                 err = i915_request_await_dma_fence(rq, fence);
1115                 dma_fence_put(fence);
1116         }
1117
1118         return err;
1119 }
1120
1121 void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
1122 {
1123         active_fence_cb(fence, cb);
1124 }
1125
1126 struct auto_active {
1127         struct i915_active base;
1128         struct kref ref;
1129 };
1130
1131 struct i915_active *i915_active_get(struct i915_active *ref)
1132 {
1133         struct auto_active *aa = container_of(ref, typeof(*aa), base);
1134
1135         kref_get(&aa->ref);
1136         return &aa->base;
1137 }
1138
1139 static void auto_release(struct kref *ref)
1140 {
1141         struct auto_active *aa = container_of(ref, typeof(*aa), ref);
1142
1143         i915_active_fini(&aa->base);
1144         kfree(aa);
1145 }
1146
1147 void i915_active_put(struct i915_active *ref)
1148 {
1149         struct auto_active *aa = container_of(ref, typeof(*aa), base);
1150
1151         kref_put(&aa->ref, auto_release);
1152 }
1153
1154 static int auto_active(struct i915_active *ref)
1155 {
1156         i915_active_get(ref);
1157         return 0;
1158 }
1159
1160 static void auto_retire(struct i915_active *ref)
1161 {
1162         i915_active_put(ref);
1163 }
1164
1165 struct i915_active *i915_active_create(void)
1166 {
1167         struct auto_active *aa;
1168
1169         aa = kmalloc(sizeof(*aa), GFP_KERNEL);
1170         if (!aa)
1171                 return NULL;
1172
1173         kref_init(&aa->ref);
1174         i915_active_init(&aa->base, auto_active, auto_retire);
1175
1176         return &aa->base;
1177 }
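/*
 * Usage sketch for the self-managing variant (illustrative only):
 *
 *	struct i915_active *ref = i915_active_create();
 *	if (!ref)
 *		return -ENOMEM;
 *	... use ref like any other i915_active ...
 *	i915_active_put(ref);
 *
 * auto_active()/auto_retire() take and drop an extra kref around each busy
 * phase, so the allocation cannot be freed while fences are still tracked.
 */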
1178
1179 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1180 #include "selftests/i915_active.c"
1181 #endif
1182
1183 static void i915_global_active_shrink(void)
1184 {
1185         kmem_cache_shrink(global.slab_cache);
1186 }
1187
1188 static void i915_global_active_exit(void)
1189 {
1190         kmem_cache_destroy(global.slab_cache);
1191 }
1192
1193 static struct i915_global_active global = { {
1194         .shrink = i915_global_active_shrink,
1195         .exit = i915_global_active_exit,
1196 } };
1197
1198 int __init i915_global_active_init(void)
1199 {
1200         global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
1201         if (!global.slab_cache)
1202                 return -ENOMEM;
1203
1204         i915_global_register(&global.base);
1205         return 0;
1206 }