1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2019 Intel Corporation
5  */
6
7 #include <linux/debugobjects.h>
8
9 #include "gt/intel_context.h"
10 #include "gt/intel_engine_heartbeat.h"
11 #include "gt/intel_engine_pm.h"
12 #include "gt/intel_ring.h"
13
14 #include "i915_drv.h"
15 #include "i915_active.h"
16 #include "i915_globals.h"
17
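/*
 * Usage sketch (illustrative only -- the container struct and callbacks
 * below are hypothetical and not defined in this file): an i915_active is
 * embedded in a longer-lived object and tracks the fences (requests) that
 * still reference it.
 *
 *      struct obj {
 *              struct i915_active active;
 *      };
 *
 *      i915_active_init(&obj->active, obj_activate, obj_retire);
 *      err = i915_active_acquire(&obj->active);
 *      if (!err) {
 *              err = i915_active_ref(&obj->active,
 *                                    tl->fence_context, &rq->fence);
 *              i915_active_release(&obj->active);
 *      }
 *      ...
 *      i915_active_wait(&obj->active);
 *      i915_active_fini(&obj->active);
 *
 * obj_retire() is called once the last tracked fence has signalled and the
 * last acquire has been released; tl and rq stand in for the caller's
 * intel_timeline and i915_request.
 */
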
18 /*
19  * Active refs memory management
20  *
21  * To be more economical with memory, we reap all the i915_active trees as
22  * they idle (when we know the active requests are inactive) and allocate the
23  * nodes from a local slab cache in the hope of reducing fragmentation.
24  */
25 static struct i915_global_active {
26         struct i915_global base;
27         struct kmem_cache *slab_cache;
28 } global;
29
30 struct active_node {
31         struct rb_node node;
32         struct i915_active_fence base;
33         struct i915_active *ref;
34         u64 timeline;
35 };
36
37 #define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)
38
39 static inline struct active_node *
40 node_from_active(struct i915_active_fence *active)
41 {
42         return container_of(active, struct active_node, base);
43 }
44
45 #define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
46
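/*
 * Barrier proto-nodes: while a preallocated idle barrier waits to be attached
 * to a kernel_context request, its fence slot holds ERR_PTR(-EAGAIN) (set in
 * i915_active_acquire_preallocate_barrier()), which is what is_barrier()
 * detects below. As the callback is not yet on any fence's cb_list, the list
 * pointers in base.cb.node are reused: .prev stashes the target engine and
 * the node itself doubles as an llist_node on engine->barrier_tasks, until
 * i915_request_add_active_barriers() swaps in the real request fence.
 */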
47 static inline bool is_barrier(const struct i915_active_fence *active)
48 {
49         return IS_ERR(rcu_access_pointer(active->fence));
50 }
51
52 static inline struct llist_node *barrier_to_ll(struct active_node *node)
53 {
54         GEM_BUG_ON(!is_barrier(&node->base));
55         return (struct llist_node *)&node->base.cb.node;
56 }
57
58 static inline struct intel_engine_cs *
59 __barrier_to_engine(struct active_node *node)
60 {
61         return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
62 }
63
64 static inline struct intel_engine_cs *
65 barrier_to_engine(struct active_node *node)
66 {
67         GEM_BUG_ON(!is_barrier(&node->base));
68         return __barrier_to_engine(node);
69 }
70
71 static inline struct active_node *barrier_from_ll(struct llist_node *x)
72 {
73         return container_of((struct list_head *)x,
74                             struct active_node, base.cb.node);
75 }
76
77 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
78
79 static void *active_debug_hint(void *addr)
80 {
81         struct i915_active *ref = addr;
82
83         return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
84 }
85
86 static const struct debug_obj_descr active_debug_desc = {
87         .name = "i915_active",
88         .debug_hint = active_debug_hint,
89 };
90
91 static void debug_active_init(struct i915_active *ref)
92 {
93         debug_object_init(ref, &active_debug_desc);
94 }
95
96 static void debug_active_activate(struct i915_active *ref)
97 {
98         lockdep_assert_held(&ref->tree_lock);
99         if (!atomic_read(&ref->count)) /* before the first inc */
100                 debug_object_activate(ref, &active_debug_desc);
101 }
102
103 static void debug_active_deactivate(struct i915_active *ref)
104 {
105         lockdep_assert_held(&ref->tree_lock);
106         if (!atomic_read(&ref->count)) /* after the last dec */
107                 debug_object_deactivate(ref, &active_debug_desc);
108 }
109
110 static void debug_active_fini(struct i915_active *ref)
111 {
112         debug_object_free(ref, &active_debug_desc);
113 }
114
115 static void debug_active_assert(struct i915_active *ref)
116 {
117         debug_object_assert_init(ref, &active_debug_desc);
118 }
119
120 #else
121
122 static inline void debug_active_init(struct i915_active *ref) { }
123 static inline void debug_active_activate(struct i915_active *ref) { }
124 static inline void debug_active_deactivate(struct i915_active *ref) { }
125 static inline void debug_active_fini(struct i915_active *ref) { }
126 static inline void debug_active_assert(struct i915_active *ref) { }
127
128 #endif
129
130 static void
131 __active_retire(struct i915_active *ref)
132 {
133         struct rb_root root = RB_ROOT;
134         struct active_node *it, *n;
135         unsigned long flags;
136
137         GEM_BUG_ON(i915_active_is_idle(ref));
138
139         /* return the unused nodes to our slabcache -- flushing the allocator */
140         if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
141                 return;
142
143         GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
144         debug_active_deactivate(ref);
145
146         /* Even if we have not used the cache, we may still have a barrier */
147         if (!ref->cache)
148                 ref->cache = fetch_node(ref->tree.rb_node);
149
150         /* Keep the MRU cached node for reuse */
151         if (ref->cache) {
152                 /* Discard all other nodes in the tree */
153                 rb_erase(&ref->cache->node, &ref->tree);
154                 root = ref->tree;
155
156                 /* Rebuild the tree with only the cached node */
157                 rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
158                 rb_insert_color(&ref->cache->node, &ref->tree);
159                 GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
160
161                 /* Make the cached node available for reuse with any timeline */
162                 if (IS_ENABLED(CONFIG_64BIT))
163                         ref->cache->timeline = 0; /* needs cmpxchg(u64) */
164         }
165
166         spin_unlock_irqrestore(&ref->tree_lock, flags);
167
168         /* After the final retire, the entire struct may be freed */
169         if (ref->retire)
170                 ref->retire(ref);
171
172         /* ... except if you wait on it, you must manage your own references! */
173         wake_up_var(ref);
174
175         /* Finally free the discarded timeline tree  */
176         rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
177                 GEM_BUG_ON(i915_active_fence_isset(&it->base));
178                 kmem_cache_free(global.slab_cache, it);
179         }
180 }
181
182 static void
183 active_work(struct work_struct *wrk)
184 {
185         struct i915_active *ref = container_of(wrk, typeof(*ref), work);
186
187         GEM_BUG_ON(!atomic_read(&ref->count));
188         if (atomic_add_unless(&ref->count, -1, 1))
189                 return;
190
191         __active_retire(ref);
192 }
193
194 static void
195 active_retire(struct i915_active *ref)
196 {
197         GEM_BUG_ON(!atomic_read(&ref->count));
198         if (atomic_add_unless(&ref->count, -1, 1))
199                 return;
200
201         if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
202                 queue_work(system_unbound_wq, &ref->work);
203                 return;
204         }
205
206         __active_retire(ref);
207 }
208
209 static inline struct dma_fence **
210 __active_fence_slot(struct i915_active_fence *active)
211 {
212         return (struct dma_fence ** __force)&active->fence;
213 }
214
215 static inline bool
216 active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
217 {
218         struct i915_active_fence *active =
219                 container_of(cb, typeof(*active), cb);
220
221         return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
222 }
223
224 static void
225 node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
226 {
227         if (active_fence_cb(fence, cb))
228                 active_retire(container_of(cb, struct active_node, base.cb)->ref);
229 }
230
231 static void
232 excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
233 {
234         if (active_fence_cb(fence, cb))
235                 active_retire(container_of(cb, struct i915_active, excl.cb));
236 }
237
238 static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
239 {
240         struct active_node *it;
241
242         GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */
243
244         /*
245          * We track the most recently used timeline to skip an rbtree search
246          * for the common case; under typical loads we never need the rbtree
247          * at all. We can reuse the last slot if it is empty, that is,
248          * after the previous activity has been retired, or if it matches the
249          * current timeline.
250          */
251         it = READ_ONCE(ref->cache);
252         if (it) {
253                 u64 cached = READ_ONCE(it->timeline);
254
255                 /* Once claimed, this slot will only belong to this idx */
256                 if (cached == idx)
257                         return it;
258
259 #ifdef CONFIG_64BIT /* for cmpxchg(u64) */
260                 /*
261                  * An unclaimed cache [.timeline=0] can only be claimed once.
262                  *
263                  * If the value is already non-zero, some other thread has
264                  * claimed the cache and we know that it does not match our
265                  * idx. If, and only if, the timeline is currently zero is it
266                  * worth competing to claim it atomically for ourselves (only
267                  * the winner of that race will see cmpxchg return the old
268                  * value of 0).
269                  */
270                 if (!cached && !cmpxchg(&it->timeline, 0, idx))
271                         return it;
272 #endif
273         }
274
275         BUILD_BUG_ON(offsetof(typeof(*it), node));
276
277         /* While active, the tree can only be built; not destroyed */
278         GEM_BUG_ON(i915_active_is_idle(ref));
279
280         it = fetch_node(ref->tree.rb_node);
281         while (it) {
282                 if (it->timeline < idx) {
283                         it = fetch_node(it->node.rb_right);
284                 } else if (it->timeline > idx) {
285                         it = fetch_node(it->node.rb_left);
286                 } else {
287                         WRITE_ONCE(ref->cache, it);
288                         break;
289                 }
290         }
291
292         /* NB: If the tree rotated beneath us, we may miss our target. */
293         return it;
294 }
295
296 static struct i915_active_fence *
297 active_instance(struct i915_active *ref, u64 idx)
298 {
299         struct active_node *node, *prealloc;
300         struct rb_node **p, *parent;
301
302         node = __active_lookup(ref, idx);
303         if (likely(node))
304                 return &node->base;
305
306         /* Preallocate a replacement, just in case */
307         prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
308         if (!prealloc)
309                 return NULL;
310
311         spin_lock_irq(&ref->tree_lock);
312         GEM_BUG_ON(i915_active_is_idle(ref));
313
314         parent = NULL;
315         p = &ref->tree.rb_node;
316         while (*p) {
317                 parent = *p;
318
319                 node = rb_entry(parent, struct active_node, node);
320                 if (node->timeline == idx) {
321                         kmem_cache_free(global.slab_cache, prealloc);
322                         goto out;
323                 }
324
325                 if (node->timeline < idx)
326                         p = &parent->rb_right;
327                 else
328                         p = &parent->rb_left;
329         }
330
331         node = prealloc;
332         __i915_active_fence_init(&node->base, NULL, node_retire);
333         node->ref = ref;
334         node->timeline = idx;
335
336         rb_link_node(&node->node, parent, p);
337         rb_insert_color(&node->node, &ref->tree);
338
339 out:
340         WRITE_ONCE(ref->cache, node);
341         spin_unlock_irq(&ref->tree_lock);
342
343         return &node->base;
344 }
345
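/*
 * Note: the retire callback pointer may arrive with I915_ACTIVE_MAY_SLEEP
 * packed into its low bits (callers are expected to pack it via a helper in
 * i915_active.h). When set, retirement is deferred to system_unbound_wq (see
 * active_retire()) so that a retire callback that may sleep is never invoked
 * directly from the fence signalling path.
 */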
346 void __i915_active_init(struct i915_active *ref,
347                         int (*active)(struct i915_active *ref),
348                         void (*retire)(struct i915_active *ref),
349                         struct lock_class_key *mkey,
350                         struct lock_class_key *wkey)
351 {
352         unsigned long bits;
353
354         debug_active_init(ref);
355
356         ref->flags = 0;
357         ref->active = active;
358         ref->retire = ptr_unpack_bits(retire, &bits, 2);
359         if (bits & I915_ACTIVE_MAY_SLEEP)
360                 ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
361
362         spin_lock_init(&ref->tree_lock);
363         ref->tree = RB_ROOT;
364         ref->cache = NULL;
365
366         init_llist_head(&ref->preallocated_barriers);
367         atomic_set(&ref->count, 0);
368         __mutex_init(&ref->mutex, "i915_active", mkey);
369         __i915_active_fence_init(&ref->excl, NULL, excl_retire);
370         INIT_WORK(&ref->work, active_work);
371 #if IS_ENABLED(CONFIG_LOCKDEP)
372         lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
373 #endif
374 }
375
376 static bool ____active_del_barrier(struct i915_active *ref,
377                                    struct active_node *node,
378                                    struct intel_engine_cs *engine)
379
380 {
381         struct llist_node *head = NULL, *tail = NULL;
382         struct llist_node *pos, *next;
383
384         GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
385
386         /*
387          * Rebuild the llist excluding our node. We may perform this
388          * outside of the kernel_context timeline mutex and so someone
389          * else may be manipulating the engine->barrier_tasks, in
390          * which case either we or they will be upset :)
391          *
392          * A second __active_del_barrier() will report failure to claim
393          * the active_node and the caller will just shrug and know not to
394          * claim ownership of its node.
395          *
396          * A concurrent i915_request_add_active_barriers() will miss adding
397          * any of the tasks, but we will try again on the next pass -- and since
398          * we are actively using the barrier, we know that there will be
399          * at least another opportunity when we idle.
400          */
401         llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
402                 if (node == barrier_from_ll(pos)) {
403                         node = NULL;
404                         continue;
405                 }
406
407                 pos->next = head;
408                 head = pos;
409                 if (!tail)
410                         tail = pos;
411         }
412         if (head)
413                 llist_add_batch(head, tail, &engine->barrier_tasks);
414
415         return !node;
416 }
417
418 static bool
419 __active_del_barrier(struct i915_active *ref, struct active_node *node)
420 {
421         return ____active_del_barrier(ref, node, barrier_to_engine(node));
422 }
423
424 static bool
425 replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
426 {
427         if (!is_barrier(active)) /* proto-node used by our idle barrier? */
428                 return false;
429
430         /*
431          * This request is on the kernel_context timeline, and so
432          * we can use it to substitute for the pending idle-barrier
433          * request that we want to emit on the kernel_context.
434          */
435         __active_del_barrier(ref, node_from_active(active));
436         return true;
437 }
438
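/*
 * i915_active_ref: track @fence on the timeline identified by @idx. For
 * request tracking, @idx is the fence context of the request's timeline
 * (compare the kernel_context->timeline->fence_context checks used by the
 * barrier code in this file); most callers are expected to go through a
 * small wrapper in i915_active.h (e.g. i915_active_add_request()) rather
 * than call this directly.
 */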
439 int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
440 {
441         struct i915_active_fence *active;
442         int err;
443
444         /* Prevent reaping in case we malloc/wait while building the tree */
445         err = i915_active_acquire(ref);
446         if (err)
447                 return err;
448
449         active = active_instance(ref, idx);
450         if (!active) {
451                 err = -ENOMEM;
452                 goto out;
453         }
454
455         if (replace_barrier(ref, active)) {
456                 RCU_INIT_POINTER(active->fence, NULL);
457                 atomic_dec(&ref->count);
458         }
459         if (!__i915_active_fence_set(active, fence))
460                 __i915_active_acquire(ref);
461
462 out:
463         i915_active_release(ref);
464         return err;
465 }
466
467 static struct dma_fence *
468 __i915_active_set_fence(struct i915_active *ref,
469                         struct i915_active_fence *active,
470                         struct dma_fence *fence)
471 {
472         struct dma_fence *prev;
473
474         if (replace_barrier(ref, active)) {
475                 RCU_INIT_POINTER(active->fence, fence);
476                 return NULL;
477         }
478
479         rcu_read_lock();
480         prev = __i915_active_fence_set(active, fence);
481         if (prev)
482                 prev = dma_fence_get_rcu(prev);
483         else
484                 __i915_active_acquire(ref);
485         rcu_read_unlock();
486
487         return prev;
488 }
489
490 static struct i915_active_fence *
491 __active_fence(struct i915_active *ref, u64 idx)
492 {
493         struct active_node *it;
494
495         it = __active_lookup(ref, idx);
496         if (unlikely(!it)) { /* Contention with parallel tree builders! */
497                 spin_lock_irq(&ref->tree_lock);
498                 it = __active_lookup(ref, idx);
499                 spin_unlock_irq(&ref->tree_lock);
500         }
501         GEM_BUG_ON(!it); /* slot must be preallocated */
502
503         return &it->base;
504 }
505
506 struct dma_fence *
507 __i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
508 {
509         /* Only valid while active, see i915_active_acquire_for_context() */
510         return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
511 }
512
513 struct dma_fence *
514 i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
515 {
516         /* We expect the caller to manage the exclusive timeline ordering */
517         return __i915_active_set_fence(ref, &ref->excl, f);
518 }
519
520 bool i915_active_acquire_if_busy(struct i915_active *ref)
521 {
522         debug_active_assert(ref);
523         return atomic_add_unless(&ref->count, 1, 0);
524 }
525
526 static void __i915_active_activate(struct i915_active *ref)
527 {
528         spin_lock_irq(&ref->tree_lock); /* __active_retire() */
529         if (!atomic_fetch_inc(&ref->count))
530                 debug_active_activate(ref);
531         spin_unlock_irq(&ref->tree_lock);
532 }
533
534 int i915_active_acquire(struct i915_active *ref)
535 {
536         int err;
537
538         if (i915_active_acquire_if_busy(ref))
539                 return 0;
540
541         if (!ref->active) {
542                 __i915_active_activate(ref);
543                 return 0;
544         }
545
546         err = mutex_lock_interruptible(&ref->mutex);
547         if (err)
548                 return err;
549
550         if (likely(!i915_active_acquire_if_busy(ref))) {
551                 err = ref->active(ref);
552                 if (!err)
553                         __i915_active_activate(ref);
554         }
555
556         mutex_unlock(&ref->mutex);
557
558         return err;
559 }
560
561 int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
562 {
563         struct i915_active_fence *active;
564         int err;
565
566         err = i915_active_acquire(ref);
567         if (err)
568                 return err;
569
570         active = active_instance(ref, idx);
571         if (!active) {
572                 i915_active_release(ref);
573                 return -ENOMEM;
574         }
575
576         return 0; /* return with active ref */
577 }
578
579 void i915_active_release(struct i915_active *ref)
580 {
581         debug_active_assert(ref);
582         active_retire(ref);
583 }
584
585 static void enable_signaling(struct i915_active_fence *active)
586 {
587         struct dma_fence *fence;
588
589         if (unlikely(is_barrier(active)))
590                 return;
591
592         fence = i915_active_fence_get(active);
593         if (!fence)
594                 return;
595
596         dma_fence_enable_sw_signaling(fence);
597         dma_fence_put(fence);
598 }
599
600 static int flush_barrier(struct active_node *it)
601 {
602         struct intel_engine_cs *engine;
603
604         if (likely(!is_barrier(&it->base)))
605                 return 0;
606
607         engine = __barrier_to_engine(it);
608         smp_rmb(); /* serialise with add_active_barriers */
609         if (!is_barrier(&it->base))
610                 return 0;
611
612         return intel_engine_flush_barriers(engine);
613 }
614
615 static int flush_lazy_signals(struct i915_active *ref)
616 {
617         struct active_node *it, *n;
618         int err = 0;
619
620         enable_signaling(&ref->excl);
621         rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
622                 err = flush_barrier(it); /* unconnected idle barrier? */
623                 if (err)
624                         break;
625
626                 enable_signaling(&it->base);
627         }
628
629         return err;
630 }
631
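/*
 * __i915_active_wait: flush lazy signaling and sleep until @ref idles.
 * Callers are expected to use the i915_active_wait() wrapper from
 * i915_active.h, which passes TASK_INTERRUPTIBLE for @state.
 */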
632 int __i915_active_wait(struct i915_active *ref, int state)
633 {
634         might_sleep();
635
636         /* Any fence added after the wait begins will not be auto-signaled */
637         if (i915_active_acquire_if_busy(ref)) {
638                 int err;
639
640                 err = flush_lazy_signals(ref);
641                 i915_active_release(ref);
642                 if (err)
643                         return err;
644
645                 if (___wait_var_event(ref, i915_active_is_idle(ref),
646                                       state, 0, 0, schedule()))
647                         return -EINTR;
648         }
649
650         /*
651          * After the wait is complete, the caller may free the active.
652          * We have to flush any concurrent retirement before returning.
653          */
654         flush_work(&ref->work);
655         return 0;
656 }
657
658 static int __await_active(struct i915_active_fence *active,
659                           int (*fn)(void *arg, struct dma_fence *fence),
660                           void *arg)
661 {
662         struct dma_fence *fence;
663
664         if (is_barrier(active)) /* XXX flush the barrier? */
665                 return 0;
666
667         fence = i915_active_fence_get(active);
668         if (fence) {
669                 int err;
670
671                 err = fn(arg, fence);
672                 dma_fence_put(fence);
673                 if (err < 0)
674                         return err;
675         }
676
677         return 0;
678 }
679
680 struct wait_barrier {
681         struct wait_queue_entry base;
682         struct i915_active *ref;
683 };
684
685 static int
686 barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
687 {
688         struct wait_barrier *wb = container_of(wq, typeof(*wb), base);
689
690         if (i915_active_is_idle(wb->ref)) {
691                 list_del(&wq->entry);
692                 i915_sw_fence_complete(wq->private);
693                 kfree(wq);
694         }
695
696         return 0;
697 }
698
699 static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
700 {
701         struct wait_barrier *wb;
702
703         wb = kmalloc(sizeof(*wb), GFP_KERNEL);
704         if (unlikely(!wb))
705                 return -ENOMEM;
706
707         GEM_BUG_ON(i915_active_is_idle(ref));
708         if (!i915_sw_fence_await(fence)) {
709                 kfree(wb);
710                 return -EINVAL;
711         }
712
713         wb->base.flags = 0;
714         wb->base.func = barrier_wake;
715         wb->base.private = fence;
716         wb->ref = ref;
717
718         add_wait_queue(__var_waitqueue(ref), &wb->base);
719         return 0;
720 }
721
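/*
 * Common body for i915_request_await_active() and i915_sw_fence_await_active()
 * below: for each fence still tracked by @ref (as selected by @flags), @fn is
 * invoked with @arg so that the caller can record the dependency.
 */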
722 static int await_active(struct i915_active *ref,
723                         unsigned int flags,
724                         int (*fn)(void *arg, struct dma_fence *fence),
725                         void *arg, struct i915_sw_fence *barrier)
726 {
727         int err = 0;
728
729         if (!i915_active_acquire_if_busy(ref))
730                 return 0;
731
732         if (flags & I915_ACTIVE_AWAIT_EXCL &&
733             rcu_access_pointer(ref->excl.fence)) {
734                 err = __await_active(&ref->excl, fn, arg);
735                 if (err)
736                         goto out;
737         }
738
739         if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
740                 struct active_node *it, *n;
741
742                 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
743                         err = __await_active(&it->base, fn, arg);
744                         if (err)
745                                 goto out;
746                 }
747         }
748
749         if (flags & I915_ACTIVE_AWAIT_BARRIER) {
750                 err = flush_lazy_signals(ref);
751                 if (err)
752                         goto out;
753
754                 err = __await_barrier(ref, barrier);
755                 if (err)
756                         goto out;
757         }
758
759 out:
760         i915_active_release(ref);
761         return err;
762 }
763
764 static int rq_await_fence(void *arg, struct dma_fence *fence)
765 {
766         return i915_request_await_dma_fence(arg, fence);
767 }
768
769 int i915_request_await_active(struct i915_request *rq,
770                               struct i915_active *ref,
771                               unsigned int flags)
772 {
773         return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
774 }
775
776 static int sw_await_fence(void *arg, struct dma_fence *fence)
777 {
778         return i915_sw_fence_await_dma_fence(arg, fence, 0,
779                                              GFP_NOWAIT | __GFP_NOWARN);
780 }
781
782 int i915_sw_fence_await_active(struct i915_sw_fence *fence,
783                                struct i915_active *ref,
784                                unsigned int flags)
785 {
786         return await_active(ref, flags, sw_await_fence, fence, fence);
787 }
788
789 void i915_active_fini(struct i915_active *ref)
790 {
791         debug_active_fini(ref);
792         GEM_BUG_ON(atomic_read(&ref->count));
793         GEM_BUG_ON(work_pending(&ref->work));
794         mutex_destroy(&ref->mutex);
795
796         if (ref->cache)
797                 kmem_cache_free(global.slab_cache, ref->cache);
798 }
799
800 static inline bool is_idle_barrier(struct active_node *node, u64 idx)
801 {
802         return node->timeline == idx && !i915_active_fence_isset(&node->base);
803 }
804
805 static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
806 {
807         struct rb_node *prev, *p;
808
809         if (RB_EMPTY_ROOT(&ref->tree))
810                 return NULL;
811
812         GEM_BUG_ON(i915_active_is_idle(ref));
813
814         /*
815          * Try to reuse any existing barrier nodes already allocated for this
816          * i915_active; due to overlapping active phases there is likely a
817          * node kept alive (as we reuse before parking). We prefer to reuse
818          * completely idle barriers (less hassle in manipulating the llists),
819          * but otherwise any will do.
820          */
821         if (ref->cache && is_idle_barrier(ref->cache, idx)) {
822                 p = &ref->cache->node;
823                 goto match;
824         }
825
826         prev = NULL;
827         p = ref->tree.rb_node;
828         while (p) {
829                 struct active_node *node =
830                         rb_entry(p, struct active_node, node);
831
832                 if (is_idle_barrier(node, idx))
833                         goto match;
834
835                 prev = p;
836                 if (node->timeline < idx)
837                         p = READ_ONCE(p->rb_right);
838                 else
839                         p = READ_ONCE(p->rb_left);
840         }
841
842         /*
843          * No quick match, but we did find the leftmost rb_node for the
844          * kernel_context. Walk the rb_tree in-order to see if there were
845          * any idle-barriers on this timeline that we missed, or just use
846          * the first pending barrier.
847          */
848         for (p = prev; p; p = rb_next(p)) {
849                 struct active_node *node =
850                         rb_entry(p, struct active_node, node);
851                 struct intel_engine_cs *engine;
852
853                 if (node->timeline > idx)
854                         break;
855
856                 if (node->timeline < idx)
857                         continue;
858
859                 if (is_idle_barrier(node, idx))
860                         goto match;
861
862                 /*
863                  * The list of pending barriers is protected by the
864                  * kernel_context timeline, which notably we do not hold
865                  * here. i915_request_add_active_barriers() may consume
866                  * the barrier before we claim it, so we have to check
867                  * for success.
868                  */
869                 engine = __barrier_to_engine(node);
870                 smp_rmb(); /* serialise with add_active_barriers */
871                 if (is_barrier(&node->base) &&
872                     ____active_del_barrier(ref, node, engine))
873                         goto match;
874         }
875
876         return NULL;
877
878 match:
879         spin_lock_irq(&ref->tree_lock);
880         rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
881         if (p == &ref->cache->node)
882                 WRITE_ONCE(ref->cache, NULL);
883         spin_unlock_irq(&ref->tree_lock);
884
885         return rb_entry(p, struct active_node, node);
886 }
887
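/*
 * Idle-barrier lifecycle (all within this file):
 *
 *  1. i915_active_acquire_preallocate_barrier() allocates, or reuses via
 *     reuse_idle_barrier(), one proto-node per physical engine and parks
 *     them on ref->preallocated_barriers.
 *  2. i915_active_acquire_barrier() moves each proto-node into the rbtree
 *     and onto engine->barrier_tasks.
 *  3. i915_request_add_active_barriers() attaches the proto-nodes to the
 *     next kernel_context request, replacing the ERR_PTR(-EAGAIN) marker
 *     with the real fence.
 *  4. node_retire() then drops the i915_active reference once that request
 *     completes.
 */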
888 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
889                                             struct intel_engine_cs *engine)
890 {
891         intel_engine_mask_t tmp, mask = engine->mask;
892         struct llist_node *first = NULL, *last = NULL;
893         struct intel_gt *gt = engine->gt;
894
895         GEM_BUG_ON(i915_active_is_idle(ref));
896
897         /* Wait until the previous preallocation is completed */
898         while (!llist_empty(&ref->preallocated_barriers))
899                 cond_resched();
900
901         /*
902          * Preallocate a node for each physical engine supporting the target
903          * engine (remember virtual engines have more than one sibling).
904          * We can then use the preallocated nodes in
905          * i915_active_acquire_barrier().
906          */
907         GEM_BUG_ON(!mask);
908         for_each_engine_masked(engine, gt, mask, tmp) {
909                 u64 idx = engine->kernel_context->timeline->fence_context;
910                 struct llist_node *prev = first;
911                 struct active_node *node;
912
913                 rcu_read_lock();
914                 node = reuse_idle_barrier(ref, idx);
915                 rcu_read_unlock();
916                 if (!node) {
917                         node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
918                         if (!node)
919                                 goto unwind;
920
921                         RCU_INIT_POINTER(node->base.fence, NULL);
922                         node->base.cb.func = node_retire;
923                         node->timeline = idx;
924                         node->ref = ref;
925                 }
926
927                 if (!i915_active_fence_isset(&node->base)) {
928                         /*
929                          * Mark this as being *our* unconnected proto-node.
930                          *
931                          * Since this node is not in any list, and we have
932                          * decoupled it from the rbtree, we can reuse the
933                          * request to indicate this is an idle-barrier node
934                          * and then we can use the rb_node and list pointers
935                          * for our tracking of the pending barrier.
936                          */
937                         RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
938                         node->base.cb.node.prev = (void *)engine;
939                         __i915_active_acquire(ref);
940                 }
941                 GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
942
943                 GEM_BUG_ON(barrier_to_engine(node) != engine);
944                 first = barrier_to_ll(node);
945                 first->next = prev;
946                 if (!last)
947                         last = first;
948                 intel_engine_pm_get(engine);
949         }
950
951         GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
952         llist_add_batch(first, last, &ref->preallocated_barriers);
953
954         return 0;
955
956 unwind:
957         while (first) {
958                 struct active_node *node = barrier_from_ll(first);
959
960                 first = first->next;
961
962                 atomic_dec(&ref->count);
963                 intel_engine_pm_put(barrier_to_engine(node));
964
965                 kmem_cache_free(global.slab_cache, node);
966         }
967         return -ENOMEM;
968 }
969
970 void i915_active_acquire_barrier(struct i915_active *ref)
971 {
972         struct llist_node *pos, *next;
973         unsigned long flags;
974
975         GEM_BUG_ON(i915_active_is_idle(ref));
976
977         /*
978          * Transfer the list of preallocated barriers into the
979          * i915_active rbtree, but only as proto-nodes. They will be
980          * populated by i915_request_add_active_barriers() to point to the
981          * request that will eventually release them.
982          */
983         llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
984                 struct active_node *node = barrier_from_ll(pos);
985                 struct intel_engine_cs *engine = barrier_to_engine(node);
986                 struct rb_node **p, *parent;
987
988                 spin_lock_irqsave_nested(&ref->tree_lock, flags,
989                                          SINGLE_DEPTH_NESTING);
990                 parent = NULL;
991                 p = &ref->tree.rb_node;
992                 while (*p) {
993                         struct active_node *it;
994
995                         parent = *p;
996
997                         it = rb_entry(parent, struct active_node, node);
998                         if (it->timeline < node->timeline)
999                                 p = &parent->rb_right;
1000                         else
1001                                 p = &parent->rb_left;
1002                 }
1003                 rb_link_node(&node->node, parent, p);
1004                 rb_insert_color(&node->node, &ref->tree);
1005                 spin_unlock_irqrestore(&ref->tree_lock, flags);
1006
1007                 GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
1008                 llist_add(barrier_to_ll(node), &engine->barrier_tasks);
1009                 intel_engine_pm_put_delay(engine, 1);
1010         }
1011 }
1012
1013 static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
1014 {
1015         return __active_fence_slot(&barrier_from_ll(node)->base);
1016 }
1017
1018 void i915_request_add_active_barriers(struct i915_request *rq)
1019 {
1020         struct intel_engine_cs *engine = rq->engine;
1021         struct llist_node *node, *next;
1022         unsigned long flags;
1023
1024         GEM_BUG_ON(!intel_context_is_barrier(rq->context));
1025         GEM_BUG_ON(intel_engine_is_virtual(engine));
1026         GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
1027
1028         node = llist_del_all(&engine->barrier_tasks);
1029         if (!node)
1030                 return;
1031         /*
1032          * Attach the list of proto-fences to the in-flight request such
1033          * that the parent i915_active will be released when this request
1034          * is retired.
1035          */
1036         spin_lock_irqsave(&rq->lock, flags);
1037         llist_for_each_safe(node, next, node) {
1038                 /* serialise with reuse_idle_barrier */
1039                 smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
1040                 list_add_tail((struct list_head *)node, &rq->fence.cb_list);
1041         }
1042         spin_unlock_irqrestore(&rq->lock, flags);
1043 }
1044
1045 /**
1046  * __i915_active_fence_set - Update the last active fence along its timeline
1047  * @active: the active tracker
1048  * @fence: the new fence (under construction)
1049  *
1050  * Records the new @fence as the last active fence along its timeline in
1051  * this active tracker, moving the tracking callbacks from the previous
1052  * fence onto this one. Returns the previous fence (if not already completed),
1053  * which the caller must ensure is executed before the new fence. To keep
1054  * the ordering of fences within the timeline of the i915_active_fence
1055  * well defined, the timeline should be held locked by the caller.
1056  */
1057 struct dma_fence *
1058 __i915_active_fence_set(struct i915_active_fence *active,
1059                         struct dma_fence *fence)
1060 {
1061         struct dma_fence *prev;
1062         unsigned long flags;
1063
1064         if (fence == rcu_access_pointer(active->fence))
1065                 return fence;
1066
1067         GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
1068
1069         /*
1070          * Consider that we have two threads arriving (A and B), with
1071          * C already resident as the active->fence.
1072          *
1073          * A does the xchg first, and so it sees C or NULL depending
1074          * on the timing of the interrupt handler. If it is NULL, the
1075          * previous fence must have been signaled and we know that
1076          * we are first on the timeline. If it is still present,
1077          * we acquire the lock on that fence and serialise with the interrupt
1078          * handler, in the process removing it from any future interrupt
1079          * callback. A will then wait on C before executing (if present).
1080          *
1081          * As B is second, it sees A as the previous fence and so waits for
1082          * it to complete its transition and takes over the occupancy for
1083          * itself -- remembering that it needs to wait on A before executing.
1084          *
1085          * Note the strong ordering of the timeline also provides consistent
1086          * nesting rules for the fence->lock; the inner lock is always the
1087          * older lock.
1088          */
1089         spin_lock_irqsave(fence->lock, flags);
1090         prev = xchg(__active_fence_slot(active), fence);
1091         if (prev) {
1092                 GEM_BUG_ON(prev == fence);
1093                 spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
1094                 __list_del_entry(&active->cb.node);
1095                 spin_unlock(prev->lock); /* serialise with prev->cb_list */
1096         }
1097         list_add_tail(&active->cb.node, &fence->cb_list);
1098         spin_unlock_irqrestore(fence->lock, flags);
1099
1100         return prev;
1101 }
1102
1103 int i915_active_fence_set(struct i915_active_fence *active,
1104                           struct i915_request *rq)
1105 {
1106         struct dma_fence *fence;
1107         int err = 0;
1108
1109         /* Must maintain timeline ordering wrt previous active requests */
1110         rcu_read_lock();
1111         fence = __i915_active_fence_set(active, &rq->fence);
1112         if (fence) /* but the previous fence may not belong to that timeline! */
1113                 fence = dma_fence_get_rcu(fence);
1114         rcu_read_unlock();
1115         if (fence) {
1116                 err = i915_request_await_dma_fence(rq, fence);
1117                 dma_fence_put(fence);
1118         }
1119
1120         return err;
1121 }
1122
1123 void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
1124 {
1125         active_fence_cb(fence, cb);
1126 }
1127
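/*
 * auto_active: a heap-allocated, reference-counted i915_active whose
 * allocation is kept alive by its own activity. Going busy takes an extra
 * kref (auto_active()) that is dropped again when the tracker retires
 * (auto_retire()). Illustrative use (sketch only, names are the caller's):
 *
 *      struct i915_active *ref = i915_active_create();
 *      if (ref) {
 *              ... acquire / track fences as usual ...
 *              i915_active_put(ref);
 *      }
 */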
1128 struct auto_active {
1129         struct i915_active base;
1130         struct kref ref;
1131 };
1132
1133 struct i915_active *i915_active_get(struct i915_active *ref)
1134 {
1135         struct auto_active *aa = container_of(ref, typeof(*aa), base);
1136
1137         kref_get(&aa->ref);
1138         return &aa->base;
1139 }
1140
1141 static void auto_release(struct kref *ref)
1142 {
1143         struct auto_active *aa = container_of(ref, typeof(*aa), ref);
1144
1145         i915_active_fini(&aa->base);
1146         kfree(aa);
1147 }
1148
1149 void i915_active_put(struct i915_active *ref)
1150 {
1151         struct auto_active *aa = container_of(ref, typeof(*aa), base);
1152
1153         kref_put(&aa->ref, auto_release);
1154 }
1155
1156 static int auto_active(struct i915_active *ref)
1157 {
1158         i915_active_get(ref);
1159         return 0;
1160 }
1161
1162 static void auto_retire(struct i915_active *ref)
1163 {
1164         i915_active_put(ref);
1165 }
1166
1167 struct i915_active *i915_active_create(void)
1168 {
1169         struct auto_active *aa;
1170
1171         aa = kmalloc(sizeof(*aa), GFP_KERNEL);
1172         if (!aa)
1173                 return NULL;
1174
1175         kref_init(&aa->ref);
1176         i915_active_init(&aa->base, auto_active, auto_retire);
1177
1178         return &aa->base;
1179 }
1180
1181 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1182 #include "selftests/i915_active.c"
1183 #endif
1184
1185 static void i915_global_active_shrink(void)
1186 {
1187         kmem_cache_shrink(global.slab_cache);
1188 }
1189
1190 static void i915_global_active_exit(void)
1191 {
1192         kmem_cache_destroy(global.slab_cache);
1193 }
1194
1195 static struct i915_global_active global = { {
1196         .shrink = i915_global_active_shrink,
1197         .exit = i915_global_active_exit,
1198 } };
1199
1200 int __init i915_global_active_init(void)
1201 {
1202         global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
1203         if (!global.slab_cache)
1204                 return -ENOMEM;
1205
1206         i915_global_register(&global.base);
1207         return 0;
1208 }