/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct rb_node node;
	struct i915_active_fence base;
	struct i915_active *ref;
	u64 timeline;
};

#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

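/*
 * Note on the barrier encoding (descriptive summary of the helpers above):
 * while an active_node is acting as a preallocated idle barrier, its fence
 * slot holds ERR_PTR(-EAGAIN) and the dma_fence_cb list pointers are reused
 * to stash the barrier bookkeeping, roughly:
 *
 *	node->base.fence        = ERR_PTR(-EAGAIN)	(marks a barrier)
 *	node->base.cb.node.prev = (void *)engine	(owning engine)
 *	node->base.cb.node.next				(llist linkage)
 *
 * is_barrier(), barrier_to_ll(), barrier_to_engine() and barrier_from_ll()
 * merely encode and decode this aliasing.
 */
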
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static const struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

static void
__active_retire(struct i915_active *ref)
{
	struct rb_root root = RB_ROOT;
	struct active_node *it, *n;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	/* Even if we have not used the cache, we may still have a barrier */
	if (!ref->cache)
		ref->cache = fetch_node(ref->tree.rb_node);

	/* Keep the MRU cached node for reuse */
	if (ref->cache) {
		/* Discard all other nodes in the tree */
		rb_erase(&ref->cache->node, &ref->tree);
		root = ref->tree;

		/* Rebuild the tree with only the cached node */
		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
		rb_insert_color(&ref->cache->node, &ref->tree);
		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);

		/* Make the cached node available for reuse with any timeline */
		ref->cache->timeline = 0; /* needs cmpxchg(u64) */
	}

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	/* Finally free the discarded timeline tree */
	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

208 static inline struct dma_fence **
209 __active_fence_slot(struct i915_active_fence *active)
211 return (struct dma_fence ** __force)&active->fence;
215 active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
217 struct i915_active_fence *active =
218 container_of(cb, typeof(*active), cb);
220 return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
224 node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
226 if (active_fence_cb(fence, cb))
227 active_retire(container_of(cb, struct active_node, base.cb)->ref);
231 excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
233 if (active_fence_cb(fence, cb))
234 active_retire(container_of(cb, struct i915_active, excl.cb));
static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	it = READ_ONCE(ref->cache);
	if (it) {
		u64 cached = READ_ONCE(it->timeline);

		/* Once claimed, this slot will only belong to this idx */
		if (cached == idx)
			return it;

		/*
		 * An unclaimed cache [.timeline=0] can only be claimed once.
		 *
		 * If the value is already non-zero, some other thread has
		 * claimed the cache and we know that it does not match our
		 * idx. If, and only if, the timeline is currently zero is it
		 * worth competing to claim it atomically for ourselves (for
		 * only the winner of that race will cmpxchg return the old
		 * value of 0).
		 */
		if (!cached && !cmpxchg64(&it->timeline, 0, idx))
			return it;
	}

	BUILD_BUG_ON(offsetof(typeof(*it), node));

	/* While active, the tree can only be built; not destroyed */
	GEM_BUG_ON(i915_active_is_idle(ref));

	it = fetch_node(ref->tree.rb_node);
	while (it) {
		if (it->timeline < idx) {
			it = fetch_node(it->node.rb_right);
		} else if (it->timeline > idx) {
			it = fetch_node(it->node.rb_left);
		} else {
			WRITE_ONCE(ref->cache, it);
			break;
		}
	}

	/* NB: If the tree rotated beneath us, we may miss our target. */
	return it;
}

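/*
 * Callers that must not miss an existing node (e.g. active_instance() and
 * __active_fence() below) retry the lookup under ref->tree_lock when this
 * lockless walk returns NULL, as a concurrent insertion may rotate the tree
 * beneath us.
 */
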
static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;

	node = __active_lookup(ref, idx);
	if (likely(node))
		return &node->base;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto out;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	/*
	 * XXX: We should preallocate this before i915_active_ref() is ever
	 * called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
	 */
	node = kmem_cache_alloc(global.slab_cache, GFP_ATOMIC);
	if (!node)
		goto out;

	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	WRITE_ONCE(ref->cache, node);
	spin_unlock_irq(&ref->tree_lock);

	return node ? &node->base : NULL;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	debug_active_init(ref);

	ref->flags = flags;
	ref->active = active;
	ref->retire = retire;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

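/*
 * Typical usage, as an illustrative sketch only (error handling trimmed,
 * container name hypothetical, and the i915_active_init() wrapper from
 * i915_active.h assumed):
 *
 *	struct foo {				// hypothetical container
 *		struct i915_active active;
 *	};
 *
 *	i915_active_init(&foo->active, NULL, NULL, 0);
 *
 *	// for each request touching foo:
 *	err = i915_active_ref(&foo->active, tl->fence_context, &rq->fence);
 *
 *	// wait for all tracked fences to be signaled and retired:
 *	err = i915_active_wait(&foo->active);
 *
 *	i915_active_fini(&foo->active);
 */
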
static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

static bool
replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
{
	if (!is_barrier(active)) /* proto-node used by our idle barrier? */
		return false;

	/*
	 * This request is on the kernel_context timeline, and so
	 * we can use it to substitute for the pending idle-barrier
	 * request that we want to emit on the kernel_context.
	 */
	__active_del_barrier(ref, node_from_active(active));
	return true;
}

int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		__i915_active_acquire(ref);

out:
	i915_active_release(ref);
	return err;
}

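/*
 * A representative caller, as a hypothetical sketch (real users, such as the
 * request-tracking helpers in i915_active.h, carry extra bookkeeping around
 * this call):
 *
 *	err = i915_active_ref(&vma->active,
 *			      i915_request_timeline(rq)->fence_context,
 *			      &rq->fence);
 *
 * The idx is the fence context of the request's timeline, so each timeline
 * gets its own slot (active_node) in the rbtree.
 */
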
static struct dma_fence *
__i915_active_set_fence(struct i915_active *ref,
			struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, fence);
		return NULL;
	}

	rcu_read_lock();
	prev = __i915_active_fence_set(active, fence);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
		__i915_active_acquire(ref);
	rcu_read_unlock();

	return prev;
}

static struct i915_active_fence *
__active_fence(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	it = __active_lookup(ref, idx);
	if (unlikely(!it)) { /* Contention with parallel tree builders! */
		spin_lock_irq(&ref->tree_lock);
		it = __active_lookup(ref, idx);
		spin_unlock_irq(&ref->tree_lock);
	}
	GEM_BUG_ON(!it); /* slot must be preallocated */

	return &it->base;
}

struct dma_fence *
__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
{
	/* Only valid while active, see i915_active_acquire_for_context() */
	return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	return __i915_active_set_fence(ref, &ref->excl, f);
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

static void __i915_active_activate(struct i915_active *ref)
{
	spin_lock_irq(&ref->tree_lock); /* __active_retire() */
	if (!atomic_fetch_inc(&ref->count))
		debug_active_activate(ref);
	spin_unlock_irq(&ref->tree_lock);
}

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	if (!ref->active) {
		__i915_active_activate(ref);
		return 0;
	}

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		err = ref->active(ref);
		if (!err)
			__i915_active_activate(ref);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
{
	struct i915_active_fence *active;
	int err;

	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		i915_active_release(ref);
		return -ENOMEM;
	}

	return 0; /* return with active ref */
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

int __i915_active_wait(struct i915_active *ref, int state)
{
	might_sleep();

	/* Any fence added after the wait begins will not be auto-signaled */
	if (i915_active_acquire_if_busy(ref)) {
		int err;

		err = flush_lazy_signals(ref);
		i915_active_release(ref);
		if (err)
			return err;

		if (___wait_var_event(ref, i915_active_is_idle(ref),
				      state, 0, 0, schedule()))
			return -EINTR;
	}

	/*
	 * After the wait is complete, the caller may free the active.
	 * We have to flush any concurrent retirement before returning.
	 */
	flush_work(&ref->work);
	return 0;
}

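/*
 * Most callers are assumed to reach this through the i915_active_wait()
 * helper in i915_active.h, which passes TASK_INTERRUPTIBLE, e.g.:
 *
 *	err = i915_active_wait(&vma->active);
 *	if (err)
 *		return err;
 */
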
static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active)) /* XXX flush the barrier? */
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{
	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);

	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
	}

	return 0;
}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{
	struct wait_barrier *wb;

	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
	if (unlikely(!wb))
		return -ENOMEM;

	GEM_BUG_ON(i915_active_is_idle(ref));
	if (!i915_sw_fence_await(fence)) {
		kfree(wb);
		return -EINVAL;
	}

	wb->base.flags = 0;
	wb->base.func = barrier_wake;
	wb->base.private = fence;
	wb->ref = ref;

	add_wait_queue(__var_waitqueue(ref), &wb->base);
	return 0;
}

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{
	int err = 0;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			goto out;
	}

	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				goto out;
		}
	}

	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
		err = flush_lazy_signals(ref);
		if (err)
			goto out;

		err = __await_barrier(ref, barrier);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence, fence);
}

void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	mutex_destroy(&ref->mutex);

	if (ref->cache)
		kmem_cache_free(global.slab_cache, ref->cache);
}

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = READ_ONCE(p->rb_right);
		else
			p = READ_ONCE(p->rb_left);
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break; /* already past the idx */

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * that we hold this barrier.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	return NULL;

match:
	spin_lock_irq(&ref->tree_lock);
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		WRITE_ONCE(ref->cache, NULL);
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		rcu_read_lock();
		node = reuse_idle_barrier(ref, idx);
		rcu_read_unlock();
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node)
				goto unwind;

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			__i915_active_acquire(ref);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return -ENOMEM;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put_delay(engine, 1);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

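/*
 * Summary of the idle-barrier lifecycle implemented above:
 *
 *  1. i915_active_acquire_preallocate_barrier() allocates (or reuses) one
 *     proto-node per physical engine and parks them on
 *     ref->preallocated_barriers.
 *  2. i915_active_acquire_barrier() moves those proto-nodes into the
 *     i915_active rbtree and onto engine->barrier_tasks.
 *  3. i915_request_add_active_barriers() attaches the pending barriers to
 *     the next request on the engine's kernel_context timeline, so the
 *     i915_active is released when that request is retired.
 */
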
/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

struct auto_active {
	struct i915_active base;
	struct kref ref;
};

struct i915_active *i915_active_get(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_get(&aa->ref);
	return &aa->base;
}

static void auto_release(struct kref *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), ref);

	i915_active_fini(&aa->base);
	kfree(aa);
}

void i915_active_put(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_put(&aa->ref, auto_release);
}

static int auto_active(struct i915_active *ref)
{
	i915_active_get(ref);
	return 0;
}

static void auto_retire(struct i915_active *ref)
{
	i915_active_put(ref);
}

struct i915_active *i915_active_create(void)
{
	struct auto_active *aa;

	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
	if (!aa)
		return NULL;

	kref_init(&aa->ref);
	i915_active_init(&aa->base, auto_active, auto_retire, 0);

	return &aa->base;
}

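/*
 * Illustrative use of the self-managed variant (sketch only, error handling
 * reduced to the bare minimum):
 *
 *	struct i915_active *ref = i915_active_create();
 *	if (!ref)
 *		return -ENOMEM;
 *
 *	err = i915_active_ref(ref, idx, &rq->fence);
 *	...
 *	i915_active_put(ref);
 *
 * auto_active()/auto_retire() above turn the first acquire and the final
 * retire into kref get/put, so the allocation stays alive while it is busy.
 */
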
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}