/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct i915_active_fence base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};
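
/*
 * Each i915_active keeps one active_node per timeline it tracks, keyed by
 * the timeline's fence_context and stored in ref->tree, plus a single
 * exclusive slot (ref->excl). ref->count holds a reference for every busy
 * fence slot and for every explicit i915_active_acquire(); __active_retire()
 * runs once the count drops back to zero.
 */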

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	root = ref->tree;
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}
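
/*
 * The callbacks above form the retirement path: when a tracked fence
 * signals, active_fence_cb() clears the slot so the node reads as idle,
 * and node_retire()/excl_retire() then drop the i915_active reference the
 * slot was holding, which may trigger __active_retire().
 */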

static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;
	u64 idx = tl->fence_context;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * same timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node;
	spin_unlock_irq(&ref->tree_lock);

	BUILD_BUG_ON(offsetof(typeof(*node), base));
	return &node->base;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	unsigned long bits;

	debug_active_init(ref);

	ref->flags = 0;
	ref->active = active;
	ref->retire = ptr_unpack_bits(retire, &bits, 2);
	if (bits & I915_ACTIVE_MAY_SLEEP)
		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}
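
/*
 * Illustrative usage sketch only; my_obj, my_active and my_retire are
 * hypothetical names and not part of this file. A user embeds an
 * i915_active in its own object and feeds it the fences that keep the
 * object busy:
 *
 *	i915_active_init(&my_obj->active, my_active, my_retire);
 *
 *	// under tl->mutex, track a request on that timeline:
 *	err = i915_active_ref(&my_obj->active, tl, &rq->fence);
 *
 *	// later, wait for all tracked fences to signal:
 *	err = i915_active_wait(&my_obj->active);
 *
 *	i915_active_fini(&my_obj->active);
 */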

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

int i915_active_ref(struct i915_active *ref,
		    struct intel_timeline *tl,
		    struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	lockdep_assert_held(&tl->mutex);

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, tl);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (is_barrier(active)) { /* proto-node used by our idle barrier */
		/*
		 * This request is on the kernel_context timeline, and so
		 * we can use it to substitute for the pending idle-barrier
		 * request that we want to emit on the kernel_context.
		 */
		__active_del_barrier(ref, node_from_active(active));
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}

	if (!__i915_active_fence_set(active, fence))
		atomic_inc(&ref->count);

out:
	i915_active_release(ref);
	return err;
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	struct dma_fence *prev;

	/* We expect the caller to manage the exclusive timeline ordering */
	GEM_BUG_ON(i915_active_is_idle(ref));

	rcu_read_lock();
	prev = __i915_active_fence_set(&ref->excl, f);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
		atomic_inc(&ref->count);
	rcu_read_unlock();

	return prev;
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		if (ref->active)
			err = ref->active(ref);
		if (!err) {
			spin_lock_irq(&ref->tree_lock); /* __active_retire() */
			debug_active_activate(ref);
			atomic_inc(&ref->count);
			spin_unlock_irq(&ref->tree_lock);
		}
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

int i915_active_wait(struct i915_active *ref)
{
	int err;

	might_sleep();

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	/* Any fence added after the wait begins will not be auto-signaled */
	err = flush_lazy_signals(ref);
	i915_active_release(ref);
	if (err)
		return err;

	if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
		return -EINTR;

	flush_work(&ref->work);
	return 0;
}

int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	int err = 0;

	if (rcu_access_pointer(ref->excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = i915_request_await_dma_fence(rq, fence);
			dma_fence_put(fence);
		}
	}

	/* In the future we may choose to await on all fences */

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	mutex_destroy(&ref->mutex);
}
#endif

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = p->rb_right;
		else
			p = p->rb_left;
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * that we still own the node before stealing it.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	spin_unlock_irq(&ref->tree_lock);

	return NULL;

match:
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		ref->cache = NULL;
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}
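
/*
 * Idle barriers, in brief: before parking, each engine is given a
 * proto-node that is only later bound to a real request. As a rough
 * summary of the functions below (for orientation, not a normative
 * description): i915_active_acquire_preallocate_barrier() allocates or
 * reuses one node per physical engine, i915_active_acquire_barrier()
 * publishes them in ref->tree and on engine->barrier_tasks, and
 * i915_request_add_active_barriers() finally attaches them to the
 * kernel_context request that will release them when it signals.
 */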

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;
	int err;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		node = reuse_idle_barrier(ref, idx);
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node) {
				err = -ENOMEM;
				goto unwind;
			}

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			atomic_inc(&ref->count);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;

	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}
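
/*
 * Once bound, a barrier behaves like any other tracked fence: when the
 * kernel_context request above signals, node_retire() fires from its
 * cb_list, clears the slot and drops the i915_active reference taken in
 * i915_active_acquire_preallocate_barrier().
 */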

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}