/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_engine_pm.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

#define BKL(ref) (&(ref)->i915->drm.struct_mutex)

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

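/*
 * One active_node is kept per timeline that has been used with this tracker;
 * each node records the most recent request submitted on that timeline,
 * keyed by the timeline's fence context in the owner's rbtree.
 */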
struct active_node {
	struct i915_active_request base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

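/*
 * ref->count is the number of request slots still active (plus a bias held
 * across i915_active_acquire/release). __active_retire() runs once the last
 * reference is dropped: it reaps the per-timeline nodes back into the slab
 * cache and invokes the optional ->retire() callback.
 */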
static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	bool retire = false;

	lockdep_assert_held(&ref->mutex);

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (atomic_dec_and_test(&ref->count)) {
		debug_active_deactivate(ref);
		root = ref->tree;
		ref->tree = RB_ROOT;
		ref->cache = NULL;
		retire = true;
	}

	mutex_unlock(&ref->mutex);
	if (!retire)
		return;

	if (ref->retire)
		ref->retire(ref);

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_request_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	/* One active may be flushed from inside the acquire of another */
	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
	__active_retire(ref);
}

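/*
 * node_retire() is installed as the i915_active_request retirement callback:
 * when the tracked request completes, the node drops its reference on the
 * owning i915_active.
 */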
static void
node_retire(struct i915_active_request *base, struct i915_request *rq)
{
	active_retire(container_of(base, struct active_node, base)->ref);
}

static struct i915_active_request *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	mutex_lock(&ref->mutex);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	i915_active_request_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node; /* remember the most recent lookup */
	mutex_unlock(&ref->mutex);

	return &node->base;
}

void __i915_active_init(struct drm_i915_private *i915,
			struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *key)
{
	debug_active_init(ref);

	ref->i915 = i915;
	ref->active = active;
	ref->retire = retire;
	ref->tree = RB_ROOT;
	ref->cache = NULL;
	init_llist_head(&ref->barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", key);
}

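/*
 * Rough usage sketch (the embedding object, its callbacks and the timeline
 * index below are illustrative only, not taken from a real caller):
 *
 *	i915_active_init(i915, &obj->active, obj_retain, obj_retire);
 *
 *	err = i915_active_ref(&obj->active, rq->timeline->fence_context, rq);
 *
 *	err = i915_active_wait(&obj->active);
 *	i915_active_fini(&obj->active);
 */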
int i915_active_ref(struct i915_active *ref,
		    u64 timeline,
		    struct i915_request *rq)
{
	struct i915_active_request *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, timeline);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (!i915_active_request_isset(active))
		atomic_inc(&ref->count);
	__i915_active_request_set(active, rq);

out:
	i915_active_release(ref);
	return err;
}

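/*
 * i915_active_acquire() pins the tracker so that it cannot be retired and
 * reaped while the caller is busy allocating nodes or waiting; the matching
 * i915_active_release() drops that pin and retires if it was the last one.
 */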
int i915_active_acquire(struct i915_active *ref)
{
	int err;

	debug_active_assert(ref);
	if (atomic_add_unless(&ref->count, 1, 0))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (!atomic_read(&ref->count) && ref->active)
		err = ref->active(ref);
	if (!err) {
		debug_active_activate(ref);
		atomic_inc(&ref->count);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

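/*
 * Flush the tracker: wait upon and retire every request currently recorded,
 * returning an error if interrupted or if the tracker is still busy.
 */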
int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	might_sleep();
	if (RB_EMPTY_ROOT(&ref->tree))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (!atomic_add_unless(&ref->count, 1, 0)) {
		mutex_unlock(&ref->mutex);
		return 0;
	}

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = i915_active_request_retire(&it->base, BKL(ref));
		if (err)
			break;
	}

	__active_retire(ref);
	if (err)
		return err;

	if (!i915_active_is_idle(ref))
		return -EBUSY;

	return 0;
}

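/*
 * The await helpers order @rq behind the tracked requests so that it will
 * not execute until all prior activity on the i915_active has signaled.
 */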
int i915_request_await_active_request(struct i915_request *rq,
				      struct i915_active_request *active)
{
	struct i915_request *barrier =
		i915_active_request_raw(active, &rq->i915->drm.struct_mutex);

	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
}

int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	struct active_node *it, *n;
	int err;

	if (RB_EMPTY_ROOT(&ref->tree))
		return 0;

	/* await allocates and so we need to avoid hitting the shrinker */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	mutex_lock(&ref->mutex);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = i915_request_await_active_request(rq, &it->base);
		if (err)
			break;
	}
	mutex_unlock(&ref->mutex);

	i915_active_release(ref);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	GEM_BUG_ON(atomic_read(&ref->count));
	mutex_destroy(&ref->mutex);
}
#endif

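/*
 * Barriers keep the i915_active busy (and hold an engine-pm reference) until
 * a request on each engine's kernel context retires. The nodes are allocated
 * up front here so that i915_active_acquire_barrier() cannot fail later.
 */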
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct llist_node *pos, *next;
	unsigned long tmp;
	int err;

	GEM_BUG_ON(!engine->mask);
	for_each_engine_masked(engine, i915, engine->mask, tmp) {
		struct intel_context *kctx = engine->kernel_context;
		struct active_node *node;

		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
		if (unlikely(!node)) {
			err = -ENOMEM;
			goto unwind;
		}

		i915_active_request_init(&node->base,
					 (void *)engine, node_retire);
		node->timeline = kctx->ring->timeline->fence_context;
		node->ref = ref;
		atomic_inc(&ref->count);

		intel_engine_pm_get(engine);
		llist_add((struct llist_node *)&node->base.link,
			  &ref->barriers);
	}

	return 0;

unwind:
	llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
		struct active_node *node;

		node = container_of((struct list_head *)pos,
				    typeof(*node), base.link);
		engine = (void *)rcu_access_pointer(node->base.request);

		intel_engine_pm_put(engine);
		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

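/*
 * Transfer the preallocated barrier nodes into the rbtree and onto each
 * engine's barrier_tasks list; i915_request_add_barriers() will later splice
 * them onto a kernel-context request so that they retire along with it.
 */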
void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;

	GEM_BUG_ON(i915_active_is_idle(ref));

	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
	llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
		struct intel_engine_cs *engine;
		struct active_node *node;
		struct rb_node **p, *parent;

		node = container_of((struct list_head *)pos,
				    typeof(*node), base.link);

		engine = (void *)rcu_access_pointer(node->base.request);
		RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));

		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			parent = *p;
			if (rb_entry(parent,
				     struct active_node,
				     node)->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);

		llist_add((struct llist_node *)&node->base.link,
			  &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
	mutex_unlock(&ref->mutex);
}

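/* Attach the engine's pending barrier tasks to @rq so they retire with it. */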
void i915_request_add_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks))
		list_add_tail((struct list_head *)node, &rq->active_list);
}

int i915_active_request_set(struct i915_active_request *active,
			    struct i915_request *rq)
{
	int err;

	/* Must maintain ordering wrt previous active requests */
	err = i915_request_await_active_request(rq, active);
	if (err)
		return err;

	__i915_active_request_set(active, rq);
	return 0;
}

void i915_active_retire_noop(struct i915_active_request *active,
			     struct i915_request *request)
{
	/* Space left intentionally blank */
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}