debugobjects: Collect newly allocated objects in a list to reduce lock contention
author: Zhen Lei <thunder.leizhen@huawei.com>
Mon, 7 Oct 2024 16:49:53 +0000 (18:49 +0200)
committer: Thomas Gleixner <tglx@linutronix.de>
Tue, 15 Oct 2024 15:30:30 +0000 (17:30 +0200)
Collect the newly allocated debug objects in a list outside the lock, so
that the lock held time and the potential lock contention is reduced.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20240911083521.2257-3-thunder.leizhen@huawei.com
Link: https://lore.kernel.org/all/20241007164913.073653668@linutronix.de
lib/debugobjects.c

index df48acc..798ce5a 100644 (file)
@@ -161,23 +161,25 @@ static void fill_pool(void)
                return;
 
        while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
-               struct debug_obj *new[ODEBUG_BATCH_SIZE];
+               struct debug_obj *new, *last = NULL;
+               HLIST_HEAD(head);
                int cnt;
 
                for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
-                       new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
-                       if (!new[cnt])
+                       new = kmem_cache_zalloc(obj_cache, gfp);
+                       if (!new)
                                break;
+                       hlist_add_head(&new->node, &head);
+                       if (!last)
+                               last = new;
                }
                if (!cnt)
                        return;
 
                raw_spin_lock_irqsave(&pool_lock, flags);
-               while (cnt) {
-                       hlist_add_head(&new[--cnt]->node, &obj_pool);
-                       debug_objects_allocated++;
-                       WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
-               }
+               hlist_splice_init(&head, &last->node, &obj_pool);
+               debug_objects_allocated += cnt;
+               WRITE_ONCE(obj_pool_free, obj_pool_free + cnt);
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
 }