slub: Don't read nr_slabs and total_objects directly
author    Xiongwei Song <xiongwei.song@windriver.com>
          Thu, 13 Apr 2023 14:34:52 +0000 (22:34 +0800)
committer Vlastimil Babka <vbabka@suse.cz>
          Mon, 22 May 2023 13:20:21 +0000 (15:20 +0200)
We have node_nr_slabs() to read nr_slabs and node_nr_objs() to read
total_objects in a kmem_cache_node, so there is no need to access the
two members directly.

Signed-off-by: Xiongwei Song <xiongwei.song@windriver.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/slub.c

index 53be9a2..58cc832 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5156,9 +5156,9 @@ static int validate_slab_node(struct kmem_cache *s,
                validate_slab(s, slab, obj_map);
                count++;
        }
-       if (count != atomic_long_read(&n->nr_slabs)) {
+       if (count != node_nr_slabs(n)) {
                pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
-                      s->name, count, atomic_long_read(&n->nr_slabs));
+                      s->name, count, node_nr_slabs(n));
                slab_add_kunit_errors();
        }
 
@@ -5442,12 +5442,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                for_each_kmem_cache_node(s, node, n) {
 
                        if (flags & SO_TOTAL)
-                               x = atomic_long_read(&n->total_objects);
+                               x = node_nr_objs(n);
                        else if (flags & SO_OBJECTS)
-                               x = atomic_long_read(&n->total_objects) -
-                                       count_partial(n, count_free);
+                               x = node_nr_objs(n) - count_partial(n, count_free);
                        else
-                               x = atomic_long_read(&n->nr_slabs);
+                               x = node_nr_slabs(n);
                        total += x;
                        nodes[node] += x;
                }
@@ -6386,7 +6385,7 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
                unsigned long flags;
                struct slab *slab;
 
-               if (!atomic_long_read(&n->nr_slabs))
+               if (!node_nr_slabs(n))
                        continue;
 
                spin_lock_irqsave(&n->list_lock, flags);