bcachefs: Add a tracepoint for the btree cache shrinker
authorKent Overstreet <kent.overstreet@gmail.com>
Tue, 28 Dec 2021 01:45:07 +0000 (20:45 -0500)
committerKent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:20 +0000 (17:09 -0400)
This is to help with diagnosing why the btree node cache doesn't seem to
be shrinking - we've had issues in the past with granularity/batch size,
since btree nodes are so big.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
fs/bcachefs/btree_cache.c
fs/bcachefs/trace.h

index 5bf493a..4006188 100644 (file)
@@ -275,6 +275,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
        unsigned long touched = 0;
        unsigned long freed = 0;
        unsigned i, flags;
+       unsigned long ret = SHRINK_STOP;
 
        if (bch2_btree_shrinker_disabled)
                return SHRINK_STOP;
@@ -283,7 +284,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
        if (sc->gfp_mask & __GFP_FS)
                mutex_lock(&bc->lock);
        else if (!mutex_trylock(&bc->lock))
-               return -1;
+               goto out_norestore;
 
        flags = memalloc_nofs_save();
 
@@ -358,8 +359,14 @@ restart:
 
        mutex_unlock(&bc->lock);
 out:
+       ret = (unsigned long) freed * btree_pages(c);
        memalloc_nofs_restore(flags);
-       return (unsigned long) freed * btree_pages(c);
+out_norestore:
+       trace_btree_cache_scan(sc->nr_to_scan,
+                              sc->nr_to_scan / btree_pages(c),
+                              btree_cache_can_free(bc),
+                              ret);
+       return ret;
 }
 
 static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
index 21d0262..a1122fa 100644 (file)
@@ -318,6 +318,34 @@ DEFINE_EVENT(btree_node, btree_set_root,
        TP_ARGS(c, b)
 );
 
+TRACE_EVENT(btree_cache_scan,
+       TP_PROTO(unsigned long nr_to_scan_pages,
+                unsigned long nr_to_scan_nodes,
+                unsigned long can_free_nodes,
+                long ret),
+       TP_ARGS(nr_to_scan_pages, nr_to_scan_nodes, can_free_nodes, ret),
+
+       TP_STRUCT__entry(
+               __field(unsigned long,  nr_to_scan_pages        )
+               __field(unsigned long,  nr_to_scan_nodes        )
+               __field(unsigned long,  can_free_nodes          )
+               __field(long,           ret                     )
+       ),
+
+       TP_fast_assign(
+               __entry->nr_to_scan_pages       = nr_to_scan_pages;
+               __entry->nr_to_scan_nodes       = nr_to_scan_nodes;
+               __entry->can_free_nodes         = can_free_nodes;
+               __entry->ret                    = ret;
+       ),
+
+       TP_printk("scanned for %lu pages, %lu nodes, can free %lu nodes, ret %li",
+                 __entry->nr_to_scan_pages,
+                 __entry->nr_to_scan_nodes,
+                 __entry->can_free_nodes,
+                 __entry->ret)
+);
+
 /* Garbage collection */
 
 DEFINE_EVENT(btree_node, btree_gc_rewrite_node,