bcachefs: Improve btree write buffer tracepoints
author	Kent Overstreet <kent.overstreet@linux.dev>
Fri, 3 Nov 2023 02:31:16 +0000 (22:31 -0400)
committer	Kent Overstreet <kent.overstreet@linux.dev>
Mon, 1 Jan 2024 16:47:39 +0000 (11:47 -0500)
 - add a tracepoint for write_buffer_flush_sync; this is an expensive operation
 - fix the write_buffer_flush_slowpath tracepoint: name its fields for what
   they record (slowpath keys out of the total flushed), and count both new
   events as persistent counters via trace_and_count()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/bcachefs_format.h
fs/bcachefs/btree_write_buffer.c
fs/bcachefs/trace.h

diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 41fa8b9..545df77 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -1578,7 +1578,9 @@ struct bch_sb_field_disk_groups {
        x(write_super,                                  73)     \
        x(trans_restart_would_deadlock_recursion_limit, 74)     \
        x(trans_restart_write_buffer_flush,             75)     \
-       x(trans_restart_split_race,                     76)
+       x(trans_restart_split_race,                     76)     \
+       x(write_buffer_flush_slowpath,                  77)     \
+       x(write_buffer_flush_sync,                      78)
 
 enum bch_persistent_counters {
 #define x(t, n, ...) BCH_COUNTER_##t,
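
For context: the counter list above is an x-macro table. Each x(name, nr)
entry is expanded once per use site, so the single list generates the
BCH_COUNTER_* enum (and any name tables derived from it), and appending new
entries at the end, as this patch does, leaves existing counters untouched.
A minimal, self-contained sketch of the pattern, with hypothetical DEMO_*
names standing in for the real bcachefs macros:

	#include <stdio.h>

	/* Hypothetical mini version of the persistent counter table. */
	#define DEMO_COUNTERS()				\
		x(write_buffer_flush_slowpath,	77)	\
		x(write_buffer_flush_sync,	78)

	/* Expansion 1: enum constants, as in enum bch_persistent_counters. */
	enum demo_counters {
	#define x(t, n, ...) DEMO_COUNTER_##t,
		DEMO_COUNTERS()
	#undef x
		DEMO_COUNTER_NR
	};

	/* Expansion 2: a parallel table of human-readable names. */
	static const char * const demo_counter_names[] = {
	#define x(t, n, ...) #t,
		DEMO_COUNTERS()
	#undef x
	};

	int main(void)
	{
		for (unsigned i = 0; i < DEMO_COUNTER_NR; i++)
			printf("%u: %s\n", i, demo_counter_names[i]);
		return 0;
	}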
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index a6bf6ed..02ed0f2 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -241,7 +241,7 @@ out:
        mutex_unlock(&wb->flush_lock);
        return ret;
 slowpath:
-       trace_write_buffer_flush_slowpath(trans, i - keys, nr);
+       trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, nr);
 
        /*
         * Now sort the rest by journal seq and bump the journal pin as we go.
@@ -277,8 +277,12 @@ slowpath:
 
 int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
 {
+       struct bch_fs *c = trans->c;
+
+       trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);
+
        bch2_trans_unlock(trans);
-       mutex_lock(&trans->c->btree_write_buffer.flush_lock);
+       mutex_lock(&c->btree_write_buffer.flush_lock);
        return __bch2_btree_write_buffer_flush(trans, 0, true);
 }
 
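
The switch from a bare trace_*() call to trace_and_count() is what ties the
new persistent-counter entries to these events: the macro bumps the
per-filesystem counter for the named event and then fires the tracepoint of
the same name via token pasting. A rough userspace analogue of that pattern
(illustrative only; struct fs, the COUNTER_* ids, and the printf stand-in
are hypothetical, not the kernel's definitions):

	#include <stdio.h>

	enum { COUNTER_write_buffer_flush_sync, COUNTER_NR };

	struct fs { unsigned long counters[COUNTER_NR]; };

	/* Stand-in for the generated trace_write_buffer_flush_sync(). */
	#define trace_write_buffer_flush_sync(fn) \
		printf("write_buffer_flush_sync: %s\n", (fn))

	/* Bump the counter for _name, then emit the matching tracepoint. */
	#define trace_and_count(_c, _name, ...)			\
	do {							\
		(_c)->counters[COUNTER_##_name]++;		\
		trace_##_name(__VA_ARGS__);			\
	} while (0)

	int main(void)
	{
		struct fs c = {0};

		trace_and_count(&c, write_buffer_flush_sync, "demo_fn");
		printf("events counted: %lu\n",
		       c.counters[COUNTER_write_buffer_flush_sync]);
		return 0;
	}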
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index f2e405c..6eced95 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -1334,21 +1334,38 @@ TRACE_EVENT(write_buffer_flush,
                  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
 );
 
+TRACE_EVENT(write_buffer_flush_sync,
+       TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip),
+
+       TP_STRUCT__entry(
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+       ),
+
+       TP_fast_assign(
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip              = caller_ip;
+       ),
+
+       TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
+);
+
 TRACE_EVENT(write_buffer_flush_slowpath,
-       TP_PROTO(struct btree_trans *trans, size_t nr, size_t size),
-       TP_ARGS(trans, nr, size),
+       TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
+       TP_ARGS(trans, slowpath, total),
 
        TP_STRUCT__entry(
-               __field(size_t,         nr              )
-               __field(size_t,         size            )
+               __field(size_t,         slowpath        )
+               __field(size_t,         total           )
        ),
 
        TP_fast_assign(
-               __entry->nr     = nr;
-               __entry->size   = size;
+               __entry->slowpath       = slowpath;
+               __entry->total          = total;
        ),
 
-       TP_printk("%zu/%zu", __entry->nr, __entry->size)
+       TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
 );
 
 DEFINE_EVENT(str, rebalance_extent,
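
A note on the new write_buffer_flush_sync event's fields: _RET_IP_ captures
the caller's return address at the call site, and TP_printk()'s %pS extension
resolves that address to a function+offset string when the trace buffer is
read, so each event shows which function forced the expensive synchronous
flush. _RET_IP_ is the standard kernel helper:

	/* include/linux/kernel.h: the caller's return address, as an
	 * integer; "%pS" symbolizes it to function+offset when printed. */
	#define _RET_IP_	(unsigned long)__builtin_return_address(0)

Likewise, the renamed slowpath/total fields make the existing "%zu/%zu"
output self-describing: keys that took the slow path out of the total keys
flushed.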