bcachefs: Fix large key cache keys
author    Kent Overstreet <kent.overstreet@gmail.com>
          Fri, 18 Mar 2022 01:35:51 +0000 (21:35 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
          Sun, 22 Oct 2023 21:09:28 +0000 (17:09 -0400)
Previously, we'd go into an infinite loop when attempting to cache a
bkey larger than 128 u64s in the key cache: since we were only using a
u8 for the size field, the size would get rounded up to 256 and then
truncated to 0. Oops.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
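
For illustration only, below is a minimal userspace sketch (not bcachefs
code) of the truncation described above. It assumes the insert path rounds
the requested key size up to the next power of two, roughly like the
kernel's roundup_pow_of_two(); a local helper stands in for that here.
A 129-u64 key rounds up to 256, which a u8 field truncates to 0, while the
widened u16 field stores it intact.

/*
 * Standalone sketch of the overflow: rounding 129 u64s up to 256 and
 * storing the result in a u8-sized field yields 0, so the cached key
 * never looks big enough and the caller keeps retrying.
 */
#include <stdio.h>

typedef unsigned char  u8;
typedef unsigned short u16;

/* stand-in for the kernel's roundup_pow_of_two() */
static unsigned round_up_pow2(unsigned n)
{
	unsigned r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned new_u64s = round_up_pow2(129);	/* bkey larger than 128 u64s -> 256 */
	u8  old_field = new_u64s;		/* old u8 field: 256 truncates to 0 */
	u16 new_field = new_u64s;		/* new u16 field: 256 fits */

	printf("rounded to %u, u8 stores %u, u16 stores %u\n",
	       new_u64s, (unsigned) old_field, (unsigned) new_field);
	return 0;
}
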
fs/bcachefs/btree_types.h
fs/bcachefs/btree_update_leaf.c
fs/bcachefs/trace.h

diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 51eb686..a8b0895 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -323,7 +323,7 @@ struct bkey_cached {
        struct btree_bkey_cached_common c;
 
        unsigned long           flags;
-       u8                      u64s;
+       u16                     u64s;
        bool                    valid;
        u32                     btree_trans_barrier_seq;
        struct bkey_cached_key  key;
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index cb0cab7..e482d1b 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -351,7 +351,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 {
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck = (void *) path->l[0].b;
-       unsigned new_u64s;
+       unsigned old_u64s = ck->u64s, new_u64s;
        struct bkey_i *new_k;
 
        EBUG_ON(path->level);
@@ -385,7 +385,8 @@ btree_key_can_insert_cached(struct btree_trans *trans,
         * transaction restart:
         */
        trace_trans_restart_key_cache_key_realloced(trans->fn, _RET_IP_,
-                                            path->btree_id, &path->pos);
+                                            path->btree_id, &path->pos,
+                                            old_u64s, new_u64s);
        /*
         * Not using btree_trans_restart() because we can't unlock here, we have
         * write locks held:
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index af37852..bb938dd 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -918,12 +918,46 @@ TRACE_EVENT(trans_restart_mem_realloced,
                  __entry->bytes)
 );
 
-DEFINE_EVENT(transaction_restart_iter, trans_restart_key_cache_key_realloced,
+TRACE_EVENT(trans_restart_key_cache_key_realloced,
        TP_PROTO(const char *trans_fn,
                 unsigned long caller_ip,
                 enum btree_id btree_id,
-                struct bpos *pos),
-       TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+                struct bpos *pos,
+                unsigned old_u64s,
+                unsigned new_u64s),
+       TP_ARGS(trans_fn, caller_ip, btree_id, pos, old_u64s, new_u64s),
+
+       TP_STRUCT__entry(
+               __array(char,                   trans_fn, 24    )
+               __field(unsigned long,          caller_ip       )
+               __field(enum btree_id,          btree_id        )
+               __field(u64,                    inode           )
+               __field(u64,                    offset          )
+               __field(u32,                    snapshot        )
+               __field(u32,                    old_u64s        )
+               __field(u32,                    new_u64s        )
+       ),
+
+       TP_fast_assign(
+               strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip      = caller_ip;
+               __entry->btree_id       = btree_id;
+               __entry->inode          = pos->inode;
+               __entry->offset         = pos->offset;
+               __entry->snapshot       = pos->snapshot;
+               __entry->old_u64s       = old_u64s;
+               __entry->new_u64s       = new_u64s;
+       ),
+
+       TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
+                 __entry->trans_fn,
+                 (void *) __entry->caller_ip,
+                 bch2_btree_ids[__entry->btree_id],
+                 __entry->inode,
+                 __entry->offset,
+                 __entry->snapshot,
+                 __entry->old_u64s,
+                 __entry->new_u64s)
 );
 
 #endif /* _TRACE_BCACHEFS_H */