bcachefs: Fix a bug with the journal_seq_blacklist mechanism
author     Kent Overstreet <kent.overstreet@gmail.com>
           Wed, 5 Aug 2020 03:10:08 +0000 (23:10 -0400)
committer  Kent Overstreet <kent.overstreet@linux.dev>
           Sun, 22 Oct 2023 21:08:43 +0000 (17:08 -0400)
Previously, we would start doing btree updates before writing the first
journal entry; if this happened after an unclean shutdown, it could cause
those btree updates not to be blacklisted.
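
For context, a rough sketch (an illustration, not part of this patch) of why
that matters: when btree nodes are read back, bsets whose journal_seq falls in
a blacklisted range are treated as never having been committed and are
ignored. The check goes through bch2_journal_seq_is_blacklisted(); the helper
name and exact arguments below are assumptions, not the actual read path:

    /*
     * Hypothetical consumer of the blacklist: decide whether a bset read
     * off disk should be ignored because its journal_seq falls in a
     * blacklisted range (i.e. it was never fully committed).
     */
    static bool bset_was_blacklisted(struct bch_fs *c, struct bset *i)
    {
            return bch2_journal_seq_is_blacklisted(c,
                            le64_to_cpu(i->journal_seq), true);
    }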

Also, move some code to headers for userspace debug tools.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_io.c
fs/bcachefs/btree_io.h
fs/bcachefs/journal_seq_blacklist.c
fs/bcachefs/journal_seq_blacklist.h
fs/bcachefs/recovery.c
fs/bcachefs/super.c

diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index d3ea43d..996fc0c 100644
@@ -597,34 +597,6 @@ void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
                bch2_btree_iter_reinit_node(iter, b);
 }
 
-static struct nonce btree_nonce(struct bset *i, unsigned offset)
-{
-       return (struct nonce) {{
-               [0] = cpu_to_le32(offset),
-               [1] = ((__le32 *) &i->seq)[0],
-               [2] = ((__le32 *) &i->seq)[1],
-               [3] = ((__le32 *) &i->journal_seq)[0]^BCH_NONCE_BTREE,
-       }};
-}
-
-static void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
-{
-       struct nonce nonce = btree_nonce(i, offset);
-
-       if (!offset) {
-               struct btree_node *bn = container_of(i, struct btree_node, keys);
-               unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
-
-               bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
-                            bytes);
-
-               nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
-       }
-
-       bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
-                    vstruct_end(i) - (void *) i->_data);
-}
-
 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
                          struct btree *b, struct bset *i,
                          unsigned offset, int write)
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 66ebdd3..626d0f0 100644
@@ -5,6 +5,7 @@
 #include "bkey_methods.h"
 #include "bset.h"
 #include "btree_locking.h"
+#include "checksum.h"
 #include "extents.h"
 #include "io_types.h"
 
@@ -82,6 +83,34 @@ static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *
        return false;
 }
 
+static inline struct nonce btree_nonce(struct bset *i, unsigned offset)
+{
+       return (struct nonce) {{
+               [0] = cpu_to_le32(offset),
+               [1] = ((__le32 *) &i->seq)[0],
+               [2] = ((__le32 *) &i->seq)[1],
+               [3] = ((__le32 *) &i->journal_seq)[0]^BCH_NONCE_BTREE,
+       }};
+}
+
+static inline void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
+{
+       struct nonce nonce = btree_nonce(i, offset);
+
+       if (!offset) {
+               struct btree_node *bn = container_of(i, struct btree_node, keys);
+               unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
+
+               bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
+                            bytes);
+
+               nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
+       }
+
+       bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
+                    vstruct_end(i) - (void *) i->_data);
+}
+
 void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
 
 void bch2_btree_build_aux_trees(struct btree *);
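
Since btree_nonce() and bset_encrypt() are now static inlines in btree_io.h, a
userspace debug tool that builds against these headers (e.g. in the
bcachefs-tools tree) can reuse them directly. A minimal sketch, assuming such
a tool and a hypothetical helper name; it relies on bch2_encrypt() applying a
ChaCha20 keystream, so running bset_encrypt() a second time with the same
nonce decrypts:

    #include "btree_io.h"

    /*
     * Hypothetical userspace helper: decrypt one bset read off disk.
     * Encryption is a ChaCha20 keystream XOR, so applying bset_encrypt()
     * again with the same nonce undoes it.
     */
    static void debug_tool_decrypt_bset(struct bch_fs *c, struct bset *i,
                                        unsigned offset)
    {
            bset_encrypt(c, i, offset);
    }
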
diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c
index a21de00..d0f1bbf 100644
  * that bset, until that btree node is rewritten.
  */
 
-static unsigned
-blacklist_nr_entries(struct bch_sb_field_journal_seq_blacklist *bl)
-{
-       return bl
-               ? ((vstruct_end(&bl->field) - (void *) &bl->start[0]) /
-                  sizeof(struct journal_seq_blacklist_entry))
-               : 0;
-}
-
 static unsigned sb_blacklist_u64s(unsigned nr)
 {
        struct bch_sb_field_journal_seq_blacklist *bl;
diff --git a/fs/bcachefs/journal_seq_blacklist.h b/fs/bcachefs/journal_seq_blacklist.h
index 03f4b97..afb886e 100644
@@ -2,6 +2,15 @@
 #ifndef _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H
 #define _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H
 
+static inline unsigned
+blacklist_nr_entries(struct bch_sb_field_journal_seq_blacklist *bl)
+{
+       return bl
+               ? ((vstruct_end(&bl->field) - (void *) &bl->start[0]) /
+                  sizeof(struct journal_seq_blacklist_entry))
+               : 0;
+}
+
 bool bch2_journal_seq_is_blacklisted(struct bch_fs *, u64, bool);
 int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64, u64);
 int bch2_blacklist_table_initialize(struct bch_fs *);
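
Similarly, with blacklist_nr_entries() available from the header, a userspace
debug tool could walk the superblock's blacklist field directly. A minimal
sketch (the helper name and printf formatting are assumptions; obtaining the
bch_sb_field_journal_seq_blacklist pointer from an on-disk superblock is left
to the caller):

    #include <stdio.h>

    #include "journal_seq_blacklist.h"

    /* Hypothetical debug helper: print every blacklist entry in the field. */
    static void dump_journal_seq_blacklist(struct bch_sb_field_journal_seq_blacklist *bl)
    {
            unsigned i, nr = blacklist_nr_entries(bl);

            for (i = 0; i < nr; i++)
                    printf("blacklist entry: start %llu end %llu\n",
                           (unsigned long long) le64_to_cpu(bl->start[i].start),
                           (unsigned long long) le64_to_cpu(bl->start[i].end));
    }
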
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 28972f3..6e829bf 100644
@@ -1039,6 +1039,11 @@ int bch2_fs_recovery(struct bch_fs *c)
                }
 
                journal_seq += 4;
+
+               /*
+                * The superblock needs to be written before we do any btree
+                * node writes: it will be in the read_write() path
+                */
        }
 
        ret = bch2_blacklist_table_initialize(c);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 084976c..7377f44 100644
@@ -352,8 +352,8 @@ bool bch2_fs_emergency_read_only(struct bch_fs *c)
 {
        bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
 
-       bch2_fs_read_only_async(c);
        bch2_journal_halt(&c->journal);
+       bch2_fs_read_only_async(c);
 
        wake_up(&bch_read_only_wait);
        return ret;
@@ -410,6 +410,13 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
        if (ret)
                goto err;
 
+       /*
+        * We need to write out a journal entry before we start doing btree
+        * updates, to ensure that on unclean shutdown new journal blacklist
+        * entries are created:
+        */
+       bch2_journal_meta(&c->journal);
+
        clear_bit(BCH_FS_ALLOC_CLEAN, &c->flags);
 
        for_each_rw_member(ca, c, i)