// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "fs.h"
#include "recovery.h"
#include "snapshot.h"

#include <linux/random.h>
/*
 * Snapshot trees:
 *
 * Keys in BTREE_ID_snapshot_trees identify a whole tree of snapshot nodes; they
 * exist to provide a stable identifier for the whole lifetime of a snapshot
 * tree.
 */
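/*
 * Illustrative layout (example IDs, not taken from a real filesystem): a
 * snapshot_tree key and its root snapshot reference each other, and the tree
 * records a "master" (non-snapshot) subvolume:
 *
 *	snapshot_tree 1:	root_snapshot = U32_MAX, master_subvol = 1
 *	snapshot U32_MAX:	tree = 1, subvol = 1
 *
 * Snapshot node IDs are allocated downward from U32_MAX, so a node's parent
 * always has a larger ID than the node itself.
 */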
void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
				struct bkey_s_c k)
{
	struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);

	prt_printf(out, "subvol %u root snapshot %u",
		   le32_to_cpu(t.v->master_subvol),
		   le32_to_cpu(t.v->root_snapshot));
}
int bch2_snapshot_tree_invalid(struct bch_fs *c, struct bkey_s_c k,
			       enum bkey_invalid_flags flags,
			       struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
			 bkey_lt(k.k->p, POS(0, 1)), c, err,
			 snapshot_tree_pos_bad,
			 "bad pos");
fsck_err:
	return ret;
}
int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
			      struct bch_snapshot_tree *s)
{
	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
					  BTREE_ITER_WITH_UPDATES, snapshot_tree, s);

	if (bch2_err_matches(ret, ENOENT))
		ret = -BCH_ERR_ENOENT_snapshot_tree;
	return ret;
}
struct bkey_i_snapshot_tree *
__bch2_snapshot_tree_create(struct btree_trans *trans)
{
	struct btree_iter iter;
	int ret = bch2_bkey_get_empty_slot(trans, &iter,
			BTREE_ID_snapshot_trees, POS(0, U32_MAX));
	struct bkey_i_snapshot_tree *s_t;

	if (ret == -BCH_ERR_ENOSPC_btree_slot)
		ret = -BCH_ERR_ENOSPC_snapshot_tree;
	if (ret)
		return ERR_PTR(ret);

	s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
	ret = PTR_ERR_OR_ZERO(s_t);
	bch2_trans_iter_exit(trans, &iter);
	return ret ? ERR_PTR(ret) : s_t;
}
static int bch2_snapshot_tree_create(struct btree_trans *trans,
				u32 root_id, u32 subvol_id, u32 *tree_id)
{
	struct bkey_i_snapshot_tree *n_tree =
		__bch2_snapshot_tree_create(trans);

	if (IS_ERR(n_tree))
		return PTR_ERR(n_tree);

	n_tree->v.master_subvol	= cpu_to_le32(subvol_id);
	n_tree->v.root_snapshot	= cpu_to_le32(root_id);
	*tree_id = n_tree->k.p.offset;
	return 0;
}
static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
{
	struct snapshot_table *t;

	rcu_read_lock();
	t = rcu_dereference(c->snapshots);

	while (id && id < ancestor)
		id = __snapshot_t(t, id)->parent;
	rcu_read_unlock();

	return id == ancestor;
}
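/*
 * Worked example (made-up IDs): with parent pointers 80 -> 90 -> 100 and 100
 * the root, bch2_snapshot_is_ancestor_early(c, 80, 100) walks 80 -> 90 -> 100
 * and returns true; a query against a node on another branch terminates as
 * soon as the walk reaches an ID >= ancestor, since ancestors always have
 * larger IDs than their descendants.
 */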
static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
{
	const struct snapshot_t *s = __snapshot_t(t, id);

	if (s->skip[2] <= ancestor)
		return s->skip[2];
	if (s->skip[1] <= ancestor)
		return s->skip[1];
	if (s->skip[0] <= ancestor)
		return s->skip[0];

	return s->parent;
}
bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
{
	struct snapshot_table *t;
	bool ret;

	EBUG_ON(c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_snapshots);

	rcu_read_lock();
	t = rcu_dereference(c->snapshots);

	while (id && id < ancestor - IS_ANCESTOR_BITMAP)
		id = get_ancestor_below(t, id, ancestor);

	if (id && id < ancestor) {
		ret = test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor);

		EBUG_ON(ret != bch2_snapshot_is_ancestor_early(c, id, ancestor));
	} else {
		ret = id == ancestor;
	}

	rcu_read_unlock();

	return ret;
}
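/*
 * Sketch of why the fast path above is constant time near the ancestor
 * (assuming IS_ANCESTOR_BITMAP is the size of the per-node bitmap, 128 bits in
 * this tree): each node caches which of the next IS_ANCESTOR_BITMAP IDs above
 * it are ancestors, so once the skiplist walk gets within that window a single
 * test_bit() answers the query:
 *
 *	id = 80, ancestor = 100
 *	=> test_bit(100 - 80 - 1, __snapshot_t(t, 80)->is_ancestor)
 */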
static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
{
	size_t idx = U32_MAX - id;
	size_t new_size;
	struct snapshot_table *new, *old;

	new_size = max(16UL, roundup_pow_of_two(idx + 1));

	new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
	if (!new)
		return NULL;

	old = rcu_dereference_protected(c->snapshots, true);
	if (old)
		memcpy(new->s,
		       rcu_dereference_protected(c->snapshots, true)->s,
		       sizeof(new->s[0]) * c->snapshot_table_size);

	rcu_assign_pointer(c->snapshots, new);
	c->snapshot_table_size = new_size;
	kvfree_rcu_mightsleep(old);

	return &rcu_dereference_protected(c->snapshots, true)->s[idx];
}
static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
{
	size_t idx = U32_MAX - id;

	lockdep_assert_held(&c->snapshot_table_lock);

	if (likely(idx < c->snapshot_table_size))
		return &rcu_dereference_protected(c->snapshots, true)->s[idx];

	return __snapshot_t_mut(c, id);
}
void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
			   struct bkey_s_c k)
{
	struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);

	prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
		   BCH_SNAPSHOT_SUBVOL(s.v),
		   BCH_SNAPSHOT_DELETED(s.v),
		   le32_to_cpu(s.v->parent),
		   le32_to_cpu(s.v->children[0]),
		   le32_to_cpu(s.v->children[1]),
		   le32_to_cpu(s.v->subvol),
		   le32_to_cpu(s.v->tree));

	if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, depth))
		prt_printf(out, " depth %u skiplist %u %u %u",
			   le32_to_cpu(s.v->depth),
			   le32_to_cpu(s.v->skip[0]),
			   le32_to_cpu(s.v->skip[1]),
			   le32_to_cpu(s.v->skip[2]));
}
int bch2_snapshot_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_s_c_snapshot s;
	u32 i, id;
	int ret = 0;

	bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
			 bkey_lt(k.k->p, POS(0, 1)), c, err,
			 snapshot_pos_bad,
			 "bad pos");

	s = bkey_s_c_to_snapshot(k);

	id = le32_to_cpu(s.v->parent);
	bkey_fsck_err_on(id && id <= k.k->p.offset, c, err,
			 snapshot_parent_bad,
			 "bad parent node (%u <= %llu)",
			 id, k.k->p.offset);

	bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]), c, err,
			 snapshot_children_not_normalized,
			 "children not normalized");

	bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1], c, err,
			 snapshot_child_duplicate,
			 "duplicate child nodes");

	for (i = 0; i < 2; i++) {
		id = le32_to_cpu(s.v->children[i]);

		bkey_fsck_err_on(id >= k.k->p.offset, c, err,
				 snapshot_child_bad,
				 "bad child node (%u >= %llu)",
				 id, k.k->p.offset);
	}

	if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
		bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
				 le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]), c, err,
				 snapshot_skiplist_not_normalized,
				 "skiplist not normalized");

		for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
			id = le32_to_cpu(s.v->skip[i]);

			bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent), c, err,
					 snapshot_skiplist_bad,
					 "bad skiplist node %u", id);
		}
	}
fsck_err:
	return ret;
}
static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
{
	struct snapshot_t *t = snapshot_t_mut(c, id);
	u32 parent = id;

	while ((parent = bch2_snapshot_parent_early(c, parent)) &&
	       parent - id - 1 < IS_ANCESTOR_BITMAP)
		__set_bit(parent - id - 1, t->is_ancestor);
}

static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
{
	mutex_lock(&c->snapshot_table_lock);
	__set_is_ancestor_bitmap(c, id);
	mutex_unlock(&c->snapshot_table_lock);
}
int bch2_mark_snapshot(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s_c new,
		       unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct snapshot_t *t;
	u32 id = new.k->p.offset;
	int ret = 0;

	mutex_lock(&c->snapshot_table_lock);

	t = snapshot_t_mut(c, id);
	if (!t) {
		ret = -BCH_ERR_ENOMEM_mark_snapshot;
		goto err;
	}

	if (new.k->type == KEY_TYPE_snapshot) {
		struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);

		t->parent	= le32_to_cpu(s.v->parent);
		t->children[0]	= le32_to_cpu(s.v->children[0]);
		t->children[1]	= le32_to_cpu(s.v->children[1]);
		t->subvol	= BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
		t->tree		= le32_to_cpu(s.v->tree);

		if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
			t->depth	= le32_to_cpu(s.v->depth);
			t->skip[0]	= le32_to_cpu(s.v->skip[0]);
			t->skip[1]	= le32_to_cpu(s.v->skip[1]);
			t->skip[2]	= le32_to_cpu(s.v->skip[2]);
		} else {
			t->depth	= 0;
			t->skip[0]	= 0;
			t->skip[1]	= 0;
			t->skip[2]	= 0;
		}

		__set_is_ancestor_bitmap(c, id);

		if (BCH_SNAPSHOT_DELETED(s.v)) {
			set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
			if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
				bch2_delete_dead_snapshots_async(c);
		}
	} else {
		memset(t, 0, sizeof(*t));
	}
err:
	mutex_unlock(&c->snapshot_table_lock);
	return ret;
}
int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
			 struct bch_snapshot *s)
{
	return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
				       BTREE_ITER_WITH_UPDATES, snapshot, s);
}
static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
{
	struct bch_snapshot v;
	int ret;

	if (!id)
		return 0;

	ret = bch2_snapshot_lookup(trans, id, &v);
	if (bch2_err_matches(ret, ENOENT))
		bch_err(trans->c, "snapshot node %u not found", id);
	if (ret)
		return ret;

	return !BCH_SNAPSHOT_DELETED(&v);
}
/*
 * If @k is a snapshot with just one live child, it's part of a linear chain,
 * which we consider to be an equivalence class: and then after snapshot
 * deletion cleanup, there should only be a single key at a given position in
 * this equivalence class.
 *
 * This sets the equivalence class of @k to be the child's equivalence class,
 * if it's part of such a linear chain: this correctly sets equivalence classes
 * on startup if we run leaf to root (i.e. in natural key order).
 */
static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	unsigned i, nr_live = 0, live_idx = 0;
	struct bkey_s_c_snapshot snap;
	u32 id = k.k->p.offset, child[2];

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);

	child[0] = le32_to_cpu(snap.v->children[0]);
	child[1] = le32_to_cpu(snap.v->children[1]);

	for (i = 0; i < 2; i++) {
		int ret = bch2_snapshot_live(trans, child[i]);

		if (ret < 0)
			return ret;

		if (ret)
			live_idx = i;
		nr_live += ret;
	}

	mutex_lock(&c->snapshot_table_lock);

	snapshot_t_mut(c, id)->equiv = nr_live == 1
		? snapshot_t_mut(c, child[live_idx])->equiv
		: id;

	mutex_unlock(&c->snapshot_table_lock);

	return 0;
}
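/*
 * Worked example of the equivalence classes described above (made-up IDs): in
 * the linear chain 100 -> 90 -> 80, nodes 100 and 90 each have exactly one
 * live child, so running leaf to root sets equiv = 80 for all three nodes;
 * deletion cleanup can then keep a single key per position within that class.
 */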
static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
{
	return snapshot_t(c, id)->children[child];
}

static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
{
	return bch2_snapshot_child(c, id, 0);
}

static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
{
	return bch2_snapshot_child(c, id, 1);
}

static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
{
	u32 n, parent;

	n = bch2_snapshot_left_child(c, id);
	if (n)
		return n;

	while ((parent = bch2_snapshot_parent(c, id))) {
		n = bch2_snapshot_right_child(c, parent);
		if (n && n != id)
			return n;
		id = parent;
	}

	return 0;
}

static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
{
	u32 id = snapshot_root;
	u32 subvol = 0, s;

	while (id) {
		s = snapshot_t(c, id)->subvol;

		if (s && (!subvol || s < subvol))
			subvol = s;

		id = bch2_snapshot_tree_next(c, id);
	}

	return subvol;
}
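/*
 * Note on the traversal above: bch2_snapshot_tree_next() is an iterative
 * pre-order walk that uses parent pointers instead of a stack - descend to a
 * left child when one exists, otherwise climb until some parent has an
 * unvisited right child.  "Oldest" subvolume here means the one with the
 * smallest ID, on the assumption that subvolume IDs are allocated in
 * increasing order.
 */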
static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
					    u32 snapshot_root, u32 *subvol_id)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_subvolume s;
	bool found = false;
	int ret;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
				     0, k, ret) {
		if (k.k->type != KEY_TYPE_subvolume)
			continue;

		s = bkey_s_c_to_subvolume(k);
		if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
			continue;
		if (!BCH_SUBVOLUME_SNAP(s.v)) {
			*subvol_id = s.k->p.offset;
			found = true;
			break;
		}
	}

	bch2_trans_iter_exit(trans, &iter);

	if (!ret && !found) {
		struct bkey_i_subvolume *u;

		*subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);

		u = bch2_bkey_get_mut_typed(trans, &iter,
					    BTREE_ID_subvolumes, POS(0, *subvol_id),
					    0, subvolume);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			return ret;

		SET_BCH_SUBVOLUME_SNAP(&u->v, false);
	}

	return ret;
}
static int check_snapshot_tree(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_snapshot_tree st;
	struct bch_snapshot s;
	struct bch_subvolume subvol;
	struct printbuf buf = PRINTBUF;
	u32 root_id;
	int ret;

	if (k.k->type != KEY_TYPE_snapshot_tree)
		return 0;

	st = bkey_s_c_to_snapshot_tree(k);
	root_id = le32_to_cpu(st.v->root_snapshot);

	ret = bch2_snapshot_lookup(trans, root_id, &s);
	if (ret && !bch2_err_matches(ret, ENOENT))
		goto err;

	if (fsck_err_on(ret ||
			root_id != bch2_snapshot_root(c, root_id) ||
			st.k->p.offset != le32_to_cpu(s.tree),
			c, snapshot_tree_to_missing_snapshot,
			"snapshot tree points to missing/incorrect snapshot:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto err;
	}

	ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
				 false, 0, &subvol);
	if (ret && !bch2_err_matches(ret, ENOENT))
		goto err;

	if (fsck_err_on(ret,
			c, snapshot_tree_to_missing_subvol,
			"snapshot tree points to missing subvolume:\n  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
	    fsck_err_on(!bch2_snapshot_is_ancestor_early(c,
						le32_to_cpu(subvol.snapshot),
						root_id),
			c, snapshot_tree_to_wrong_subvol,
			"snapshot tree points to subvolume that does not point to snapshot in this tree:\n  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
	    fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol),
			c, snapshot_tree_to_snapshot_subvol,
			"snapshot tree points to snapshot subvolume:\n  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
		struct bkey_i_snapshot_tree *u;
		u32 subvol_id;

		ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
		if (ret)
			goto err;

		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.master_subvol = cpu_to_le32(subvol_id);
		st = snapshot_tree_i_to_s_c(u);
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
/*
 * For each snapshot_tree, make sure it points to the root of a snapshot tree
 * and that snapshot entry points back to it, or delete it.
 *
 * And, make sure it points to a subvolume within that snapshot tree, or correct
 * it to point to the oldest subvolume within that snapshot tree.
 */
int bch2_check_snapshot_trees(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
			BTREE_ID_snapshot_trees, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
		check_snapshot_tree(trans, &iter, k)));

	if (ret)
		bch_err(c, "error %i checking snapshot trees", ret);
	return ret;
}
/*
 * Look up snapshot tree for @tree_id and find root,
 * make sure @snap_id is a descendent:
 */
static int snapshot_tree_ptr_good(struct btree_trans *trans,
				  u32 snap_id, u32 tree_id)
{
	struct bch_snapshot_tree s_t;
	int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);

	if (bch2_err_matches(ret, ENOENT))
		return 0;
	if (ret)
		return ret;

	return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
}
u32 bch2_snapshot_skiplist_get(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s;

	if (!id)
		return 0;

	rcu_read_lock();
	s = snapshot_t(c, id);
	if (s->parent)
		id = bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
	rcu_read_unlock();

	return id;
}
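/*
 * Rough sketch of the skiplist construction (the random choice is an
 * implementation detail, not on-disk format): each new node's three skip
 * pointers each point at a uniformly random ancestor, chosen by walking a
 * random number of parent hops bounded by the node's depth.  Randomized skip
 * pointers are what give get_ancestor_below() its expected O(log n) hops
 * without any rebalancing on insert or delete.
 */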
static int snapshot_skiplist_good(struct btree_trans *trans, u32 id, struct bch_snapshot s)
{
	unsigned i;

	for (i = 0; i < 3; i++)
		if (!s.parent) {
			if (s.skip[i])
				return false;
		} else {
			if (!bch2_snapshot_is_ancestor_early(trans->c, id, le32_to_cpu(s.skip[i])))
				return false;
		}

	return true;
}
/*
 * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
 * its snapshot_tree pointer is correct (allocate new one if necessary), then
 * update this node's pointer to root node's pointer:
 */
static int snapshot_tree_ptr_repair(struct btree_trans *trans,
				    struct btree_iter *iter,
				    struct bkey_s_c k,
				    struct bch_snapshot *s)
{
	struct bch_fs *c = trans->c;
	struct btree_iter root_iter;
	struct bch_snapshot_tree s_t;
	struct bkey_s_c_snapshot root;
	struct bkey_i_snapshot *u;
	u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id;
	int ret;

	root = bch2_bkey_get_iter_typed(trans, &root_iter,
			       BTREE_ID_snapshots, POS(0, root_id),
			       BTREE_ITER_WITH_UPDATES, snapshot);
	ret = bkey_err(root);
	if (ret)
		goto err;

	tree_id = le32_to_cpu(root.v->tree);

	ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
	if (ret && !bch2_err_matches(ret, ENOENT))
		return ret;

	if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
		u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
		ret =   PTR_ERR_OR_ZERO(u) ?:
			bch2_snapshot_tree_create(trans, root_id,
				bch2_snapshot_tree_oldest_subvol(c, root_id),
				&tree_id);
		if (ret)
			goto err;

		u->v.tree = cpu_to_le32(tree_id);
		if (k.k->p.offset == root_id)
			*s = u->v;
	}

	if (k.k->p.offset != root_id) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.tree = cpu_to_le32(tree_id);
		*s = u->v;
	}
err:
	bch2_trans_iter_exit(trans, &root_iter);
	return ret;
}
static int check_snapshot(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bch_snapshot s;
	struct bch_subvolume subvol;
	struct bch_snapshot v;
	struct bkey_i_snapshot *u;
	u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
	u32 real_depth;
	struct printbuf buf = PRINTBUF;
	bool should_have_subvol;
	u32 i, id;
	int ret = 0;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	memset(&s, 0, sizeof(s));
	memcpy(&s, k.v, bkey_val_bytes(k.k));

	id = le32_to_cpu(s.parent);
	if (id) {
		ret = bch2_snapshot_lookup(trans, id, &v);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot with nonexistent parent:\n  %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (ret)
			goto err;

		if (le32_to_cpu(v.children[0]) != k.k->p.offset &&
		    le32_to_cpu(v.children[1]) != k.k->p.offset) {
			bch_err(c, "snapshot parent %u missing pointer to child %llu",
				id, k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	}

	for (i = 0; i < 2 && s.children[i]; i++) {
		id = le32_to_cpu(s.children[i]);

		ret = bch2_snapshot_lookup(trans, id, &v);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot node %llu has nonexistent child %u",
				k.k->p.offset, id);
		if (ret)
			goto err;

		if (le32_to_cpu(v.parent) != k.k->p.offset) {
			bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
				id, le32_to_cpu(v.parent), k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	}

	should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
		!BCH_SNAPSHOT_DELETED(&s);

	if (should_have_subvol) {
		id = le32_to_cpu(s.subvol);
		ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot points to nonexistent subvolume:\n  %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (ret)
			goto err;

		if (BCH_SNAPSHOT_SUBVOL(&s) != (le32_to_cpu(subvol.snapshot) == k.k->p.offset)) {
			bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
				k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	} else {
		if (fsck_err_on(s.subvol,
				c, snapshot_should_not_have_subvol,
				"snapshot should not point to subvol:\n  %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
			u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			u->v.subvol = 0;
			s = u->v;
		}
	}

	ret = snapshot_tree_ptr_good(trans, k.k->p.offset, le32_to_cpu(s.tree));
	if (ret < 0)
		goto err;

	if (fsck_err_on(!ret, c, snapshot_to_bad_snapshot_tree,
			"snapshot points to missing/incorrect tree:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
		if (ret)
			goto err;
	}
	ret = 0;

	real_depth = bch2_snapshot_depth(c, parent_id);

	if (le32_to_cpu(s.depth) != real_depth &&
	    (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
	     fsck_err(c, snapshot_bad_depth,
		      "snapshot with incorrect depth field, should be %u:\n  %s",
		      real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.depth = cpu_to_le32(real_depth);
		s = u->v;
	}

	ret = snapshot_skiplist_good(trans, k.k->p.offset, s);
	if (ret < 0)
		goto err;

	if (!ret &&
	    (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
	     fsck_err(c, snapshot_bad_skiplist,
		      "snapshot with bad skiplist field:\n  %s",
		      (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
			u->v.skip[i] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent_id));

		bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32);
		s = u->v;
	}
	ret = 0;
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
int bch2_check_snapshots(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	/*
	 * We iterate backwards as checking/fixing the depth field requires that
	 * the parent's depth already be correct:
	 */
	ret = bch2_trans_run(c,
		for_each_btree_key_reverse_commit(trans, iter,
			BTREE_ID_snapshots, POS_MAX,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
		check_snapshot(trans, &iter, k)));
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
/*
 * Mark a snapshot as deleted, for future cleanup:
 */
int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
{
	struct btree_iter iter;
	struct bkey_i_snapshot *s;
	int ret = 0;

	s = bch2_bkey_get_mut_typed(trans, &iter,
				    BTREE_ID_snapshots, POS(0, id),
				    0, snapshot);
	ret = PTR_ERR_OR_ZERO(s);
	if (unlikely(ret)) {
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
					trans->c, "missing snapshot %u", id);
		return ret;
	}

	/* already deleted? */
	if (BCH_SNAPSHOT_DELETED(&s->v))
		goto err;

	SET_BCH_SNAPSHOT_DELETED(&s->v, true);
	SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
	s->v.subvol = 0;
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
{
	if (le32_to_cpu(s->children[0]) < le32_to_cpu(s->children[1]))
		swap(s->children[0], s->children[1]);
}
static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
	struct btree_iter c_iter = (struct btree_iter) { NULL };
	struct btree_iter tree_iter = (struct btree_iter) { NULL };
	struct bkey_s_c_snapshot s;
	u32 parent_id, child_id;
	unsigned i;
	int ret = 0;

	s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
				     BTREE_ITER_INTENT, snapshot);
	ret = bkey_err(s);
	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
				"missing snapshot %u", id);

	if (ret)
		goto err;

	BUG_ON(s.v->children[1]);

	parent_id = le32_to_cpu(s.v->parent);
	child_id = le32_to_cpu(s.v->children[0]);

	if (parent_id) {
		struct bkey_i_snapshot *parent;

		parent = bch2_bkey_get_mut_typed(trans, &p_iter,
				     BTREE_ID_snapshots, POS(0, parent_id),
				     0, snapshot);
		ret = PTR_ERR_OR_ZERO(parent);
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
					"missing snapshot %u", parent_id);
		if (unlikely(ret))
			goto err;

		/* find entry in parent->children for node being deleted */
		for (i = 0; i < 2; i++)
			if (le32_to_cpu(parent->v.children[i]) == id)
				break;

		if (bch2_fs_inconsistent_on(i == 2, c,
					    "snapshot %u missing child pointer to %u",
					    parent_id, id))
			goto err;

		parent->v.children[i] = cpu_to_le32(child_id);

		normalize_snapshot_child_pointers(&parent->v);
	}

	if (child_id) {
		struct bkey_i_snapshot *child;

		child = bch2_bkey_get_mut_typed(trans, &c_iter,
				     BTREE_ID_snapshots, POS(0, child_id),
				     0, snapshot);
		ret = PTR_ERR_OR_ZERO(child);
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
					"missing snapshot %u", child_id);
		if (unlikely(ret))
			goto err;

		child->v.parent = cpu_to_le32(parent_id);

		if (!child->v.parent) {
			child->v.skip[0] = 0;
			child->v.skip[1] = 0;
			child->v.skip[2] = 0;
		}
	}

	if (!parent_id) {
		/*
		 * We're deleting the root of a snapshot tree: update the
		 * snapshot_tree entry to point to the new root, or delete it if
		 * this is the last snapshot ID in this tree:
		 */
		struct bkey_i_snapshot_tree *s_t;

		BUG_ON(s.v->children[1]);

		s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
				BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
				0, snapshot_tree);
		ret = PTR_ERR_OR_ZERO(s_t);
		if (ret)
			goto err;

		if (s.v->children[0]) {
			s_t->v.root_snapshot = s.v->children[0];
		} else {
			s_t->k.type = KEY_TYPE_deleted;
			set_bkey_val_u64s(&s_t->k, 0);
		}
	}

	ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &tree_iter);
	bch2_trans_iter_exit(trans, &p_iter);
	bch2_trans_iter_exit(trans, &c_iter);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
			  u32 *new_snapids,
			  u32 *snapshot_subvols,
			  unsigned nr_snapids)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_snapshot *n;
	struct bkey_s_c k;
	unsigned i, j;
	u32 depth = bch2_snapshot_depth(c, parent);
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
			     POS_MIN, BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	for (i = 0; i < nr_snapids; i++) {
		k = bch2_btree_iter_prev_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (!k.k || !k.k->p.offset) {
			ret = -BCH_ERR_ENOSPC_snapshot_create;
			goto err;
		}

		n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(n);
		if (ret)
			goto err;

		n->v.flags	= 0;
		n->v.parent	= cpu_to_le32(parent);
		n->v.subvol	= cpu_to_le32(snapshot_subvols[i]);
		n->v.tree	= cpu_to_le32(tree);
		n->v.depth	= cpu_to_le32(depth);

		for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
			n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));

		bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
		SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);

		ret = bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
					 bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
		if (ret)
			goto err;

		new_snapids[i]	= iter.pos.offset;

		mutex_lock(&c->snapshot_table_lock);
		snapshot_t_mut(c, new_snapids[i])->equiv = new_snapids[i];
		mutex_unlock(&c->snapshot_table_lock);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
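/*
 * Note on ID allocation above (implied by the iterator usage rather than
 * documented): peek from POS_MIN finds the lowest existing snapshot ID, and
 * prev_slot steps to the empty slot just below it, so new IDs are always
 * smaller than every existing ID - which is what maintains the invariant
 * that a child's ID is less than its parent's.
 */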
/*
 * Create new snapshot IDs as children of an existing snapshot ID:
 */
static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
					      u32 *new_snapids,
					      u32 *snapshot_subvols,
					      unsigned nr_snapids)
{
	struct btree_iter iter;
	struct bkey_i_snapshot *n_parent;
	int ret = 0;

	n_parent = bch2_bkey_get_mut_typed(trans, &iter,
			BTREE_ID_snapshots, POS(0, parent),
			0, snapshot);
	ret = PTR_ERR_OR_ZERO(n_parent);
	if (unlikely(ret)) {
		if (bch2_err_matches(ret, ENOENT))
			bch_err(trans->c, "snapshot %u not found", parent);
		return ret;
	}

	if (n_parent->v.children[0] || n_parent->v.children[1]) {
		bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
		ret = -EINVAL;
		goto err;
	}

	ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
			     new_snapids, snapshot_subvols, nr_snapids);
	if (ret)
		goto err;

	n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
	n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
	n_parent->v.subvol = 0;
	SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
/*
 * Create a snapshot node that is the root of a new tree:
 */
static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
					  u32 *new_snapids,
					  u32 *snapshot_subvols,
					  unsigned nr_snapids)
{
	struct bkey_i_snapshot_tree *n_tree;
	int ret;

	n_tree = __bch2_snapshot_tree_create(trans);
	ret =   PTR_ERR_OR_ZERO(n_tree) ?:
		create_snapids(trans, 0, n_tree->k.p.offset,
			       new_snapids, snapshot_subvols, nr_snapids);
	if (ret)
		return ret;

	n_tree->v.master_subvol	= cpu_to_le32(snapshot_subvols[0]);
	n_tree->v.root_snapshot	= cpu_to_le32(new_snapids[0]);
	return 0;
}

int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
			      u32 *new_snapids,
			      u32 *snapshot_subvols,
			      unsigned nr_snapids)
{
	BUG_ON((parent == 0) != (nr_snapids == 1));
	BUG_ON((parent != 0) != (nr_snapids == 2));

	return parent
		? bch2_snapshot_node_create_children(trans, parent,
				new_snapids, snapshot_subvols, nr_snapids)
		: bch2_snapshot_node_create_tree(trans,
				new_snapids, snapshot_subvols, nr_snapids);
}
/*
 * If we have an unlinked inode in an internal snapshot node, and the inode
 * really has been deleted in all child snapshots, how does this get cleaned up?
 *
 * First there is the problem of how keys that have been overwritten in all
 * child snapshots get deleted (unimplemented?), but inodes may perhaps be
 * returned to the allocator before this happens?
 *
 * Also: an unlinked inode in an internal snapshot appears not to be getting
 * deleted correctly if the inode doesn't exist in leaf snapshots.
 *
 * Solution:
 *
 * For a key in an interior snapshot node that needs work to be done that
 * requires it to be mutated: iterate over all descendent leaf nodes and copy
 * that key to snapshot leaf nodes, where we can mutate it.
 */
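/*
 * Worked example of the copy-to-leaves strategy (made-up IDs): an unlinked
 * inode at (inum, snapshot 90), where 90 is an interior node with live leaf
 * descendants 80 and 70, gets copied to (inum, 80) and (inum, 70); each leaf
 * copy can then be deleted independently and the interior key becomes
 * redundant.
 */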
static int snapshot_delete_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k,
			       snapshot_id_list *deleted,
			       snapshot_id_list *equiv_seen,
			       struct bpos *last_pos)
{
	struct bch_fs *c = trans->c;
	u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);

	if (!bkey_eq(k.k->p, *last_pos))
		equiv_seen->nr = 0;
	*last_pos = k.k->p;

	if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
	    snapshot_list_has_id(equiv_seen, equiv)) {
		return bch2_btree_delete_at(trans, iter,
					    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
	} else {
		return snapshot_list_add(c, equiv_seen, equiv);
	}
}
static int move_key_to_correct_snapshot(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);

	/*
	 * When we have a linear chain of snapshot nodes, we consider
	 * those to form an equivalence class: we're going to collapse
	 * them all down to a single node, and keep the leaf-most node -
	 * which has the same id as the equivalence class id.
	 *
	 * If there are multiple keys in different snapshots at the same
	 * position, we're only going to keep the one in the newest
	 * snapshot - the rest have been overwritten and are redundant,
	 * and for the key we're going to keep we need to move it to the
	 * equivalence class ID if it's not there already.
	 */
	if (equiv != k.k->p.snapshot) {
		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
		struct btree_iter new_iter;
		int ret;

		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			return ret;

		new->k.p.snapshot = equiv;

		bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
				     BTREE_ITER_ALL_SNAPSHOTS|
				     BTREE_ITER_CACHED|
				     BTREE_ITER_INTENT);

		ret =   bch2_btree_iter_traverse(&new_iter) ?:
			bch2_trans_update(trans, &new_iter, new,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
			bch2_btree_delete_at(trans, iter,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
		bch2_trans_iter_exit(trans, &new_iter);
		if (ret)
			return ret;
	}

	return 0;
}
static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_snapshot snap;
	u32 children[2];
	int ret = 0;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);
	if (BCH_SNAPSHOT_DELETED(snap.v) ||
	    BCH_SNAPSHOT_SUBVOL(snap.v))
		return 0;

	children[0] = le32_to_cpu(snap.v->children[0]);
	children[1] = le32_to_cpu(snap.v->children[1]);

	ret =   bch2_snapshot_live(trans, children[0]) ?:
		bch2_snapshot_live(trans, children[1]);
	if (ret < 0)
		return ret;
	return !ret;
}

/*
 * For a given snapshot, if it doesn't have a subvolume that points to it, and
 * it doesn't have child snapshot nodes - it's now redundant and we can mark it
 * as deleted:
 */
static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k)
{
	int ret = bch2_snapshot_needs_delete(trans, k);

	return ret <= 0
		? ret
		: bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
}
static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
						snapshot_id_list *skip)
{
	rcu_read_lock();
	while (snapshot_list_has_id(skip, id))
		id = __bch2_snapshot_parent(c, id);

	while (n--) {
		do {
			id = __bch2_snapshot_parent(c, id);
		} while (snapshot_list_has_id(skip, id));
	}
	rcu_read_unlock();

	return id;
}
static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
					      struct btree_iter *iter, struct bkey_s_c k,
					      snapshot_id_list *deleted)
{
	struct bch_fs *c = trans->c;
	u32 nr_deleted_ancestors = 0;
	struct bkey_i_snapshot *s;
	u32 *i;
	int ret;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	if (snapshot_list_has_id(deleted, k.k->p.offset))
		return 0;

	s = bch2_bkey_make_mut_noupdate_typed(trans, k, snapshot);
	ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		return ret;

	darray_for_each(*deleted, i)
		nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, *i);

	if (!nr_deleted_ancestors)
		return 0;

	le32_add_cpu(&s->v.depth, -nr_deleted_ancestors);

	if (!s->v.depth) {
		s->v.skip[0] = 0;
		s->v.skip[1] = 0;
		s->v.skip[2] = 0;
	} else {
		u32 depth = le32_to_cpu(s->v.depth);
		u32 parent = bch2_snapshot_parent(c, s->k.p.offset);

		for (unsigned j = 0; j < ARRAY_SIZE(s->v.skip); j++) {
			u32 id = le32_to_cpu(s->v.skip[j]);

			if (snapshot_list_has_id(deleted, id)) {
				id = bch2_snapshot_nth_parent_skip(c,
							parent,
							depth > 1
							? get_random_u32_below(depth - 1)
							: 0,
							deleted);
				s->v.skip[j] = cpu_to_le32(id);
			}
		}

		bubble_sort(s->v.skip, ARRAY_SIZE(s->v.skip), cmp_le32);
	}

	return bch2_trans_update(trans, iter, &s->k_i, 0);
}
int bch2_delete_dead_snapshots(struct bch_fs *c)
{
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_snapshot snap;
	snapshot_id_list deleted = { 0 };
	snapshot_id_list deleted_interior = { 0 };
	u32 *i, id;
	int ret = 0;

	if (!test_and_clear_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags))
		return 0;

	if (!test_bit(BCH_FS_STARTED, &c->flags)) {
		ret = bch2_fs_read_write_early(c);
		if (ret) {
			bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
			return ret;
		}
	}

	trans = bch2_trans_get(c);

	/*
	 * For every snapshot node: If we have no live children and it's not
	 * pointed to by a subvolume, delete it:
	 */
	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
			POS_MIN, 0, k,
			NULL, NULL, 0,
		bch2_delete_redundant_snapshot(trans, k));
	if (ret) {
		bch_err_msg(c, ret, "deleting redundant snapshots");
		goto err;
	}

	ret = for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
				  POS_MIN, 0, k,
		bch2_snapshot_set_equiv(trans, k));
	if (ret) {
		bch_err_msg(c, ret, "in bch2_snapshots_set_equiv");
		goto err;
	}

	for_each_btree_key(trans, iter, BTREE_ID_snapshots,
			   POS_MIN, 0, k, ret) {
		if (k.k->type != KEY_TYPE_snapshot)
			continue;

		snap = bkey_s_c_to_snapshot(k);
		if (BCH_SNAPSHOT_DELETED(snap.v)) {
			ret = snapshot_list_add(c, &deleted, k.k->p.offset);
			if (ret)
				break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	if (ret) {
		bch_err_msg(c, ret, "walking snapshots");
		goto err;
	}

	for (id = 0; id < BTREE_ID_NR; id++) {
		struct bpos last_pos = POS_MIN;
		snapshot_id_list equiv_seen = { 0 };
		struct disk_reservation res = { 0 };

		if (!btree_type_has_snapshots(id))
			continue;

		/*
		 * deleted inodes btree is maintained by a trigger on the inodes
		 * btree - no work for us to do here, and it's not safe to scan
		 * it because we'll see out of date keys due to the btree write
		 * buffer:
		 */
		if (id == BTREE_ID_deleted_inodes)
			continue;

		ret = for_each_btree_key_commit(trans, iter,
				id, POS_MIN,
				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
				&res, NULL, BTREE_INSERT_NOFAIL,
			snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
		      for_each_btree_key_commit(trans, iter,
				id, POS_MIN,
				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
				&res, NULL, BTREE_INSERT_NOFAIL,
			move_key_to_correct_snapshot(trans, &iter, k));

		bch2_disk_reservation_put(c, &res);
		darray_exit(&equiv_seen);

		if (ret) {
			bch_err_msg(c, ret, "deleting keys from dying snapshots");
			goto err;
		}
	}

	bch2_trans_unlock(trans);
	down_write(&c->snapshot_create_lock);

	for_each_btree_key(trans, iter, BTREE_ID_snapshots,
			   POS_MIN, 0, k, ret) {
		u32 snapshot = k.k->p.offset;
		u32 equiv = bch2_snapshot_equiv(c, snapshot);

		if (equiv != snapshot)
			snapshot_list_add(c, &deleted_interior, snapshot);
	}
	bch2_trans_iter_exit(trans, &iter);

	if (ret)
		goto err_create_lock;

	/*
	 * Fixing children of deleted snapshots can't be done completely
	 * atomically, if we crash between here and when we delete the interior
	 * nodes some depth fields will be off:
	 */
	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
				  BTREE_ITER_INTENT, k,
				  NULL, NULL, BTREE_INSERT_NOFAIL,
		bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
	if (ret)
		goto err_create_lock;

	darray_for_each(deleted, i) {
		ret = commit_do(trans, NULL, NULL, 0,
			bch2_snapshot_node_delete(trans, *i));
		if (ret) {
			bch_err_msg(c, ret, "deleting snapshot %u", *i);
			goto err_create_lock;
		}
	}

	darray_for_each(deleted_interior, i) {
		ret = commit_do(trans, NULL, NULL, 0,
			bch2_snapshot_node_delete(trans, *i));
		if (ret) {
			bch_err_msg(c, ret, "deleting snapshot %u", *i);
			goto err_create_lock;
		}
	}
err_create_lock:
	up_write(&c->snapshot_create_lock);
err:
	darray_exit(&deleted_interior);
	darray_exit(&deleted);
	bch2_trans_put(trans);
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
void bch2_delete_dead_snapshots_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);

	bch2_delete_dead_snapshots(c);
	bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}

void bch2_delete_dead_snapshots_async(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
	    !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}
int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
				       enum btree_id id,
				       struct bpos pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, id, pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
	while (1) {
		k = bch2_btree_iter_prev(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		if (!k.k)
			break;

		if (!bkey_eq(pos, k.k->p))
			break;

		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
			ret = 1;
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}
static u32 bch2_snapshot_smallest_child(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s = snapshot_t(c, id);

	return s->children[1] ?: s->children[0];
}

static u32 bch2_snapshot_smallest_descendent(struct bch_fs *c, u32 id)
{
	u32 child;

	while ((child = bch2_snapshot_smallest_child(c, id)))
		id = child;
	return id;
}
static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
					       enum btree_id btree,
					       struct bkey_s_c interior_k,
					       u32 leaf_id, struct bpos *new_min_pos)
{
	struct btree_iter iter;
	struct bpos pos = interior_k.k->p;
	struct bkey_s_c k;
	struct bkey_i *new;
	int ret;

	pos.snapshot = leaf_id;

	bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	/* key already overwritten in this snapshot? */
	if (k.k->p.snapshot != interior_k.k->p.snapshot)
		goto out;

	if (bpos_eq(*new_min_pos, POS_MIN)) {
		*new_min_pos = k.k->p;
		new_min_pos->snapshot = leaf_id;
	}

	new = bch2_bkey_make_mut_noupdate(trans, interior_k);
	ret = PTR_ERR_OR_ZERO(new);
	if (ret)
		goto out;

	new->k.p.snapshot = leaf_id;
	ret = bch2_trans_update(trans, &iter, new, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *trans,
					  enum btree_id btree,
					  struct bkey_s_c k,
					  struct bpos *new_min_pos)
{
	struct bch_fs *c = trans->c;
	struct bkey_buf sk;
	u32 restart_count = trans->restart_count;
	int ret = 0;

	bch2_bkey_buf_init(&sk);
	bch2_bkey_buf_reassemble(&sk, c, k);
	k = bkey_i_to_s_c(sk.k);

	*new_min_pos = POS_MIN;

	for (u32 id = bch2_snapshot_smallest_descendent(c, k.k->p.snapshot);
	     id < k.k->p.snapshot;
	     id++) {
		if (!bch2_snapshot_is_ancestor(c, id, k.k->p.snapshot) ||
		    !bch2_snapshot_is_leaf(c, id))
			continue;
again:
		ret =   btree_trans_too_many_iters(trans) ?:
			bch2_propagate_key_to_snapshot_leaf(trans, btree, k, id, new_min_pos) ?:
			bch2_trans_commit(trans, NULL, NULL, 0);
		if (ret && bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			bch2_trans_begin(trans);
			goto again;
		}

		if (ret)
			break;
	}

	bch2_bkey_buf_exit(&sk, c);

	return ret ?: trans_was_restarted(trans, restart_count);
}
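/*
 * This pair of functions implements the "iterate over all descendent leaf
 * nodes and copy" strategy from the design comment further up this file; on
 * return, *new_min_pos holds the first position that was modified (or POS_MIN
 * if nothing was), so callers can restart their scan from there.
 */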
static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_snapshot snap;
	int ret = 0;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);
	if (BCH_SNAPSHOT_DELETED(snap.v) ||
	    bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
	    (ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
		set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
		return 0;
	}

	return ret;
}
int bch2_snapshots_read(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
			   POS_MIN, 0, k,
			bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
			bch2_snapshot_set_equiv(trans, k) ?:
			bch2_check_snapshot_needs_deletion(trans, k)) ?:
		for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
			   POS_MIN, 0, k,
			   (set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
void bch2_fs_snapshots_exit(struct bch_fs *c)
{
	kfree(rcu_dereference_protected(c->snapshots, true));
}