// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/error-injection.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "print-tree.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "inode-item.h"
#include "space-info.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "file-item.h"
#include "relocation.h"
/*
 * [What does relocation do]
 *
 * The objective of relocation is to relocate all extents of the target block
 * group to other block groups.
 * This is utilized by resize (shrink only), profile conversion, space
 * compaction, and the balance routine to spread chunks over devices.
 *
 *		Before		|		After
 * ------------------------------------------------------------------
 *  BG A: 10 data extents	| BG A: deleted
 *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
 *  BG C:  1 data extent	| BG C:  3 data extents (1 old + 2 relocated)
 *
 * [How does relocation work]
 *
 * 1.   Mark the target block group read-only
 *      New extents won't be allocated from the target block group.
 *
 * 2.1  Record each extent in the target block group
 *      To build a proper map of extents to be relocated.
 *
 * 2.2  Build data reloc tree and reloc trees
 *      Data reloc tree will contain an inode, recording all newly relocated
 *      data extents.
 *      There will be only one data reloc tree for one data block group.
 *
 *      Reloc tree will be a special snapshot of its source tree, containing
 *      relocated tree blocks.
 *      Each tree referring to a tree block in the target block group will get
 *      its own reloc tree.
 *
 * 2.3  Swap source tree with its corresponding reloc tree
 *      Each involved tree only refers to new extents after the swap.
 *
 * 3.   Cleanup reloc trees and data reloc tree
 *      As old extents in the target block group are still referenced by reloc
 *      trees, we need to clean them up before really freeing the target block
 *      group.
 *
 * The main complexity is in steps 2.2 and 2.3.
 *
 * The entry point of relocation is the relocate_block_group() function.
 */
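/*
 * A rough sketch of the overall flow (simplified sketch, not verbatim from
 * the driver; the real loop lives in btrfs_relocate_block_group() and also
 * handles retries and cleanup):
 *
 *	rc->stage = MOVE_DATA_EXTENTS;
 *	relocate_block_group(rc);   - copy data extents, relocate tree blocks
 *	rc->stage = UPDATE_DATA_PTRS;
 *	relocate_block_group(rc);   - rewrite file extent items to new copies
 */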
#define RELOCATION_RESERVED_NODES	256
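/*
 * Assumed reading (see reserve_metadata_space() below): when the relocation
 * block reservation runs low, its size is bumped in steps of this many tree
 * nodes' worth of metadata.
 */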
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};
/*
 * present a tree block to process
 */
struct tree_block {
	struct {
		struct rb_node rb_node;
		u64 bytenr; /* block address searching */
	}; /* Use rb_simple_node for search/insert */
	u64 owner;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};
#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};
struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct btrfs_backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};
/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1
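/*
 * Interpretation (from the header comment above): MOVE_DATA_EXTENTS copies
 * the data into the data reloc inode; UPDATE_DATA_PTRS then rewrites file
 * extent items to point at the copies.
 */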
static void mark_block_processed(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_range(node->bytenr, rc->block_group->start,
		     rc->block_group->length)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		set_extent_bits(&rc->processed_blocks, node->bytenr,
				node->bytenr + blocksize - 1, EXTENT_DIRTY);
	}
	node->processed = 1;
}
static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}
/*
 * walk up backref nodes until we reach the node that represents a tree root
 */
static struct btrfs_backref_node *walk_up_backref(
		struct btrfs_backref_node *node,
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}
/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct btrfs_backref_node *walk_down_backref(
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
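/*
 * Illustrative note (not from the original source): together these two
 * helpers enumerate every path from a block up to the roots that reference
 * it.  With roots R1 and R2 both pointing at node N above block B,
 * walk_up_backref() yields B->N->R1; walk_down_backref() then steps back to
 * the deepest node with an unvisited sibling edge, yielding N so the
 * B->N->R2 path can be walked next.
 */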
static void update_backref_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;

	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
}
/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup.  A transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct btrfs_backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}
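/*
 * Note on last_trans (derived from the code above): zero means the cache is
 * in sync and the first call simply records the current transid; a stale
 * non-zero value triggers the flush of detached/changed/pending nodes, after
 * which last_trans is reset to zero and 1 is returned to signal that an
 * update ran.
 */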
static bool reloc_root_is_dead(struct btrfs_root *root)
{
	/*
	 * Pair with set_bit/clear_bit in clean_dirty_subvols and
	 * btrfs_update_reloc_root.  We need to see the updated bit before
	 * trying to access reloc_root.
	 */
	smp_rmb();
	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
		return true;
	return false;
}
/*
 * Check if this subvolume tree has a valid reloc tree.
 *
 * Reloc tree after swap is considered dead, thus not considered as valid.
 * This is enough for most callers, as they don't distinguish a dead reloc
 * root from no reloc root.  But btrfs_should_ignore_reloc_root() below is a
 * special case.
 */
static bool have_reloc_root(struct btrfs_root *root)
{
	if (reloc_root_is_dead(root))
		return false;
	if (!root->reloc_root)
		return false;
	return true;
}
int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/* This root has been merged with its reloc tree, we can ignore it */
	if (reloc_root_is_dead(root))
		return 1;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_header_generation(reloc_root->commit_root) ==
	    root->fs_info->running_transaction->transid)
		return 0;
	/*
	 * If there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}
/*
 * find reloc tree by address of tree root
 */
struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	ASSERT(rc);
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return btrfs_grab_root(root);
}
/*
 * For useless nodes, do two major clean ups:
 *
 * - Cleanup the children edges and nodes
 *   If a child node is also orphan (no parent) during cleanup, then the child
 *   node will also be cleaned up.
 *
 * - Freeing up leaves (level 0), keeps nodes detached
 *   For nodes, the node is still cached as "detached"
 *
 * Return false if @node is not in the @useless_nodes list.
 * Return true if @node is in the @useless_nodes list.
 */
static bool handle_useless_nodes(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct list_head *useless_node = &cache->useless_node;
	bool ret = false;

	while (!list_empty(useless_node)) {
		struct btrfs_backref_node *cur;

		cur = list_first_entry(useless_node, struct btrfs_backref_node,
				       list);
		list_del_init(&cur->list);

		/* Only tree root nodes can be added to @useless_nodes */
		ASSERT(list_empty(&cur->upper));

		if (cur == node)
			ret = true;

		/* The node is the lowest node */
		if (cur->lowest) {
			list_del_init(&cur->lower);
			cur->lowest = 0;
		}

		/* Cleanup the lower edges */
		while (!list_empty(&cur->lower)) {
			struct btrfs_backref_edge *edge;
			struct btrfs_backref_node *lower;

			edge = list_entry(cur->lower.next,
					  struct btrfs_backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			btrfs_backref_free_edge(cache, edge);

			/* Child node is also orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
		}
		/* Mark this block processed for relocation */
		mark_block_processed(rc, cur);

		/*
		 * Backref nodes for tree leaves are deleted from the cache.
		 * Backref nodes for upper level tree blocks are left in the
		 * cache to avoid unnecessary backref lookup.
		 */
		if (cur->level > 0) {
			list_add(&cur->list, &cache->detached);
			cur->detached = 1;
		} else {
			rb_erase(&cur->rb_node, &cache->rb_root);
			btrfs_backref_free_node(cache, cur);
		}
	}
	return ret;
}
/*
 * Build backref tree for a given tree block.  Root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond to
 * roots of b-trees that reference the tree block.
 *
 * The basic idea of this function is to check backrefs of a given block to
 * find upper level blocks that reference the block, and then check backrefs
 * of these upper level blocks recursively.  The recursion stops when a tree
 * root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
 * all upper level blocks that directly/indirectly reference the block are also
 * cached.
 */
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
			struct reloc_control *rc, struct btrfs_key *node_key,
			int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	/* For searching parent of TREE_BLOCK_REF */
	struct btrfs_path *path;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_edge *edge;
	int ret;
	int err = 0;

	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->lowest = 1;
	cur = node;

	/* Breadth-first search to build backref cache */
	do {
		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
						  cur);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		/*
		 * The pending list isn't empty, take the first block to
		 * process
		 */
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Finish the upper linkage of newly added edges/nodes */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (handle_useless_nodes(rc, node))
		node = NULL;
out:
	btrfs_backref_iter_free(iter);
	btrfs_free_path(path);
	if (err) {
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
	return node;
}
/*
 * helper to add a backref node for the newly created snapshot.
 * The backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_node *new_node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = rb_simple_search(&cache->rb_root,
					   reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct btrfs_backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
					    node->level);
	if (!new_node)
		return -ENOMEM;

	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = btrfs_grab_root(dest);
	ASSERT(new_node->root);

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = btrfs_backref_alloc_edge(cache);
			if (!new_edge)
				goto fail;

			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
						new_node, LINK_UPPER);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
				   &new_node->rb_node);
	if (rb_node)
		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct btrfs_backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		btrfs_backref_free_edge(cache, new_edge);
	}
	btrfs_backref_free_node(cache, new_node);
	return -ENOMEM;
}
/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->commit_root->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_err(fs_info,
		"Duplicate root found for start=%llu while inserting into relocation tree",
			  node->bytenr);
		return -EEXIST;
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}
/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;
	bool put_ref = false;

	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
					   root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
			RB_CLEAR_NODE(&node->rb_node);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		ASSERT(!node || (struct btrfs_root *)node->data == root);
	}

	/*
	 * We only put the reloc root here if it's on the list.  There's a lot
	 * of places where the pattern is to splice the rc->reloc_roots, process
	 * the reloc roots, and then add the reloc root back onto
	 * rc->reloc_roots.  If we call __del_reloc_root while it's off of the
	 * list we don't want the reference being dropped, because the guy
	 * messing with the list is in charge of the reference.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&root->root_list)) {
		put_ref = true;
		list_del_init(&root->root_list);
	}
	spin_unlock(&fs_info->trans_lock);
	if (put_ref)
		btrfs_put_root(root);
	kfree(node);
}
/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
				   root->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = root->node->start;
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
	return 0;
}
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret = 0;
	bool must_abort = false;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	if (!root_item)
		return ERR_PTR(-ENOMEM);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;

		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) both when the relocation root is
		 * created inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) and when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;
	}

	/*
	 * We have changed references at this point, we must abort the
	 * transaction if anything fails.
	 */
	must_abort = true;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		btrfs_set_root_drop_level(root_item, 0);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	if (ret)
		goto fail;

	kfree(root_item);

	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
	if (IS_ERR(reloc_root)) {
		ret = PTR_ERR(reloc_root);
		goto abort;
	}
	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
	reloc_root->last_trans = trans->transid;
	return reloc_root;
fail:
	kfree(root_item);
abort:
	if (must_abort)
		btrfs_abort_transaction(trans, ret);
	return ERR_PTR(ret);
}
/*
 * create reloc tree for a given fs tree. reloc tree is just a
 * snapshot of the fs tree with special root objectid.
 *
 * The reloc_root comes out of here with two references, one for
 * root->reloc_root, and another for being on the rc->reloc_roots list.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (!rc)
		return 0;

	/*
	 * The subvolume has reloc tree but the swap is finished, no need to
	 * create/update the dead reloc tree
	 */
	if (reloc_root_is_dead(root))
		return 0;

	/*
	 * This is subtle but important.  We do not do
	 * record_root_in_transaction for reloc roots, instead we record their
	 * corresponding fs root, and then here we update the last trans for the
	 * reloc root.  This means that we have to do this for the entire life
	 * of the reloc root, regardless of which stage of the relocation we are
	 * in.
	 */
	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	/*
	 * We are merging reloc roots, we do not need new reloc trees.  Also
	 * reloc trees never need their own reloc tree.
	 */
	if (!rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	root->reloc_root = btrfs_grab_root(reloc_root);
	return 0;
}
/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!have_reloc_root(root))
		return 0;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	/*
	 * We are probably ok here, but __del_reloc_root() will drop its ref of
	 * the root.  We have the ref for root->reloc_root, but just in case
	 * hold it while we update the reloc root.
	 */
	btrfs_grab_root(reloc_root);

	/* root->reloc_root will stay until current relocation finished */
	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		/*
		 * Mark the tree as dead before we change reloc_root so
		 * have_reloc_root will not touch it from now on.
		 */
		smp_wmb();
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		__update_reloc_root(reloc_root);
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	btrfs_put_root(reloc_root);
	return ret;
}
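/*
 * Note (derived from the code above and reloc_root_is_dead()): the
 * BTRFS_ROOT_DEAD_RELOC_TREE bit plus the write barrier here pair with the
 * read barrier in reloc_root_is_dead(), so readers always observe the bit
 * before they dereference root->reloc_root.
 */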
/*
 * helper to find first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(entry) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}
/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		struct btrfs_ref ref = { 0 };

		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_range(bytenr, rc->block_group->start,
			      rc->block_group->length))
			continue;

		/*
		 * if we are modifying block in fs tree, wait for read_folio
		 * to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				struct extent_state *cached_state = NULL;

				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end,
						      &cached_state);
				if (!ret)
					continue;

				btrfs_drop_extent_map_range(BTRFS_I(inode),
							    key.offset, end, true);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end, &cached_state);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}
static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;

	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}
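/*
 * Assumed reading, based on the call site in replace_path() below: the
 * memcmp result is only ever used as an equality test, so comparing the
 * on-disk key representation bytewise is sufficient; no ordering semantics
 * are relied upon.
 */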
/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree.  Tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned.  If there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			return ret;
		}
	}

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		ASSERT(level >= lowest_level);

		ret = btrfs_bin_search(parent, &key, &slot);
		if (ret < 0)
			break;
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							 path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							 path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = btrfs_read_node_slot(parent, slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb,
						      BTRFS_NESTING_COW);
				if (ret) {
					btrfs_tree_unlock(eb);
					free_extent_buffer(eb);
					break;
				}
			}

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		path->lowest_level = 0;
		if (ret) {
			if (ret > 0)
				ret = -ENOENT;
			break;
		}

		/*
		 * Info qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
				rc->block_group, parent, slot,
				path->nodes[level], path->slots[level],
				last_snapshot);
		if (ret < 0)
			break;
		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
				    true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}
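/*
 * Reference bookkeeping in the swap above (summary of the four delayed-ref
 * calls): after the pointers are exchanged, the reloc tree gains a ref on
 * old_bytenr and drops its ref on new_bytenr, while the fs tree gains a ref
 * on new_bytenr and drops its ref on old_bytenr, keeping both extents'
 * refcounts consistent with the new pointers.
 */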
/*
 * helper to find next relocated block in reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}
/*
 * walk down reloc tree to find relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		eb = btrfs_read_node_slot(eb, path->slots[i]);
		if (IS_ERR(eb))
			return PTR_ERR(eb);
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}
/*
 * invalidate extent cache for file extents whose key in range of
 * [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		struct extent_state *cached_state = NULL;

		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(BTRFS_I(inode));

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for read_folio to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
		btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
	}
	return 0;
}
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}
/*
 * Insert current subvolume into reloc_control::dirty_subvol_roots
 */
static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct btrfs_root *root)
{
	struct btrfs_root *reloc_root = root->reloc_root;
	struct btrfs_root_item *reloc_root_item;
	int ret;

	/* @root must be a subvolume tree root with a valid reloc tree */
	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(reloc_root);

	reloc_root_item = &reloc_root->root_item;
	memset(&reloc_root_item->drop_progress, 0,
	       sizeof(reloc_root_item->drop_progress));
	btrfs_set_root_drop_level(reloc_root_item, 0);
	btrfs_set_root_refs(reloc_root_item, 0);
	ret = btrfs_update_reloc_root(trans, root);
	if (ret)
		return ret;

	if (list_empty(&root->reloc_dirty_list)) {
		btrfs_grab_root(root);
		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
	}

	return 0;
}
static int clean_dirty_subvols(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *next;
	int ret = 0;
	int ret2;

	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
				 reloc_dirty_list) {
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			/* Merged subvolume, cleanup its reloc root */
			struct btrfs_root *reloc_root = root->reloc_root;

			list_del_init(&root->reloc_dirty_list);
			root->reloc_root = NULL;
			/*
			 * Need barrier to ensure clear_bit() only happens after
			 * root->reloc_root = NULL.  Pairs with have_reloc_root.
			 */
			smp_wmb();
			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
			if (reloc_root) {
				/*
				 * btrfs_drop_snapshot drops our ref we hold for
				 * ->reloc_root.  If it fails however we must
				 * drop the ref ourselves.
				 */
				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
				if (ret2 < 0) {
					btrfs_put_root(reloc_root);
					if (!ret)
						ret = ret2;
				}
			}
			btrfs_put_root(root);
		} else {
			/* Orphan reloc tree, just clean it up */
			ret2 = btrfs_drop_snapshot(root, 0, 1);
			if (ret2 < 0) {
				btrfs_put_root(root);
				if (!ret)
					ret = ret2;
			}
		}
	}
	return ret;
}
/*
 * merge the relocated tree blocks in reloc tree with corresponding
 * fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int reserve_level;
	int level;
	int max_level;
	int replaced = 0;
	int ret = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		atomic_inc(&reloc_root->node->refs);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = btrfs_root_drop_level(root_item);
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	/*
	 * In merge_reloc_root(), we modify the upper level pointer to swap the
	 * tree blocks between reloc tree and subvolume tree.  Thus for tree
	 * block COW, we COW at most from level 1 to root level for each tree.
	 *
	 * Thus the needed metadata size is at most root_level * nodesize,
	 * doubled since we have two trees to COW.
	 */
	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
	min_reserved = fs_info->nodesize * reserve_level * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
					     min_reserved,
					     BTRFS_RESERVE_FLUSH_LIMIT);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}

		/*
		 * At this point we no longer have a reloc_control, so we can't
		 * depend on btrfs_init_reloc_root to update our last_trans.
		 *
		 * But that's ok, we started the trans handle on our
		 * corresponding fs_root, which means it's been added to the
		 * dirty list.  At commit time we'll still call
		 * btrfs_update_reloc_root() and update our root item
		 * appropriately.
		 */
		reloc_root->last_trans = trans->transid;
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0)
			goto out;
		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		btrfs_set_root_drop_level(root_item, level);

		btrfs_end_transaction_throttle(trans);
		trans = NULL;

		btrfs_btree_balance_dirty(fs_info);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case only one block in the fs tree needs to be
	 * relocated and the block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
			      BTRFS_NESTING_COW);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
out:
	btrfs_free_path(path);

	if (ret == 0) {
		ret = insert_dirty_subvol(trans, rc, root);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	if (trans)
		btrfs_end_transaction_throttle(trans);

	btrfs_btree_balance_dirty(fs_info);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return ret;
}
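/*
 * Resume note (from the drop_progress handling above): merge progress is
 * persisted in the reloc root's drop_progress/drop_level fields, so a merge
 * interrupted by unmount or crash can be picked up again from the saved key
 * instead of restarting from the tree root.
 */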
static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans);
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (IS_ERR(root)) {
			/*
			 * Even if we have an error we need this reloc root
			 * back on our list so we can clean up properly.
			 */
			list_add(&reloc_root->root_list, &reloc_roots);
			btrfs_abort_transaction(trans, (int)PTR_ERR(root));
			if (!err)
				err = PTR_ERR(root);
			break;
		}
		ASSERT(root->reloc_root == reloc_root);

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		ret = btrfs_update_reloc_root(trans, root);

		/*
		 * Even if we have an error we need this reloc root back on our
		 * list so we can clean up properly.
		 */
		list_add(&reloc_root->root_list, &reloc_roots);
		btrfs_put_root(root);

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (!err)
				err = ret;
			break;
		}
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		err = btrfs_commit_transaction(trans);
	else
		btrfs_end_transaction(trans);
	return err;
}
static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root, *tmp;

	list_for_each_entry_safe(reloc_root, tmp, list, root_list)
		__del_reloc_root(reloc_root);
}
static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * this serializes us with btrfs_record_root_in_transaction,
	 * we have to make sure nobody is in the middle of
	 * adding their roots to the list while we are
	 * doing this splice
	 */
	mutex_lock(&fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			if (IS_ERR(root)) {
				/*
				 * For recovery we read the fs roots on mount,
				 * and if we didn't find the root then we marked
				 * the reloc root as a garbage root.  For normal
				 * relocation obviously the root should exist in
				 * memory.  However there's no reason we can't
				 * handle the error properly here just in case.
				 */
				ASSERT(0);
				ret = PTR_ERR(root);
				goto out;
			}
			if (root->reloc_root != reloc_root) {
				/*
				 * This is actually impossible without something
				 * going really wrong (like weird race condition
				 * or cosmic rays).
				 */
				ASSERT(0);
				ret = -EINVAL;
				goto out;
			}
			ret = merge_reloc_root(rc, root);
			btrfs_put_root(root);
			if (ret) {
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			if (!IS_ERR(root)) {
				if (root->reloc_root == reloc_root) {
					root->reloc_root = NULL;
					btrfs_put_root(reloc_root);
				}
				clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
					  &root->state);
				btrfs_put_root(root);
			}

			list_del_init(&reloc_root->root_list);
			/* Don't forget to queue this reloc root for cleanup */
			list_add_tail(&reloc_root->reloc_dirty_list,
				      &rc->dirty_subvol_roots);
		}
	}

	if (found) {
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret, NULL);
		free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&fs_info->reloc_mutex);
		free_reloc_roots(&reloc_roots);
	}

	/*
	 * We used to have
	 *
	 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
	 *
	 * here, but it's wrong.  If we fail to start the transaction in
	 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
	 * have actually been removed from the reloc_root_tree rb tree.  This is
	 * fine because we're bailing here, and we hold a reference on the root
	 * for the list that holds it, so these roots will be cleaned up when we
	 * do the reloc_dirty_list afterwards.  Meanwhile the root->reloc_root
	 * will be cleaned up on unmount.
	 *
	 * The remaining nodes will be cleaned up by free_reloc_control.
	 */
}
static void free_block_list(struct rb_root *blocks)
{
	struct tree_block *block;
	struct rb_node *rb_node;

	while ((rb_node = rb_first(blocks))) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		rb_erase(rb_node, blocks);
		kfree(block);
	}
}
static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
				      struct btrfs_root *reloc_root)
{
	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
	struct btrfs_root *root;
	int ret;

	if (reloc_root->last_trans == trans->transid)
		return 0;

	root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);

	/*
	 * This should succeed, since we can't have a reloc root without having
	 * already looked up the actual root and created the reloc root for this
	 * root.
	 *
	 * However if there's some sort of corruption where we have a ref to a
	 * reloc root without a corresponding root this could return ENOENT.
	 */
	if (IS_ERR(root)) {
		ASSERT(0);
		return PTR_ERR(root);
	}
	if (root->reloc_root != reloc_root) {
		ASSERT(0);
		btrfs_err(fs_info,
			  "root %llu has two reloc roots associated with it",
			  reloc_root->root_key.offset);
		btrfs_put_root(root);
		return -EUCLEAN;
	}
	ret = btrfs_record_root_in_trans(trans, root);
	btrfs_put_root(root);

	return ret;
}
static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
				     struct reloc_control *rc,
				     struct btrfs_backref_node *node,
				     struct btrfs_backref_edge *edges[])
{
	struct btrfs_backref_node *next;
	struct btrfs_root *root;
	int index = 0;
	int ret;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;

		/*
		 * If there is no root, then our references for this block are
		 * incomplete, as we should be able to walk all the way up to a
		 * block that is owned by a root.
		 *
		 * This path is only for SHAREABLE roots, so if we come upon a
		 * non-SHAREABLE root then we have backrefs that resolve
		 * improperly.
		 *
		 * Both of these cases indicate file system corruption, or a bug
		 * in the backref walking code.
		 */
		if (!root) {
			ASSERT(0);
			btrfs_err(trans->fs_info,
		"bytenr %llu doesn't have a backref path ending in a root",
				  node->bytenr);
			return ERR_PTR(-EUCLEAN);
		}
		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
			ASSERT(0);
			btrfs_err(trans->fs_info,
	"bytenr %llu has multiple refs with one ending in a non-shareable root",
				  node->bytenr);
			return ERR_PTR(-EUCLEAN);
		}

		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
			ret = record_reloc_root_in_trans(trans, root);
			if (ret)
				return ERR_PTR(ret);
			break;
		}

		ret = btrfs_record_root_in_trans(trans, root);
		if (ret)
			return ERR_PTR(ret);
		root = root->reloc_root;

		/*
		 * We could have raced with another thread which failed, so
		 * root->reloc_root may not be set, return ENOENT in this case.
		 */
		if (!root)
			return ERR_PTR(-ENOENT);

		if (next->new_bytenr != root->node->start) {
			/*
			 * We just created the reloc root, so we shouldn't have
			 * ->new_bytenr set and this shouldn't be in the changed
			 *  list.  If it is then we have multiple roots pointing
			 *  at the same bytenr which indicates corruption, or
			 *  we've made a mistake in the backref walking code.
			 */
			ASSERT(next->new_bytenr == 0);
			ASSERT(list_empty(&next->list));
			if (next->new_bytenr || !list_empty(&next->list)) {
				btrfs_err(trans->fs_info,
	"bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
					  node->bytenr, next->bytenr);
				return ERR_PTR(-EUCLEAN);
			}

			next->new_bytenr = root->node->start;
			btrfs_put_root(next->root);
			next->root = btrfs_grab_root(root);
			ASSERT(next->root);
			list_add_tail(&next->list,
				      &rc->backref_cache.changed);
			mark_block_processed(rc, next);
			break;
		}

		WARN_ON(1);
		root = NULL;
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
	if (!root) {
		/*
		 * This can happen if there's fs corruption or if there's a bug
		 * in the backref lookup code.
		 */
		ASSERT(0);
		return ERR_PTR(-ENOENT);
	}

	next = node;
	/* setup backref node path for btrfs_reloc_cow_block */
	while (1) {
		rc->backref_cache.path[next->level] = next;
		if (--index < 0)
			break;
		next = edges[index]->node[UPPER];
	}
	return root;
}
/*
 * Select a tree root for relocation.
 *
 * Return NULL if the block is not shareable.  We should use do_relocation()
 * in this case.
 *
 * Return a tree root pointer if the block is shareable.
 * Return -ENOENT if the block is root of reloc tree.
 */
static noinline_for_stack
struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *next;
	struct btrfs_root *root;
	struct btrfs_root *fs_root = NULL;
	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;

		/*
		 * This can occur if we have incomplete extent refs leading all
		 * the way up a particular path, in this case return -EUCLEAN.
		 */
		if (!root)
			return ERR_PTR(-EUCLEAN);

		/* No other choice for non-shareable tree */
		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
			return root;

		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
			fs_root = root;

		if (next != node)
			return NULL;

		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}

	if (!fs_root)
		return ERR_PTR(-ENOENT);
	return fs_root;
}
static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
			struct btrfs_backref_node *node, int reserve)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_backref_node *next = node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;

	BUG_ON(reserve && node->processed);

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed && (reserve || next != node))
				break;

			num_bytes += fs_info->nodesize;

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct btrfs_backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}
static int reserve_metadata_space(struct btrfs_trans_handle *trans,
				  struct reloc_control *rc,
				  struct btrfs_backref_node *node)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;
	u64 tmp;

	num_bytes = calcu_metadata_size(rc, node, 1) * 2;

	trans->block_rsv = rc->block_rsv;
	rc->reserved_bytes += num_bytes;

	/*
	 * We are under a transaction here so we can only do limited flushing.
	 * If we get an enospc just kick back -EAGAIN so we know to drop the
	 * transaction and try to refill when we can flush all the things.
	 */
	ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_LIMIT);
	if (ret) {
		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
		while (tmp <= rc->reserved_bytes)
			tmp <<= 1;
		/*
		 * only one thread can access block_rsv at this point,
		 * so we don't need hold lock to protect block_rsv.
		 * we expand more reservation size here to allow enough
		 * space for relocation and we will return earlier in
		 * enospc case.
		 */
		rc->block_rsv->size = tmp + fs_info->nodesize *
				      RELOCATION_RESERVED_NODES;
		return -EAGAIN;
	}

	return 0;
}
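/*
 * Contract note (from the flush-limit comment above): callers treat -EAGAIN
 * as "end the current transaction, flush, and retry", which is why the
 * reservation size is grown before the error is returned.
 */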
2365 * relocate a block tree, and then update pointers in upper level
2366 * blocks that reference the block to point to the new location.
2368 * if called by link_to_upper, the block has already been relocated.
2369 * in that case this function just updates pointers.
2371 static int do_relocation(struct btrfs_trans_handle *trans,
2372 struct reloc_control *rc,
2373 struct btrfs_backref_node *node,
2374 struct btrfs_key *key,
2375 struct btrfs_path *path, int lowest)
2377 struct btrfs_backref_node *upper;
2378 struct btrfs_backref_edge *edge;
2379 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2380 struct btrfs_root *root;
2381 struct extent_buffer *eb;
2388 * If we are lowest then this is the first time we're processing this
2389 * block, and thus shouldn't have an eb associated with it yet.
2391 ASSERT(!lowest || !node->eb);
2393 path->lowest_level = node->level + 1;
2394 rc->backref_cache.path[node->level] = node;
2395 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2396 struct btrfs_ref ref = { 0 };
2400 upper = edge->node[UPPER];
2401 root = select_reloc_root(trans, rc, upper, edges);
2403 ret = PTR_ERR(root);
2407 if (upper->eb && !upper->locked) {
2409 ret = btrfs_bin_search(upper->eb, key, &slot);
2413 bytenr = btrfs_node_blockptr(upper->eb, slot);
2414 if (node->eb->start == bytenr)
2417 btrfs_backref_drop_node_buffer(upper);
2421 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2426 btrfs_release_path(path);
2431 upper->eb = path->nodes[upper->level];
2432 path->nodes[upper->level] = NULL;
2434 BUG_ON(upper->eb != path->nodes[upper->level]);
2438 path->locks[upper->level] = 0;
2440 slot = path->slots[upper->level];
2441 btrfs_release_path(path);
2443 ret = btrfs_bin_search(upper->eb, key, &slot);
2449 bytenr = btrfs_node_blockptr(upper->eb, slot);
2451 if (bytenr != node->bytenr) {
2452 btrfs_err(root->fs_info,
2453 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2454 bytenr, node->bytenr, slot,
2460 if (node->eb->start == bytenr)
2464 blocksize = root->fs_info->nodesize;
2465 eb = btrfs_read_node_slot(upper->eb, slot);
2470 btrfs_tree_lock(eb);
2473 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2474 slot, &eb, BTRFS_NESTING_COW);
2475 btrfs_tree_unlock(eb);
2476 free_extent_buffer(eb);
2480 * We've just COWed this block, it should have updated
2481 * the correct backref node entry.
2483 ASSERT(node->eb == eb);
2485 btrfs_set_node_blockptr(upper->eb, slot,
2487 btrfs_set_node_ptr_generation(upper->eb, slot,
2489 btrfs_mark_buffer_dirty(upper->eb);
2491 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2492 node->eb->start, blocksize,
2494 btrfs_init_tree_ref(&ref, node->level,
2495 btrfs_header_owner(upper->eb),
2496 root->root_key.objectid, false);
2497 ret = btrfs_inc_extent_ref(trans, &ref);
2499 ret = btrfs_drop_subtree(trans, root, eb,
2502 btrfs_abort_transaction(trans, ret);
2505 if (!upper->pending)
2506 btrfs_backref_drop_node_buffer(upper);
2508 btrfs_backref_unlock_node_buffer(upper);
2513 if (!ret && node->pending) {
2514 btrfs_backref_drop_node_buffer(node);
2515 list_move_tail(&node->list, &rc->backref_cache.changed);
2519 path->lowest_level = 0;
2522 * We should have allocated all of our space in the block rsv and thus we shouldn't hit ENOSPC here.
2525 ASSERT(ret != -ENOSPC);
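/*
 * Per-upper-edge flow of do_relocation(), for reference:
 *
 * 1. select_reloc_root() picks the reloc root that owns the upper block.
 * 2. The slot in upper->eb pointing at this block is located, either via
 *    the cached eb or a btrfs_search_slot() with lowest_level set.
 * 3. At the lowest level the block is COWed into place via
 *    btrfs_cow_block(), which updates node->eb through
 *    btrfs_reloc_cow_block().
 * 4. At upper levels the blockptr/generation in upper->eb are rewritten
 *    to the already-relocated location, a ref is added on the new extent
 *    and the subtree at the old location is dropped.
 */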
2529 static int link_to_upper(struct btrfs_trans_handle *trans,
2530 struct reloc_control *rc,
2531 struct btrfs_backref_node *node,
2532 struct btrfs_path *path)
2534 struct btrfs_key key;
2536 btrfs_node_key_to_cpu(node->eb, &key, 0);
2537 return do_relocation(trans, rc, node, &key, path, 0);
2540 static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2541 struct reloc_control *rc,
2542 struct btrfs_path *path, int err)
2545 struct btrfs_backref_cache *cache = &rc->backref_cache;
2546 struct btrfs_backref_node *node;
2550 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2551 while (!list_empty(&cache->pending[level])) {
2552 node = list_entry(cache->pending[level].next,
2553 struct btrfs_backref_node, list);
2554 list_move_tail(&node->list, &list);
2555 BUG_ON(!node->pending);
2558 ret = link_to_upper(trans, rc, node, path);
2563 list_splice_init(&list, &cache->pending[level]);
2569 * Mark a block and all blocks that directly/indirectly reference the block as processed.
2572 static void update_processed_blocks(struct reloc_control *rc,
2573 struct btrfs_backref_node *node)
2575 struct btrfs_backref_node *next = node;
2576 struct btrfs_backref_edge *edge;
2577 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2583 if (next->processed)
2586 mark_block_processed(rc, next);
2588 if (list_empty(&next->upper))
2591 edge = list_entry(next->upper.next,
2592 struct btrfs_backref_edge, list[LOWER]);
2593 edges[index++] = edge;
2594 next = edge->node[UPPER];
2596 next = walk_down_backref(edges, &index);
2600 static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2602 u32 blocksize = rc->extent_root->fs_info->nodesize;
2604 if (test_range_bit(&rc->processed_blocks, bytenr,
2605 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
2610 static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2611 struct tree_block *block)
2613 struct extent_buffer *eb;
2615 eb = read_tree_block(fs_info, block->bytenr, block->owner,
2616 block->key.offset, block->level, NULL);
2619 if (!extent_buffer_uptodate(eb)) {
2620 free_extent_buffer(eb);
2623 if (block->level == 0)
2624 btrfs_item_key_to_cpu(eb, &block->key, 0);
2626 btrfs_node_key_to_cpu(eb, &block->key, 0);
2627 free_extent_buffer(eb);
2628 block->key_ready = 1;
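/*
 * Note on the block->key overloading above: until key_ready is set,
 * add_tree_block() stashes the nodesize in key.objectid and the extent
 * generation in key.offset (see below); read_tree_block() consumes that
 * generation, and only afterwards does block->key hold the real first key.
 */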
2633 * helper function to relocate a tree block
2635 static int relocate_tree_block(struct btrfs_trans_handle *trans,
2636 struct reloc_control *rc,
2637 struct btrfs_backref_node *node,
2638 struct btrfs_key *key,
2639 struct btrfs_path *path)
2641 struct btrfs_root *root;
2648 * If we fail here we want to drop our backref_node because we are going
2649 * to start over and regenerate the tree for it.
2651 ret = reserve_metadata_space(trans, rc, node);
2655 BUG_ON(node->processed);
2656 root = select_one_root(node);
2658 ret = PTR_ERR(root);
2660 /* See explanation in select_one_root for the -EUCLEAN case. */
2661 ASSERT(ret == -ENOENT);
2662 if (ret == -ENOENT) {
2664 update_processed_blocks(rc, node);
2670 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2672 * This block was the root block of a root, and this is the first
2673 * time we're processing it, so it should not have had its
2674 * ->new_bytenr modified, nor should it have been included on
2675 * the changed list.
2677 * However, in the case of corruption we could have multiple refs
2678 * improperly pointing at the same block, and then we would trip
2679 * over these checks. ASSERT() for the developer case, because it
2680 * could indicate a bug in the backref code, but error out for a
2681 * normal user in the case of corruption.
2684 ASSERT(node->new_bytenr == 0);
2685 ASSERT(list_empty(&node->list));
2686 if (node->new_bytenr || !list_empty(&node->list)) {
2687 btrfs_err(root->fs_info,
2688 "bytenr %llu has improper references to it",
2693 ret = btrfs_record_root_in_trans(trans, root);
2697 * Another thread could have failed; check that we actually have
2698 * root->reloc_root set.
2700 if (!root->reloc_root) {
2704 root = root->reloc_root;
2705 node->new_bytenr = root->node->start;
2706 btrfs_put_root(node->root);
2707 node->root = btrfs_grab_root(root);
2709 list_add_tail(&node->list, &rc->backref_cache.changed);
2711 path->lowest_level = node->level;
2712 if (root == root->fs_info->chunk_root)
2713 btrfs_reserve_chunk_metadata(trans, false);
2714 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2715 btrfs_release_path(path);
2716 if (root == root->fs_info->chunk_root)
2717 btrfs_trans_release_chunk_metadata(trans);
2722 update_processed_blocks(rc, node);
2724 ret = do_relocation(trans, rc, node, key, path, 1);
2727 if (ret || node->level == 0 || node->cowonly)
2728 btrfs_backref_cleanup_node(&rc->backref_cache, node);
2733 * relocate a list of blocks
2735 static noinline_for_stack
2736 int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2737 struct reloc_control *rc, struct rb_root *blocks)
2739 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2740 struct btrfs_backref_node *node;
2741 struct btrfs_path *path;
2742 struct tree_block *block;
2743 struct tree_block *next;
2747 path = btrfs_alloc_path();
2750 goto out_free_blocks;
2753 /* Kick in readahead for tree blocks with missing keys */
2754 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2755 if (!block->key_ready)
2756 btrfs_readahead_tree_block(fs_info, block->bytenr,
2761 /* Get first keys */
2762 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2763 if (!block->key_ready) {
2764 err = get_tree_block_key(fs_info, block);
2770 /* Do tree relocation */
2771 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2772 node = build_backref_tree(rc, &block->key,
2773 block->level, block->bytenr);
2775 err = PTR_ERR(node);
2779 ret = relocate_tree_block(trans, rc, node, &block->key,
2787 err = finish_pending_nodes(trans, rc, path, err);
2790 btrfs_free_path(path);
2792 free_block_list(blocks);
2796 static noinline_for_stack int prealloc_file_extent_cluster(
2797 struct btrfs_inode *inode,
2798 struct file_extent_cluster *cluster)
2803 u64 offset = inode->index_cnt;
2807 u64 i_size = i_size_read(&inode->vfs_inode);
2808 u64 prealloc_start = cluster->start - offset;
2809 u64 prealloc_end = cluster->end - offset;
2810 u64 cur_offset = prealloc_start;
2813 * For the subpage case, the previous i_size may not be aligned to PAGE_SIZE.
2814 * This means the range [i_size, PAGE_END + 1) is filled with zeros by the
2815 * btrfs_do_readpage() call for the previously relocated file cluster.
2817 * If the current cluster starts in the above range, btrfs_do_readpage()
2818 * will skip the read, and relocate_one_page() will later write back
2819 * the padding zeros as new data, causing data corruption.
2821 * Here we have to manually invalidate the range [i_size, PAGE_END + 1).
2823 if (!IS_ALIGNED(i_size, PAGE_SIZE)) {
2824 struct address_space *mapping = inode->vfs_inode.i_mapping;
2825 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2826 const u32 sectorsize = fs_info->sectorsize;
2829 ASSERT(sectorsize < PAGE_SIZE);
2830 ASSERT(IS_ALIGNED(i_size, sectorsize));
2833 * Subpage can't handle a page with the DIRTY bit set but the
2834 * UPTODATE bit cleared, as that can lead to the following deadlock:
2836 * btrfs_read_folio()
2837 * | Page already *locked*
2838 * |- btrfs_lock_and_flush_ordered_range()
2839 * |- btrfs_start_ordered_extent()
2840 * |- extent_write_cache_pages()
2842 * We try to lock the page we already hold.
2844 * Here we just write back the whole data reloc inode, so that
2845 * we are guaranteed to have no dirty range in the page and can
2846 * safely clear the uptodate bits.
2848 * This shouldn't cause too much overhead, as we need to write
2849 * the data back anyway.
2851 ret = filemap_write_and_wait(mapping);
2855 clear_extent_bits(&inode->io_tree, i_size,
2856 round_up(i_size, PAGE_SIZE) - 1,
2858 page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
2860 * If the page was freed we don't need to do anything, as we
2861 * will re-read the whole page anyway.
2864 btrfs_subpage_clear_uptodate(fs_info, page, i_size,
2865 round_up(i_size, PAGE_SIZE) - i_size);
2871 BUG_ON(cluster->start != cluster->boundary[0]);
2872 ret = btrfs_alloc_data_chunk_ondemand(inode,
2873 prealloc_end + 1 - prealloc_start);
2877 btrfs_inode_lock(inode, 0);
2878 for (nr = 0; nr < cluster->nr; nr++) {
2879 struct extent_state *cached_state = NULL;
2881 start = cluster->boundary[nr] - offset;
2882 if (nr + 1 < cluster->nr)
2883 end = cluster->boundary[nr + 1] - 1 - offset;
2885 end = cluster->end - offset;
2887 lock_extent(&inode->io_tree, start, end, &cached_state);
2888 num_bytes = end + 1 - start;
2889 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
2890 num_bytes, num_bytes,
2891 end + 1, &alloc_hint);
2892 cur_offset = end + 1;
2893 unlock_extent(&inode->io_tree, start, end, &cached_state);
2897 btrfs_inode_unlock(inode, 0);
2899 if (cur_offset < prealloc_end)
2900 btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
2901 prealloc_end + 1 - cur_offset);
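/*
 * Illustration of the offset arithmetic used above. The data reloc inode
 * stores the block group start in ->index_cnt, so a disk bytenr maps to
 * file offset (bytenr - offset). The helper below is a hypothetical
 * sketch, not called by this file, that restates how the file range of
 * cluster extent @nr is derived.
 */
static inline void sketch_cluster_extent_range(const struct file_extent_cluster *cluster,
					       u64 offset, int nr, u64 *start, u64 *end)
{
	*start = cluster->boundary[nr] - offset;
	if (nr + 1 < cluster->nr)
		*end = cluster->boundary[nr + 1] - 1 - offset;
	else
		*end = cluster->end - offset;
}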
2905 static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
2906 u64 start, u64 end, u64 block_start)
2908 struct extent_map *em;
2909 struct extent_state *cached_state = NULL;
2912 em = alloc_extent_map();
2917 em->len = end + 1 - start;
2918 em->block_len = em->len;
2919 em->block_start = block_start;
2920 set_bit(EXTENT_FLAG_PINNED, &em->flags);
2922 lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
2923 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false);
2924 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
2925 free_extent_map(em);
2931 * Allow error injection to test balance/relocation cancellation
2933 noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
2935 return atomic_read(&fs_info->balance_cancel_req) ||
2936 atomic_read(&fs_info->reloc_cancel_req) ||
2937 fatal_signal_pending(current);
2939 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
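/*
 * With CONFIG_FUNCTION_ERROR_INJECTION enabled, the return value of
 * btrfs_should_cancel_balance() can be overridden at runtime through the
 * kernel's error injection framework (for example from a BPF program),
 * which lets tests exercise the cancellation paths without issuing a
 * real cancel request.
 */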
2941 static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
2944 /* Last extent, use cluster end directly */
2945 if (cluster_nr >= cluster->nr - 1)
2946 return cluster->end;
2948 /* Use next boundary start */
2949 return cluster->boundary[cluster_nr + 1] - 1;
2952 static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
2953 struct file_extent_cluster *cluster,
2954 int *cluster_nr, unsigned long page_index)
2956 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2957 u64 offset = BTRFS_I(inode)->index_cnt;
2958 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
2959 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2966 ASSERT(page_index <= last_index);
2967 page = find_lock_page(inode->i_mapping, page_index);
2969 page_cache_sync_readahead(inode->i_mapping, ra, NULL,
2970 page_index, last_index + 1 - page_index);
2971 page = find_or_create_page(inode->i_mapping, page_index, mask);
2975 ret = set_page_extent_mapped(page);
2979 if (PageReadahead(page))
2980 page_cache_async_readahead(inode->i_mapping, ra, NULL,
2981 page_folio(page), page_index,
2982 last_index + 1 - page_index);
2984 if (!PageUptodate(page)) {
2985 btrfs_read_folio(NULL, page_folio(page));
2987 if (!PageUptodate(page)) {
2993 page_start = page_offset(page);
2994 page_end = page_start + PAGE_SIZE - 1;
2997 * Start from the cluster, as for the subpage case the cluster can start inside the page.
3000 cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
3001 while (cur <= page_end) {
3002 struct extent_state *cached_state = NULL;
3003 u64 extent_start = cluster->boundary[*cluster_nr] - offset;
3004 u64 extent_end = get_cluster_boundary_end(cluster,
3005 *cluster_nr) - offset;
3006 u64 clamped_start = max(page_start, extent_start);
3007 u64 clamped_end = min(page_end, extent_end);
3008 u32 clamped_len = clamped_end + 1 - clamped_start;
3010 /* Reserve metadata for this range */
3011 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
3012 clamped_len, clamped_len,
3017 /* Mark the range delalloc and dirty for later writeback */
3018 lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
3020 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
3021 clamped_end, 0, &cached_state);
3023 clear_extent_bit(&BTRFS_I(inode)->io_tree,
3024 clamped_start, clamped_end,
3025 EXTENT_LOCKED | EXTENT_BOUNDARY,
3027 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3029 btrfs_delalloc_release_extents(BTRFS_I(inode),
3033 btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len);
3036 * Set the boundary if it's inside the page.
3037 * Data relocation requires the destination extents to have the
3038 * same size as the source.
3039 * The EXTENT_BOUNDARY bit prevents the current extent from being
3040 * merged with the previous extent.
3042 if (in_range(cluster->boundary[*cluster_nr] - offset,
3043 page_start, PAGE_SIZE)) {
3044 u64 boundary_start = cluster->boundary[*cluster_nr] -
3046 u64 boundary_end = boundary_start +
3047 fs_info->sectorsize - 1;
3049 set_extent_bits(&BTRFS_I(inode)->io_tree,
3050 boundary_start, boundary_end,
3053 unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
3055 btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
3058 /* Crossed extent end, go to next extent */
3059 if (cur >= extent_end) {
3061 /* Just finished the last extent of the cluster, exit. */
3062 if (*cluster_nr >= cluster->nr)
3069 balance_dirty_pages_ratelimited(inode->i_mapping);
3070 btrfs_throttle(fs_info);
3071 if (btrfs_should_cancel_balance(fs_info))
3081 static int relocate_file_extent_cluster(struct inode *inode,
3082 struct file_extent_cluster *cluster)
3084 u64 offset = BTRFS_I(inode)->index_cnt;
3085 unsigned long index;
3086 unsigned long last_index;
3087 struct file_ra_state *ra;
3094 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3098 ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
3102 file_ra_state_init(ra, inode->i_mapping);
3104 ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
3105 cluster->end - offset, cluster->start);
3109 last_index = (cluster->end - offset) >> PAGE_SHIFT;
3110 for (index = (cluster->start - offset) >> PAGE_SHIFT;
3111 index <= last_index && !ret; index++)
3112 ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
3114 WARN_ON(cluster_nr != cluster->nr);
3120 static noinline_for_stack
3121 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3122 struct file_extent_cluster *cluster)
3126 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3127 ret = relocate_file_extent_cluster(inode, cluster);
3134 cluster->start = extent_key->objectid;
3136 BUG_ON(cluster->nr >= MAX_EXTENTS);
3137 cluster->end = extent_key->objectid + extent_key->offset - 1;
3138 cluster->boundary[cluster->nr] = extent_key->objectid;
3141 if (cluster->nr >= MAX_EXTENTS) {
3142 ret = relocate_file_extent_cluster(inode, cluster);
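/*
 * Worked example for the clustering above (made-up bytenrs, 4K extents):
 * extents A=[8192, 12287] and B=[12288, 16383] are contiguous (B starts
 * at A's end + 1), so both land in one cluster with boundary[] =
 * {8192, 12288}; a third extent C starting at 1048576 is not contiguous,
 * so the cluster is flushed through relocate_file_extent_cluster()
 * before C starts a new one. A cluster is also flushed once it holds
 * MAX_EXTENTS (128) extents.
 */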
3151 * Helper to add a tree block to the list.
3152 * The major work is getting the generation and level of the block.
3154 static int add_tree_block(struct reloc_control *rc,
3155 struct btrfs_key *extent_key,
3156 struct btrfs_path *path,
3157 struct rb_root *blocks)
3159 struct extent_buffer *eb;
3160 struct btrfs_extent_item *ei;
3161 struct btrfs_tree_block_info *bi;
3162 struct tree_block *block;
3163 struct rb_node *rb_node;
3169 eb = path->nodes[0];
3170 item_size = btrfs_item_size(eb, path->slots[0]);
3172 if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3173 item_size >= sizeof(*ei) + sizeof(*bi)) {
3174 unsigned long ptr = 0, end;
3176 ei = btrfs_item_ptr(eb, path->slots[0],
3177 struct btrfs_extent_item);
3178 end = (unsigned long)ei + item_size;
3179 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3180 bi = (struct btrfs_tree_block_info *)(ei + 1);
3181 level = btrfs_tree_block_level(eb, bi);
3182 ptr = (unsigned long)(bi + 1);
3184 level = (int)extent_key->offset;
3185 ptr = (unsigned long)(ei + 1);
3187 generation = btrfs_extent_generation(eb, ei);
3190 * We're reading random blocks without knowing their owner ahead
3191 * of time. This is ok most of the time, as all reloc roots and
3192 * fs roots have the same lock type. However normal trees do
3193 * not, and the only way to know ahead of time is to read the
3194 * inline ref offset. We know it's an fs root if
3196 * 1. There's more than one ref.
3197 * 2. There's a SHARED_DATA_REF_KEY set.
3198 * 3. FULL_BACKREF is set on the flags.
3200 * Otherwise it's safe to assume that the ref offset == the
3201 * owner of this block, so we can use that when calling read_tree_block().
3204 if (btrfs_extent_refs(eb, ei) == 1 &&
3205 !(btrfs_extent_flags(eb, ei) &
3206 BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3208 struct btrfs_extent_inline_ref *iref;
3211 iref = (struct btrfs_extent_inline_ref *)ptr;
3212 type = btrfs_get_extent_inline_ref_type(eb, iref,
3213 BTRFS_REF_TYPE_BLOCK);
3214 if (type == BTRFS_REF_TYPE_INVALID)
3216 if (type == BTRFS_TREE_BLOCK_REF_KEY)
3217 owner = btrfs_extent_inline_ref_offset(eb, iref);
3219 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3220 btrfs_print_v0_err(eb->fs_info);
3221 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
3227 btrfs_release_path(path);
3229 BUG_ON(level == -1);
3231 block = kmalloc(sizeof(*block), GFP_NOFS);
3235 block->bytenr = extent_key->objectid;
3236 block->key.objectid = rc->extent_root->fs_info->nodesize;
3237 block->key.offset = generation;
3238 block->level = level;
3239 block->key_ready = 0;
3240 block->owner = owner;
3242 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
3244 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
3251 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3253 static int __add_tree_block(struct reloc_control *rc,
3254 u64 bytenr, u32 blocksize,
3255 struct rb_root *blocks)
3257 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3258 struct btrfs_path *path;
3259 struct btrfs_key key;
3261 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3263 if (tree_block_processed(bytenr, rc))
3266 if (rb_simple_search(blocks, bytenr))
3269 path = btrfs_alloc_path();
3273 key.objectid = bytenr;
3275 key.type = BTRFS_METADATA_ITEM_KEY;
3276 key.offset = (u64)-1;
3278 key.type = BTRFS_EXTENT_ITEM_KEY;
3279 key.offset = blocksize;
3282 path->search_commit_root = 1;
3283 path->skip_locking = 1;
3284 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3288 if (ret > 0 && skinny) {
3289 if (path->slots[0]) {
3291 btrfs_item_key_to_cpu(path->nodes[0], &key,
3293 if (key.objectid == bytenr &&
3294 (key.type == BTRFS_METADATA_ITEM_KEY ||
3295 (key.type == BTRFS_EXTENT_ITEM_KEY &&
3296 key.offset == blocksize)))
3302 btrfs_release_path(path);
3308 btrfs_print_leaf(path->nodes[0]);
3310 "tree block extent item (%llu) is not found in extent tree",
3317 ret = add_tree_block(rc, &key, path, blocks);
3319 btrfs_free_path(path);
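/*
 * Sketch of the two key shapes tried above (hypothetical helper, for
 * illustration only): with SKINNY_METADATA the extent item is keyed as
 * (bytenr, METADATA_ITEM_KEY, level), so the search uses offset (u64)-1
 * and may need to step back one slot; without it the key is
 * (bytenr, EXTENT_ITEM_KEY, blocksize) and matches exactly.
 */
static inline void sketch_tree_block_search_key(struct btrfs_key *key, u64 bytenr,
						u32 blocksize, bool skinny)
{
	key->objectid = bytenr;
	if (skinny) {
		key->type = BTRFS_METADATA_ITEM_KEY;
		key->offset = (u64)-1;
	} else {
		key->type = BTRFS_EXTENT_ITEM_KEY;
		key->offset = blocksize;
	}
}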
3323 static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3324 struct btrfs_block_group *block_group,
3325 struct inode *inode,
3328 struct btrfs_root *root = fs_info->tree_root;
3329 struct btrfs_trans_handle *trans;
3335 inode = btrfs_iget(fs_info->sb, ino, root);
3340 ret = btrfs_check_trunc_cache_free_space(fs_info,
3341 &fs_info->global_block_rsv);
3345 trans = btrfs_join_transaction(root);
3346 if (IS_ERR(trans)) {
3347 ret = PTR_ERR(trans);
3351 ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3353 btrfs_end_transaction(trans);
3354 btrfs_btree_balance_dirty(fs_info);
3361 * Locate the free space cache EXTENT_DATA in the root tree leaf and delete
3362 * the cache inode, so that free space cache data extents don't block data relocation.
3364 static int delete_v1_space_cache(struct extent_buffer *leaf,
3365 struct btrfs_block_group *block_group,
3368 u64 space_cache_ino;
3369 struct btrfs_file_extent_item *ei;
3370 struct btrfs_key key;
3375 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3378 for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3381 btrfs_item_key_to_cpu(leaf, &key, i);
3382 if (key.type != BTRFS_EXTENT_DATA_KEY)
3384 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3385 type = btrfs_file_extent_type(leaf, ei);
3387 if ((type == BTRFS_FILE_EXTENT_REG ||
3388 type == BTRFS_FILE_EXTENT_PREALLOC) &&
3389 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3391 space_cache_ino = key.objectid;
3397 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3403 * helper to find all tree blocks that reference a given data extent
3405 static noinline_for_stack
3406 int add_data_references(struct reloc_control *rc,
3407 struct btrfs_key *extent_key,
3408 struct btrfs_path *path,
3409 struct rb_root *blocks)
3411 struct btrfs_backref_walk_ctx ctx = { 0 };
3412 struct ulist_iterator leaf_uiter;
3413 struct ulist_node *ref_node = NULL;
3414 const u32 blocksize = rc->extent_root->fs_info->nodesize;
3417 btrfs_release_path(path);
3419 ctx.bytenr = extent_key->objectid;
3420 ctx.ignore_extent_item_pos = true;
3421 ctx.fs_info = rc->extent_root->fs_info;
3423 ret = btrfs_find_all_leafs(&ctx);
3427 ULIST_ITER_INIT(&leaf_uiter);
3428 while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) {
3429 struct extent_buffer *eb;
3431 eb = read_tree_block(ctx.fs_info, ref_node->val, 0, 0, 0, NULL);
3436 ret = delete_v1_space_cache(eb, rc->block_group,
3437 extent_key->objectid);
3438 free_extent_buffer(eb);
3441 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3446 free_block_list(blocks);
3447 ulist_free(ctx.refs);
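/*
 * Note: only the leaves referencing the data extent are collected here;
 * relocate_tree_blocks() then builds a backref tree from each leaf
 * upwards, and in the UPDATE_DATA_PTRS stage the new leaf copies get
 * their file extent items rewritten by replace_file_extents(), called
 * from btrfs_reloc_cow_block().
 */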
3452 * Helper to find the next unprocessed extent.
3454 static noinline_for_stack
3455 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3456 struct btrfs_key *extent_key)
3458 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3459 struct btrfs_key key;
3460 struct extent_buffer *leaf;
3461 u64 start, end, last;
3464 last = rc->block_group->start + rc->block_group->length;
3467 if (rc->search_start >= last) {
3472 key.objectid = rc->search_start;
3473 key.type = BTRFS_EXTENT_ITEM_KEY;
3476 path->search_commit_root = 1;
3477 path->skip_locking = 1;
3478 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3483 leaf = path->nodes[0];
3484 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3485 ret = btrfs_next_leaf(rc->extent_root, path);
3488 leaf = path->nodes[0];
3491 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3492 if (key.objectid >= last) {
3497 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3498 key.type != BTRFS_METADATA_ITEM_KEY) {
3503 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3504 key.objectid + key.offset <= rc->search_start) {
3509 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3510 key.objectid + fs_info->nodesize <=
3516 ret = find_first_extent_bit(&rc->processed_blocks,
3517 key.objectid, &start, &end,
3518 EXTENT_DIRTY, NULL);
3520 if (ret == 0 && start <= key.objectid) {
3521 btrfs_release_path(path);
3522 rc->search_start = end + 1;
3524 if (key.type == BTRFS_EXTENT_ITEM_KEY)
3525 rc->search_start = key.objectid + key.offset;
3527 rc->search_start = key.objectid +
3529 memcpy(extent_key, &key, sizeof(key));
3533 btrfs_release_path(path);
3537 static void set_reloc_control(struct reloc_control *rc)
3539 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3541 mutex_lock(&fs_info->reloc_mutex);
3542 fs_info->reloc_ctl = rc;
3543 mutex_unlock(&fs_info->reloc_mutex);
3546 static void unset_reloc_control(struct reloc_control *rc)
3548 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3550 mutex_lock(&fs_info->reloc_mutex);
3551 fs_info->reloc_ctl = NULL;
3552 mutex_unlock(&fs_info->reloc_mutex);
3555 static noinline_for_stack
3556 int prepare_to_relocate(struct reloc_control *rc)
3558 struct btrfs_trans_handle *trans;
3561 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3562 BTRFS_BLOCK_RSV_TEMP);
3566 memset(&rc->cluster, 0, sizeof(rc->cluster));
3567 rc->search_start = rc->block_group->start;
3568 rc->extents_found = 0;
3569 rc->nodes_relocated = 0;
3570 rc->merging_rsv_size = 0;
3571 rc->reserved_bytes = 0;
3572 rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3573 RELOCATION_RESERVED_NODES;
3574 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
3575 rc->block_rsv, rc->block_rsv->size,
3576 BTRFS_RESERVE_FLUSH_ALL);
3580 rc->create_reloc_tree = 1;
3581 set_reloc_control(rc);
3583 trans = btrfs_join_transaction(rc->extent_root);
3584 if (IS_ERR(trans)) {
3585 unset_reloc_control(rc);
3587 * The extent tree is not a ref_cow tree and has no reloc_root to
3588 * clean up. And callers are responsible for freeing the above block rsv.
3591 return PTR_ERR(trans);
3594 ret = btrfs_commit_transaction(trans);
3596 unset_reloc_control(rc);
3601 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3603 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3604 struct rb_root blocks = RB_ROOT;
3605 struct btrfs_key key;
3606 struct btrfs_trans_handle *trans = NULL;
3607 struct btrfs_path *path;
3608 struct btrfs_extent_item *ei;
3614 path = btrfs_alloc_path();
3617 path->reada = READA_FORWARD;
3619 ret = prepare_to_relocate(rc);
3626 rc->reserved_bytes = 0;
3627 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
3628 rc->block_rsv->size,
3629 BTRFS_RESERVE_FLUSH_ALL);
3635 trans = btrfs_start_transaction(rc->extent_root, 0);
3636 if (IS_ERR(trans)) {
3637 err = PTR_ERR(trans);
3642 if (update_backref_cache(trans, &rc->backref_cache)) {
3643 btrfs_end_transaction(trans);
3648 ret = find_next_extent(rc, path, &key);
3654 rc->extents_found++;
3656 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3657 struct btrfs_extent_item);
3658 flags = btrfs_extent_flags(path->nodes[0], ei);
3660 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3661 ret = add_tree_block(rc, &key, path, &blocks);
3662 } else if (rc->stage == UPDATE_DATA_PTRS &&
3663 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3664 ret = add_data_references(rc, &key, path, &blocks);
3666 btrfs_release_path(path);
3674 if (!RB_EMPTY_ROOT(&blocks)) {
3675 ret = relocate_tree_blocks(trans, rc, &blocks);
3677 if (ret != -EAGAIN) {
3681 rc->extents_found--;
3682 rc->search_start = key.objectid;
3686 btrfs_end_transaction_throttle(trans);
3687 btrfs_btree_balance_dirty(fs_info);
3690 if (rc->stage == MOVE_DATA_EXTENTS &&
3691 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3692 rc->found_file_extent = 1;
3693 ret = relocate_data_extent(rc->data_inode,
3694 &key, &rc->cluster);
3700 if (btrfs_should_cancel_balance(fs_info)) {
3705 if (trans && progress && err == -ENOSPC) {
3706 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3714 btrfs_release_path(path);
3715 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
3718 btrfs_end_transaction_throttle(trans);
3719 btrfs_btree_balance_dirty(fs_info);
3723 ret = relocate_file_extent_cluster(rc->data_inode,
3729 rc->create_reloc_tree = 0;
3730 set_reloc_control(rc);
3732 btrfs_backref_release_cache(&rc->backref_cache);
3733 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3736 * Even in the case when the relocation is cancelled, we should still go
3737 * through prepare_to_merge() and merge_reloc_roots().
3739 * For errors (including a cancelled balance), prepare_to_merge() will
3740 * mark all reloc trees orphan, then queue them for cleanup in
3741 * merge_reloc_roots().
3743 err = prepare_to_merge(rc, err);
3745 merge_reloc_roots(rc);
3747 rc->merge_reloc_tree = 0;
3748 unset_reloc_control(rc);
3749 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3751 /* get rid of pinned extents */
3752 trans = btrfs_join_transaction(rc->extent_root);
3753 if (IS_ERR(trans)) {
3754 err = PTR_ERR(trans);
3757 ret = btrfs_commit_transaction(trans);
3761 ret = clean_dirty_subvols(rc);
3762 if (ret < 0 && !err)
3764 btrfs_free_block_rsv(fs_info, rc->block_rsv);
3765 btrfs_free_path(path);
3769 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3770 struct btrfs_root *root, u64 objectid)
3772 struct btrfs_path *path;
3773 struct btrfs_inode_item *item;
3774 struct extent_buffer *leaf;
3777 path = btrfs_alloc_path();
3781 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3785 leaf = path->nodes[0];
3786 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3787 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3788 btrfs_set_inode_generation(leaf, item, 1);
3789 btrfs_set_inode_size(leaf, item, 0);
3790 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3791 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3792 BTRFS_INODE_PREALLOC);
3793 btrfs_mark_buffer_dirty(leaf);
3795 btrfs_free_path(path);
3799 static void delete_orphan_inode(struct btrfs_trans_handle *trans,
3800 struct btrfs_root *root, u64 objectid)
3802 struct btrfs_path *path;
3803 struct btrfs_key key;
3806 path = btrfs_alloc_path();
3812 key.objectid = objectid;
3813 key.type = BTRFS_INODE_ITEM_KEY;
3815 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3821 ret = btrfs_del_item(trans, root, path);
3824 btrfs_abort_transaction(trans, ret);
3825 btrfs_free_path(path);
3829 * Helper to create an inode for data relocation.
3830 * The inode is in the data relocation tree and its link count is 0.
3832 static noinline_for_stack
3833 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3834 struct btrfs_block_group *group)
3836 struct inode *inode = NULL;
3837 struct btrfs_trans_handle *trans;
3838 struct btrfs_root *root;
3842 root = btrfs_grab_root(fs_info->data_reloc_root);
3843 trans = btrfs_start_transaction(root, 6);
3844 if (IS_ERR(trans)) {
3845 btrfs_put_root(root);
3846 return ERR_CAST(trans);
3849 err = btrfs_get_free_objectid(root, &objectid);
3853 err = __insert_orphan_inode(trans, root, objectid);
3857 inode = btrfs_iget(fs_info->sb, objectid, root);
3858 if (IS_ERR(inode)) {
3859 delete_orphan_inode(trans, root, objectid);
3860 err = PTR_ERR(inode);
3864 BTRFS_I(inode)->index_cnt = group->start;
3866 err = btrfs_orphan_add(trans, BTRFS_I(inode));
3868 btrfs_put_root(root);
3869 btrfs_end_transaction(trans);
3870 btrfs_btree_balance_dirty(fs_info);
3873 inode = ERR_PTR(err);
3879 * Mark start of chunk relocation that is cancellable. Check if the cancellation
3880 * has been requested meanwhile and don't start in that case.
3884 * -EINPROGRESS operation is already in progress, that's probably a bug
3885 * -ECANCELED cancellation request was set before the operation started
3887 static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
3889 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
3890 /* This should not happen */
3891 btrfs_err(fs_info, "reloc already running, cannot start");
3892 return -EINPROGRESS;
3895 if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
3896 btrfs_info(fs_info, "chunk relocation canceled on start");
3898 * On cancel, clear all requests but let the caller mark
3899 * the end after cleanup operations.
3901 atomic_set(&fs_info->reloc_cancel_req, 0);
3908 * Mark end of chunk relocation that is cancellable and wake any waiters.
3910 static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
3912 /* Requested after start, clear bit first so any waiters can continue */
3913 if (atomic_read(&fs_info->reloc_cancel_req) > 0)
3914 btrfs_info(fs_info, "chunk relocation canceled during operation");
3915 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
3916 atomic_set(&fs_info->reloc_cancel_req, 0);
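/*
 * Usage sketch of the start/end pairing (hypothetical caller, error
 * handling trimmed):
 *
 *	ret = reloc_chunk_start(fs_info);
 *	if (ret < 0)
 *		return ret;		(-EINPROGRESS or -ECANCELED)
 *	... cancellable work, polling btrfs_should_cancel_balance() ...
 *	reloc_chunk_end(fs_info);	(clears the bit, wakes waiters)
 */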
3919 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
3921 struct reloc_control *rc;
3923 rc = kzalloc(sizeof(*rc), GFP_NOFS);
3927 INIT_LIST_HEAD(&rc->reloc_roots);
3928 INIT_LIST_HEAD(&rc->dirty_subvol_roots);
3929 btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
3930 mapping_tree_init(&rc->reloc_root_tree);
3931 extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
3935 static void free_reloc_control(struct reloc_control *rc)
3937 struct mapping_node *node, *tmp;
3939 free_reloc_roots(&rc->reloc_roots);
3940 rbtree_postorder_for_each_entry_safe(node, tmp,
3941 &rc->reloc_root_tree.rb_root, rb_node)
3948 * Print the block group being relocated
3950 static void describe_relocation(struct btrfs_fs_info *fs_info,
3951 struct btrfs_block_group *block_group)
3953 char buf[128] = {'\0'};
3955 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
3958 "relocating block group %llu flags %s",
3959 block_group->start, buf);
3962 static const char *stage_to_string(int stage)
3964 if (stage == MOVE_DATA_EXTENTS)
3965 return "move data extents";
3966 if (stage == UPDATE_DATA_PTRS)
3967 return "update data pointers";
3972 * function to relocate all extents in a block group.
3974 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
3976 struct btrfs_block_group *bg;
3977 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
3978 struct reloc_control *rc;
3979 struct inode *inode;
3980 struct btrfs_path *path;
3986 * This only gets set if we had a half-deleted snapshot on mount. We
3987 * cannot allow relocation to start while we're still trying to clean up
3988 * these pending deletions.
3990 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
3994 /* We may have been woken up by close_ctree, so bail if we're closing. */
3995 if (btrfs_fs_closing(fs_info))
3998 bg = btrfs_lookup_block_group(fs_info, group_start);
4003 * Relocation of a data block group creates ordered extents. Without
4004 * sb_start_write(), we can freeze the filesystem while unfinished
4005 * ordered extents are left. Such ordered extents can cause a deadlock
4006 * e.g. when syncfs() is waiting for their completion but they can't
4007 * finish because they block when joining a transaction, due to the
4008 * fact that the freeze locks are being held in write mode.
4010 if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
4011 ASSERT(sb_write_started(fs_info->sb));
4013 if (btrfs_pinned_by_swapfile(fs_info, bg)) {
4014 btrfs_put_block_group(bg);
4018 rc = alloc_reloc_control(fs_info);
4020 btrfs_put_block_group(bg);
4024 ret = reloc_chunk_start(fs_info);
4030 rc->extent_root = extent_root;
4031 rc->block_group = bg;
4033 ret = btrfs_inc_block_group_ro(rc->block_group, true);
4040 path = btrfs_alloc_path();
4046 inode = lookup_free_space_inode(rc->block_group, path);
4047 btrfs_free_path(path);
4050 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
4052 ret = PTR_ERR(inode);
4054 if (ret && ret != -ENOENT) {
4059 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4060 if (IS_ERR(rc->data_inode)) {
4061 err = PTR_ERR(rc->data_inode);
4062 rc->data_inode = NULL;
4066 describe_relocation(fs_info, rc->block_group);
4068 btrfs_wait_block_group_reservations(rc->block_group);
4069 btrfs_wait_nocow_writers(rc->block_group);
4070 btrfs_wait_ordered_roots(fs_info, U64_MAX,
4071 rc->block_group->start,
4072 rc->block_group->length);
4074 ret = btrfs_zone_finish(rc->block_group);
4075 WARN_ON(ret && ret != -EAGAIN);
4080 mutex_lock(&fs_info->cleaner_mutex);
4081 ret = relocate_block_group(rc);
4082 mutex_unlock(&fs_info->cleaner_mutex);
4086 finishes_stage = rc->stage;
4088 * We may have gotten ENOSPC after we already dirtied some
4089 * extents. If writeout happens while we're relocating a
4090 * different block group we could end up hitting the
4091 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4092 * btrfs_reloc_cow_block. Make sure we write everything out
4093 * properly so we don't trip over this problem, and then break
4094 * out of the loop if we hit an error.
4096 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4097 ret = btrfs_wait_ordered_range(rc->data_inode, 0,
4101 invalidate_mapping_pages(rc->data_inode->i_mapping,
4103 rc->stage = UPDATE_DATA_PTRS;
4109 if (rc->extents_found == 0)
4112 btrfs_info(fs_info, "found %llu extents, stage: %s",
4113 rc->extents_found, stage_to_string(finishes_stage));
4116 WARN_ON(rc->block_group->pinned > 0);
4117 WARN_ON(rc->block_group->reserved > 0);
4118 WARN_ON(rc->block_group->used > 0);
4121 btrfs_dec_block_group_ro(rc->block_group);
4122 iput(rc->data_inode);
4124 btrfs_put_block_group(bg);
4125 reloc_chunk_end(fs_info);
4126 free_reloc_control(rc);
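/*
 * Stage summary for btrfs_relocate_block_group() above: pass 1
 * (MOVE_DATA_EXTENTS) relocates tree blocks and copies file data into
 * new block groups; if any file extent was found, ordered extents are
 * flushed and the data reloc inode's page cache is invalidated, then
 * pass 2 (UPDATE_DATA_PTRS) runs to rewrite the file extent items so
 * they point at the relocated data.
 */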
4130 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4132 struct btrfs_fs_info *fs_info = root->fs_info;
4133 struct btrfs_trans_handle *trans;
4136 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4138 return PTR_ERR(trans);
4140 memset(&root->root_item.drop_progress, 0,
4141 sizeof(root->root_item.drop_progress));
4142 btrfs_set_root_drop_level(&root->root_item, 0);
4143 btrfs_set_root_refs(&root->root_item, 0);
4144 ret = btrfs_update_root(trans, fs_info->tree_root,
4145 &root->root_key, &root->root_item);
4147 err = btrfs_end_transaction(trans);
4154 * Recover relocation interrupted by a system crash.
4156 * This function resumes merging reloc trees with their corresponding fs
4157 * trees; this is important for keeping the sharing of tree blocks.
4159 int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
4161 LIST_HEAD(reloc_roots);
4162 struct btrfs_key key;
4163 struct btrfs_root *fs_root;
4164 struct btrfs_root *reloc_root;
4165 struct btrfs_path *path;
4166 struct extent_buffer *leaf;
4167 struct reloc_control *rc = NULL;
4168 struct btrfs_trans_handle *trans;
4172 path = btrfs_alloc_path();
4175 path->reada = READA_BACK;
4177 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4178 key.type = BTRFS_ROOT_ITEM_KEY;
4179 key.offset = (u64)-1;
4182 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4189 if (path->slots[0] == 0)
4193 leaf = path->nodes[0];
4194 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4195 btrfs_release_path(path);
4197 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4198 key.type != BTRFS_ROOT_ITEM_KEY)
4201 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
4202 if (IS_ERR(reloc_root)) {
4203 err = PTR_ERR(reloc_root);
4207 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
4208 list_add(&reloc_root->root_list, &reloc_roots);
4210 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4211 fs_root = btrfs_get_fs_root(fs_info,
4212 reloc_root->root_key.offset, false);
4213 if (IS_ERR(fs_root)) {
4214 ret = PTR_ERR(fs_root);
4215 if (ret != -ENOENT) {
4219 ret = mark_garbage_root(reloc_root);
4225 btrfs_put_root(fs_root);
4229 if (key.offset == 0)
4234 btrfs_release_path(path);
4236 if (list_empty(&reloc_roots))
4239 rc = alloc_reloc_control(fs_info);
4245 ret = reloc_chunk_start(fs_info);
4251 rc->extent_root = btrfs_extent_root(fs_info, 0);
4253 set_reloc_control(rc);
4255 trans = btrfs_join_transaction(rc->extent_root);
4256 if (IS_ERR(trans)) {
4257 err = PTR_ERR(trans);
4261 rc->merge_reloc_tree = 1;
4263 while (!list_empty(&reloc_roots)) {
4264 reloc_root = list_entry(reloc_roots.next,
4265 struct btrfs_root, root_list);
4266 list_del(&reloc_root->root_list);
4268 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4269 list_add_tail(&reloc_root->root_list,
4274 fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
4276 if (IS_ERR(fs_root)) {
4277 err = PTR_ERR(fs_root);
4278 list_add_tail(&reloc_root->root_list, &reloc_roots);
4279 btrfs_end_transaction(trans);
4283 err = __add_reloc_root(reloc_root);
4284 ASSERT(err != -EEXIST);
4286 list_add_tail(&reloc_root->root_list, &reloc_roots);
4287 btrfs_put_root(fs_root);
4288 btrfs_end_transaction(trans);
4291 fs_root->reloc_root = btrfs_grab_root(reloc_root);
4292 btrfs_put_root(fs_root);
4295 err = btrfs_commit_transaction(trans);
4299 merge_reloc_roots(rc);
4301 unset_reloc_control(rc);
4303 trans = btrfs_join_transaction(rc->extent_root);
4304 if (IS_ERR(trans)) {
4305 err = PTR_ERR(trans);
4308 err = btrfs_commit_transaction(trans);
4310 ret = clean_dirty_subvols(rc);
4311 if (ret < 0 && !err)
4314 unset_reloc_control(rc);
4316 reloc_chunk_end(fs_info);
4317 free_reloc_control(rc);
4319 free_reloc_roots(&reloc_roots);
4321 btrfs_free_path(path);
4324 /* Clean up the orphan inode in the data relocation tree */
4325 fs_root = btrfs_grab_root(fs_info->data_reloc_root);
4327 err = btrfs_orphan_cleanup(fs_root);
4328 btrfs_put_root(fs_root);
4334 * Helper to add ordered checksums for data relocation.
4336 * Cloning the checksums properly handles the nodatasum extents; it also
4337 * saves the CPU time that would be spent recalculating the checksums.
4339 int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
4341 struct btrfs_fs_info *fs_info = inode->root->fs_info;
4342 struct btrfs_root *csum_root;
4343 struct btrfs_ordered_sum *sums;
4344 struct btrfs_ordered_extent *ordered;
4350 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4351 BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
4353 disk_bytenr = file_pos + inode->index_cnt;
4354 csum_root = btrfs_csum_root(fs_info, disk_bytenr);
4355 ret = btrfs_lookup_csums_range(csum_root, disk_bytenr,
4356 disk_bytenr + len - 1, &list, 0, false);
4360 while (!list_empty(&list)) {
4361 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4362 list_del_init(&sums->list);
4365 * We need to offset the new_bytenr based on where the csum is:
4366 * we read in entire prealloc extents, but we may have written to,
4367 * say, the middle of the prealloc extent, so we must make sure
4368 * the csum goes with the right disk offset.
4371 * We can do this because the data reloc inode refers strictly to
4372 * the on-disk bytes, so we don't have to worry about disk_len vs
4373 * real len like with real inodes, since it's all disk length.
4376 new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
4377 sums->bytenr = new_bytenr;
4379 btrfs_add_ordered_sum(ordered, sums);
4382 btrfs_put_ordered_extent(ordered);
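/*
 * Worked example for the rebasing above (made-up numbers): the cluster's
 * old data sits at disk_bytenr = file_pos + index_cnt = 16M, the ordered
 * extent was written at disk bytenr 1M, and a csum entry covers old
 * bytenr 16M + 8K. The csum is then moved to 1M + (16M + 8K) - 16M =
 * 1M + 8K, i.e. it keeps the same offset inside the extent.
 */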
4386 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4387 struct btrfs_root *root, struct extent_buffer *buf,
4388 struct extent_buffer *cow)
4390 struct btrfs_fs_info *fs_info = root->fs_info;
4391 struct reloc_control *rc;
4392 struct btrfs_backref_node *node;
4397 rc = fs_info->reloc_ctl;
4401 BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));
4403 level = btrfs_header_level(buf);
4404 if (btrfs_header_generation(buf) <=
4405 btrfs_root_last_snapshot(&root->root_item))
4408 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4409 rc->create_reloc_tree) {
4410 WARN_ON(!first_cow && level == 0);
4412 node = rc->backref_cache.path[level];
4413 BUG_ON(node->bytenr != buf->start &&
4414 node->new_bytenr != buf->start);
4416 btrfs_backref_drop_node_buffer(node);
4417 atomic_inc(&cow->refs);
4419 node->new_bytenr = cow->start;
4421 if (!node->pending) {
4422 list_move_tail(&node->list,
4423 &rc->backref_cache.pending[level]);
4428 mark_block_processed(rc, node);
4430 if (first_cow && level > 0)
4431 rc->nodes_relocated += buf->len;
4434 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4435 ret = replace_file_extents(trans, rc, root, cow);
4440 * Called before creating a snapshot. It calculates the metadata
4441 * reservation required for relocating tree blocks in the snapshot.
4443 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4444 u64 *bytes_to_reserve)
4446 struct btrfs_root *root = pending->root;
4447 struct reloc_control *rc = root->fs_info->reloc_ctl;
4449 if (!rc || !have_reloc_root(root))
4452 if (!rc->merge_reloc_tree)
4455 root = root->reloc_root;
4456 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4458 * Relocation is in the stage of merging trees. The space
4459 * used by merging a reloc tree is twice the size of the
4460 * relocated tree nodes in the worst case: half for COWing
4461 * the reloc tree, half for COWing the fs tree. The space
4462 * used by COWing the reloc tree will be freed after the
4463 * tree is dropped. If we create a snapshot, COWing the fs
4464 * tree may use more space than it frees, so we need to
4465 * reserve extra space (see the worked example below).
4467 *bytes_to_reserve += rc->nodes_relocated;
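/*
 * Worked example (made-up size): with rc->nodes_relocated = 32M, merging
 * may COW up to 32M in the reloc tree and 32M in the fs tree. The reloc
 * tree half is returned when that tree is dropped, but a snapshot taken
 * during the merge keeps fs tree COWs from freeing as much space as they
 * use, hence the extra 32M added to the pending snapshot's reservation.
 */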
4471 * Called after the snapshot is created. It migrates the block reservation
4472 * and creates a reloc root for the newly created snapshot.
4474 * This is similar to btrfs_init_reloc_root(); we come out of here with two
4475 * references held on the reloc_root, one for root->reloc_root and one for rc->reloc_roots.
4478 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4479 struct btrfs_pending_snapshot *pending)
4481 struct btrfs_root *root = pending->root;
4482 struct btrfs_root *reloc_root;
4483 struct btrfs_root *new_root;
4484 struct reloc_control *rc = root->fs_info->reloc_ctl;
4487 if (!rc || !have_reloc_root(root))
4490 rc = root->fs_info->reloc_ctl;
4491 rc->merging_rsv_size += rc->nodes_relocated;
4493 if (rc->merge_reloc_tree) {
4494 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4496 rc->nodes_relocated, true);
4501 new_root = pending->snap;
4502 reloc_root = create_reloc_root(trans, root->reloc_root,
4503 new_root->root_key.objectid);
4504 if (IS_ERR(reloc_root))
4505 return PTR_ERR(reloc_root);
4507 ret = __add_reloc_root(reloc_root);
4508 ASSERT(ret != -EEXIST);
4510 /* Pairs with create_reloc_root */
4511 btrfs_put_root(reloc_root);
4514 new_root->reloc_root = btrfs_grab_root(reloc_root);
4516 if (rc->create_reloc_tree)
4517 ret = clone_backref_node(trans, rc, root, reloc_root);