1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009 Oracle. All rights reserved.
4  */
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/error-injection.h>
15 #include "transaction.h"
18 #include "btrfs_inode.h"
19 #include "async-thread.h"
20 #include "free-space-cache.h"
21 #include "inode-map.h"
23 #include "print-tree.h"
24 #include "delalloc-space.h"
25 #include "block-group.h"
31 * [What does relocation do]
33 * The objective of relocation is to relocate all extents of the target block
34 * group to other block groups.
35 * This is utilized by resize (shrink only), profile converting, compacting
36 * space, or balance routine to spread chunks over devices.
39 * ------------------------------------------------------------------
40 * BG A: 10 data extents | BG A: deleted
41 * BG B: 2 data extents | BG B: 10 data extents (2 old + 8 relocated)
42 * BG C: 1 extent           | BG C: 3 data extents (1 old + 2 relocated)
44 * [How does relocation work]
46 * 1. Mark the target block group read-only
47 * New extents won't be allocated from the target block group.
49 * 2.1 Record each extent in the target block group
50 * To build a proper map of extents to be relocated.
52 * 2.2 Build data reloc tree and reloc trees
53 * Data reloc tree will contain an inode, recording all newly relocated
54 * data extents.
55 * There will be only one data reloc tree for one data block group.
57 * Reloc tree will be a special snapshot of its source tree, containing
58 * relocated tree blocks.
59 * Each tree referring to a tree block in target block group will get its
60 * reloc tree built up.
62 * 2.3 Swap source tree with its corresponding reloc tree
63 * Each involved tree only refers to new extents after swap.
65 * 3. Cleanup reloc trees and data reloc tree.
66 * As old extents in the target block group are still referenced by reloc
67 * trees, we need to clean them up before really freeing the target block
68 * group.
70 * The main complexity is in steps 2.2 and 2.3.
72 * The entry point of relocation is the relocate_block_group() function.
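 *
 * Roughly, the steps above map onto the code in this file as follows
 * (a simplified view; see btrfs_relocate_block_group() for the real
 * flow, including error handling and restarts):
 *
 *   step 1:        the caller marks the block group read-only
 *   steps 2.1-2.3: relocate_block_group() records the extents, builds
 *                  the backref cache and the reloc trees, relocates the
 *                  extents, and merge_reloc_roots() then swaps the
 *                  reloc trees into place
 *   step 3:        clean_dirty_subvols() drops the now unused reloc trees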
76 * backref_node, mapping_node and tree_block start with this
79 struct rb_node rb_node;
84 * represents a tree block in the backref cache
87 struct rb_node rb_node;
91 /* objectid of tree block owner, may not be up to date */
93 /* link to pending, changed or detached list */
94 struct list_head list;
95 /* list of upper level blocks that reference this block */
96 struct list_head upper;
97 /* list of child blocks in the cache */
98 struct list_head lower;
99 /* NULL if this node is not tree root */
100 struct btrfs_root *root;
101 /* extent buffer got by COW the block */
102 struct extent_buffer *eb;
103 /* level of tree block */
104 unsigned int level:8;
105 /* is the block in non-reference counted tree */
106 unsigned int cowonly:1;
107 /* 1 if no child node in the cache */
108 unsigned int lowest:1;
109 /* is the extent buffer locked */
110 unsigned int locked:1;
111 /* has the block been processed */
112 unsigned int processed:1;
113 /* have backrefs of this block been checked */
114 unsigned int checked:1;
116 * 1 if corresponding block has been cowed but some upper
117 * level block pointers may not point to the new location
119 unsigned int pending:1;
121 * 1 if the backref node isn't connected to any other
122 * backref node.
124 unsigned int detached:1;
128 * represents a block pointer in the backref cache
130 struct backref_edge {
131 struct list_head list[2];
132 struct backref_node *node[2];
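/*
 * Note on the LOWER/UPPER convention used for edges: node[LOWER] is the
 * child (lower level) block and node[UPPER] the parent block of an edge.
 * list[LOWER] is linked into the child node's ->upper list and
 * list[UPPER] into the parent node's ->lower list, so the same edge can
 * be reached from either of the two nodes it connects.
 */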
137 #define RELOCATION_RESERVED_NODES 256
139 struct backref_cache {
140 /* red black tree of all backref nodes in the cache */
141 struct rb_root rb_root;
142 /* for passing backref nodes to btrfs_reloc_cow_block */
143 struct backref_node *path[BTRFS_MAX_LEVEL];
145 * list of blocks that have been cowed but some block
146 * pointers in upper level blocks may not reflect the
147 * new location
149 struct list_head pending[BTRFS_MAX_LEVEL];
150 /* list of backref nodes with no child node */
151 struct list_head leaves;
152 /* list of blocks that have been cowed in current transaction */
153 struct list_head changed;
154 /* list of detached backref nodes */
155 struct list_head detached;
164 * map address of tree root to tree
166 struct mapping_node {
167 struct rb_node rb_node;
172 struct mapping_tree {
173 struct rb_root rb_root;
178 * represents a tree block to process
181 struct rb_node rb_node;
183 struct btrfs_key key;
184 unsigned int level:8;
185 unsigned int key_ready:1;
188 #define MAX_EXTENTS 128
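/*
 * Data extents found in the target block group are batched into clusters
 * of physically contiguous extents, at most MAX_EXTENTS per cluster.
 * The cluster is copied through the data reloc inode: new space is
 * preallocated, the old data is read in and the pages are dirtied so
 * that writeback lands in the new extents.  boundary[] records the start
 * of each extent inside the cluster so the original extent boundaries
 * are preserved.
 */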
190 struct file_extent_cluster {
193 u64 boundary[MAX_EXTENTS];
197 struct reloc_control {
198 /* block group to relocate */
199 struct btrfs_block_group *block_group;
201 struct btrfs_root *extent_root;
202 /* inode for moving data */
203 struct inode *data_inode;
205 struct btrfs_block_rsv *block_rsv;
207 struct backref_cache backref_cache;
209 struct file_extent_cluster cluster;
210 /* tree blocks have been processed */
211 struct extent_io_tree processed_blocks;
212 /* map start of tree root to corresponding reloc tree */
213 struct mapping_tree reloc_root_tree;
214 /* list of reloc trees */
215 struct list_head reloc_roots;
216 /* list of subvolume trees that get relocated */
217 struct list_head dirty_subvol_roots;
218 /* size of metadata reservation for merging reloc trees */
219 u64 merging_rsv_size;
220 /* size of relocated tree nodes */
222 /* reserved size for block group relocation */
228 unsigned int stage:8;
229 unsigned int create_reloc_tree:1;
230 unsigned int merge_reloc_tree:1;
231 unsigned int found_file_extent:1;
234 /* stages of data relocation */
235 #define MOVE_DATA_EXTENTS 0
236 #define UPDATE_DATA_PTRS 1
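/*
 * Data relocation runs in two passes: MOVE_DATA_EXTENTS copies the data
 * of every extent in the target block group into the data reloc inode
 * (which allocates new extents outside of the block group), and
 * UPDATE_DATA_PTRS then rewrites the file extent items in the affected
 * trees so they point at the new locations (see replace_file_extents()).
 */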
238 static void remove_backref_node(struct backref_cache *cache,
239 struct backref_node *node);
240 static void __mark_block_processed(struct reloc_control *rc,
241 struct backref_node *node);
243 static void mapping_tree_init(struct mapping_tree *tree)
245 tree->rb_root = RB_ROOT;
246 spin_lock_init(&tree->lock);
249 static void backref_cache_init(struct backref_cache *cache)
252 cache->rb_root = RB_ROOT;
253 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
254 INIT_LIST_HEAD(&cache->pending[i]);
255 INIT_LIST_HEAD(&cache->changed);
256 INIT_LIST_HEAD(&cache->detached);
257 INIT_LIST_HEAD(&cache->leaves);
260 static void backref_cache_cleanup(struct backref_cache *cache)
262 struct backref_node *node;
265 while (!list_empty(&cache->detached)) {
266 node = list_entry(cache->detached.next,
267 struct backref_node, list);
268 remove_backref_node(cache, node);
271 while (!list_empty(&cache->leaves)) {
272 node = list_entry(cache->leaves.next,
273 struct backref_node, lower);
274 remove_backref_node(cache, node);
277 cache->last_trans = 0;
279 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
280 ASSERT(list_empty(&cache->pending[i]));
281 ASSERT(list_empty(&cache->changed));
282 ASSERT(list_empty(&cache->detached));
283 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
284 ASSERT(!cache->nr_nodes);
285 ASSERT(!cache->nr_edges);
288 static struct backref_node *alloc_backref_node(struct backref_cache *cache)
290 struct backref_node *node;
292 node = kzalloc(sizeof(*node), GFP_NOFS);
294 INIT_LIST_HEAD(&node->list);
295 INIT_LIST_HEAD(&node->upper);
296 INIT_LIST_HEAD(&node->lower);
297 RB_CLEAR_NODE(&node->rb_node);
303 static void free_backref_node(struct backref_cache *cache,
304 struct backref_node *node)
308 btrfs_put_root(node->root);
313 static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
315 struct backref_edge *edge;
317 edge = kzalloc(sizeof(*edge), GFP_NOFS);
323 static void free_backref_edge(struct backref_cache *cache,
324 struct backref_edge *edge)
332 static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
333 struct rb_node *node)
335 struct rb_node **p = &root->rb_node;
336 struct rb_node *parent = NULL;
337 struct tree_entry *entry;
341 entry = rb_entry(parent, struct tree_entry, rb_node);
343 if (bytenr < entry->bytenr)
345 else if (bytenr > entry->bytenr)
351 rb_link_node(node, parent, p);
352 rb_insert_color(node, root);
356 static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
358 struct rb_node *n = root->rb_node;
359 struct tree_entry *entry;
362 entry = rb_entry(n, struct tree_entry, rb_node);
364 if (bytenr < entry->bytenr)
366 else if (bytenr > entry->bytenr)
374 static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
377 struct btrfs_fs_info *fs_info = NULL;
378 struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
381 fs_info = bnode->root->fs_info;
382 btrfs_panic(fs_info, errno,
383 "Inconsistency in backref cache found at offset %llu",
388 * walk up backref nodes until reaching a node that represents a tree root
390 static struct backref_node *walk_up_backref(struct backref_node *node,
391 struct backref_edge *edges[],
394 struct backref_edge *edge;
397 while (!list_empty(&node->upper)) {
398 edge = list_entry(node->upper.next,
399 struct backref_edge, list[LOWER]);
401 node = edge->node[UPPER];
403 BUG_ON(node->detached);
409 * walk down backref nodes to find start of next reference path
411 static struct backref_node *walk_down_backref(struct backref_edge *edges[],
414 struct backref_edge *edge;
415 struct backref_node *lower;
419 edge = edges[idx - 1];
420 lower = edge->node[LOWER];
421 if (list_is_last(&edge->list[LOWER], &lower->upper)) {
425 edge = list_entry(edge->list[LOWER].next,
426 struct backref_edge, list[LOWER]);
427 edges[idx - 1] = edge;
429 return edge->node[UPPER];
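/*
 * walk_up_backref() and walk_down_backref() together give a depth first
 * walk over the backref graph.  Callers keep the current path in edges[]
 * and alternate between the two helpers to visit every root that
 * directly or indirectly references the starting block, roughly like
 * this (a sketch of the pattern used by the reloc root selection code,
 * not a verbatim copy of it):
 *
 *	index = 0;
 *	next = node;
 *	while (1) {
 *		next = walk_up_backref(next, edges, &index);
 *		root = next->root;
 *		... use root ...
 *		next = walk_down_backref(edges, &index);
 *		if (!next || next->level <= node->level)
 *			break;
 *	}
 */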
435 static void unlock_node_buffer(struct backref_node *node)
438 btrfs_tree_unlock(node->eb);
443 static void drop_node_buffer(struct backref_node *node)
446 unlock_node_buffer(node);
447 free_extent_buffer(node->eb);
452 static void drop_backref_node(struct backref_cache *tree,
453 struct backref_node *node)
455 BUG_ON(!list_empty(&node->upper));
457 drop_node_buffer(node);
458 list_del(&node->list);
459 list_del(&node->lower);
460 if (!RB_EMPTY_NODE(&node->rb_node))
461 rb_erase(&node->rb_node, &tree->rb_root);
462 free_backref_node(tree, node);
466 * remove a backref node from the backref cache
468 static void remove_backref_node(struct backref_cache *cache,
469 struct backref_node *node)
471 struct backref_node *upper;
472 struct backref_edge *edge;
477 BUG_ON(!node->lowest && !node->detached);
478 while (!list_empty(&node->upper)) {
479 edge = list_entry(node->upper.next, struct backref_edge,
481 upper = edge->node[UPPER];
482 list_del(&edge->list[LOWER]);
483 list_del(&edge->list[UPPER]);
484 free_backref_edge(cache, edge);
486 if (RB_EMPTY_NODE(&upper->rb_node)) {
487 BUG_ON(!list_empty(&node->upper));
488 drop_backref_node(cache, node);
494 * add the node to the leaf node list if no other
495 * child blocks are cached.
497 if (list_empty(&upper->lower)) {
498 list_add_tail(&upper->lower, &cache->leaves);
503 drop_backref_node(cache, node);
506 static void update_backref_node(struct backref_cache *cache,
507 struct backref_node *node, u64 bytenr)
509 struct rb_node *rb_node;
510 rb_erase(&node->rb_node, &cache->rb_root);
511 node->bytenr = bytenr;
512 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
514 backref_tree_panic(rb_node, -EEXIST, bytenr);
518 * update backref cache after a transaction commit
520 static int update_backref_cache(struct btrfs_trans_handle *trans,
521 struct backref_cache *cache)
523 struct backref_node *node;
526 if (cache->last_trans == 0) {
527 cache->last_trans = trans->transid;
531 if (cache->last_trans == trans->transid)
535 * detached nodes are used to avoid unnecessary backref
536 * lookups. A transaction commit changes the extent tree,
537 * so the detached nodes are no longer useful.
539 while (!list_empty(&cache->detached)) {
540 node = list_entry(cache->detached.next,
541 struct backref_node, list);
542 remove_backref_node(cache, node);
545 while (!list_empty(&cache->changed)) {
546 node = list_entry(cache->changed.next,
547 struct backref_node, list);
548 list_del_init(&node->list);
549 BUG_ON(node->pending);
550 update_backref_node(cache, node, node->new_bytenr);
554 * some nodes can be left in the pending list if there were
555 * errors while processing the pending nodes.
557 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
558 list_for_each_entry(node, &cache->pending[level], list) {
559 BUG_ON(!node->pending);
560 if (node->bytenr == node->new_bytenr)
562 update_backref_node(cache, node, node->new_bytenr);
566 cache->last_trans = 0;
570 static bool reloc_root_is_dead(struct btrfs_root *root)
573 * Pair with set_bit/clear_bit in clean_dirty_subvols and
574 * btrfs_update_reloc_root. We need to see the updated bit before
575 * trying to access reloc_root
578 if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
584 * Check if this subvolume tree has a valid reloc tree.
586 * A reloc tree after swap is considered dead, and thus not valid.
587 * This is enough for most callers, as they don't distinguish dead reloc root
588 * from no reloc root. But should_ignore_root() below is a special case.
590 static bool have_reloc_root(struct btrfs_root *root)
592 if (reloc_root_is_dead(root))
594 if (!root->reloc_root)
599 static int should_ignore_root(struct btrfs_root *root)
601 struct btrfs_root *reloc_root;
603 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
606 /* This root has been merged with its reloc tree, we can ignore it */
607 if (reloc_root_is_dead(root))
610 reloc_root = root->reloc_root;
614 if (btrfs_header_generation(reloc_root->commit_root) ==
615 root->fs_info->running_transaction->transid)
618 * if there is a reloc tree and it was created in a previous
619 * transaction, backref lookup can find the reloc tree,
620 * so the backref node for the fs tree root is useless for
621 * relocation.
626 * find reloc tree by address of tree root
628 static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
631 struct rb_node *rb_node;
632 struct mapping_node *node;
633 struct btrfs_root *root = NULL;
635 spin_lock(&rc->reloc_root_tree.lock);
636 rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
638 node = rb_entry(rb_node, struct mapping_node, rb_node);
639 root = (struct btrfs_root *)node->data;
641 spin_unlock(&rc->reloc_root_tree.lock);
642 return btrfs_grab_root(root);
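/*
 * Look up the fs/subvolume root with the given objectid.  Using offset
 * (u64)-1 matches the highest root item for that objectid, and check_ref
 * is false so roots whose refs have already dropped to zero (subvolumes
 * being deleted) are returned as well, since relocation may still have
 * to process their extents.
 */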
645 static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
648 struct btrfs_key key;
650 key.objectid = root_objectid;
651 key.type = BTRFS_ROOT_ITEM_KEY;
652 key.offset = (u64)-1;
654 return btrfs_get_fs_root(fs_info, &key, false);
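/*
 * For the EXTENT_ITEM/METADATA_ITEM of a tree block, set *ptr and *end
 * to the region of the leaf that holds the inline backrefs, so the
 * caller can iterate over them with btrfs_extent_inline_ref_size().
 */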
657 static noinline_for_stack
658 int find_inline_backref(struct extent_buffer *leaf, int slot,
659 unsigned long *ptr, unsigned long *end)
661 struct btrfs_key key;
662 struct btrfs_extent_item *ei;
663 struct btrfs_tree_block_info *bi;
666 btrfs_item_key_to_cpu(leaf, &key, slot);
668 item_size = btrfs_item_size_nr(leaf, slot);
669 if (item_size < sizeof(*ei)) {
670 btrfs_print_v0_err(leaf->fs_info);
671 btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
674 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
675 WARN_ON(!(btrfs_extent_flags(leaf, ei) &
676 BTRFS_EXTENT_FLAG_TREE_BLOCK));
678 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
679 item_size <= sizeof(*ei) + sizeof(*bi)) {
680 WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
683 if (key.type == BTRFS_METADATA_ITEM_KEY &&
684 item_size <= sizeof(*ei)) {
685 WARN_ON(item_size < sizeof(*ei));
689 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
690 bi = (struct btrfs_tree_block_info *)(ei + 1);
691 *ptr = (unsigned long)(bi + 1);
693 *ptr = (unsigned long)(ei + 1);
695 *end = (unsigned long)ei + item_size;
700 * build backref tree for a given tree block. root of the backref tree
701 * corresponds to the tree block, leaves of the backref tree correspond to
702 * roots of b-trees that reference the tree block.
704 * the basic idea of this function is to check backrefs of a given block
705 * to find upper level blocks that reference the block, and then check
706 * backrefs of these upper level blocks recursively. the recursion stops
707 * when a tree root is reached or backrefs for the block are already cached.
709 * NOTE: if we find backrefs for a block are cached, we know backrefs
710 * for all upper level blocks that directly/indirectly reference the
711 * block are also cached.
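 *
 * For example, a block B referenced from two subvolume trees ends up as
 * a small graph in the cache, with one backref_node per tree block and
 * one backref_edge per parent/child link, node(B) being the lowest node:
 *
 *	node(root of tree X)    node(root of tree Y)
 *	              \            /
 *	               node(B)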
713 static noinline_for_stack
714 struct backref_node *build_backref_tree(struct reloc_control *rc,
715 struct btrfs_key *node_key,
716 int level, u64 bytenr)
718 struct backref_cache *cache = &rc->backref_cache;
719 struct btrfs_path *path1; /* For searching extent root */
720 struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
721 struct extent_buffer *eb;
722 struct btrfs_root *root;
723 struct backref_node *cur;
724 struct backref_node *upper;
725 struct backref_node *lower;
726 struct backref_node *node = NULL;
727 struct backref_node *exist = NULL;
728 struct backref_edge *edge;
729 struct rb_node *rb_node;
730 struct btrfs_key key;
733 LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
738 bool need_check = true;
740 path1 = btrfs_alloc_path();
741 path2 = btrfs_alloc_path();
742 if (!path1 || !path2) {
747 node = alloc_backref_node(cache);
753 node->bytenr = bytenr;
760 key.objectid = cur->bytenr;
761 key.type = BTRFS_METADATA_ITEM_KEY;
762 key.offset = (u64)-1;
764 path1->search_commit_root = 1;
765 path1->skip_locking = 1;
766 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
773 ASSERT(path1->slots[0]);
777 WARN_ON(cur->checked);
778 if (!list_empty(&cur->upper)) {
780 * the backref was added previously when processing
781 * backref of type BTRFS_TREE_BLOCK_REF_KEY
783 ASSERT(list_is_singular(&cur->upper));
784 edge = list_entry(cur->upper.next, struct backref_edge,
786 ASSERT(list_empty(&edge->list[UPPER]));
787 exist = edge->node[UPPER];
789 * add the upper level block to the pending list if we need
790 * to check its backrefs
792 if (!exist->checked)
793 list_add_tail(&edge->list[UPPER], &list);
800 eb = path1->nodes[0];
803 if (path1->slots[0] >= btrfs_header_nritems(eb)) {
804 ret = btrfs_next_leaf(rc->extent_root, path1);
811 eb = path1->nodes[0];
814 btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
815 if (key.objectid != cur->bytenr) {
820 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
821 key.type == BTRFS_METADATA_ITEM_KEY) {
822 ret = find_inline_backref(eb, path1->slots[0],
830 /* update key for inline back ref */
831 struct btrfs_extent_inline_ref *iref;
833 iref = (struct btrfs_extent_inline_ref *)ptr;
834 type = btrfs_get_extent_inline_ref_type(eb, iref,
835 BTRFS_REF_TYPE_BLOCK);
836 if (type == BTRFS_REF_TYPE_INVALID) {
841 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
843 WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
844 key.type != BTRFS_SHARED_BLOCK_REF_KEY);
848 * Parent node found and matches current inline ref, no need to
849 * rebuild this node for this inline ref.
852 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
853 exist->owner == key.offset) ||
854 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
855 exist->bytenr == key.offset))) {
860 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
861 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
862 if (key.objectid == key.offset) {
864 * Only root blocks of reloc trees use backrefs
865 * pointing to themselves.
867 root = find_reloc_root(rc, cur->bytenr);
873 edge = alloc_backref_edge(cache);
878 rb_node = tree_search(&cache->rb_root, key.offset);
880 upper = alloc_backref_node(cache);
882 free_backref_edge(cache, edge);
886 upper->bytenr = key.offset;
887 upper->level = cur->level + 1;
889 * backrefs for the upper level block aren't
890 * cached, add the block to the pending list
892 list_add_tail(&edge->list[UPPER], &list);
894 upper = rb_entry(rb_node, struct backref_node,
896 ASSERT(upper->checked);
897 INIT_LIST_HEAD(&edge->list[UPPER]);
899 list_add_tail(&edge->list[LOWER], &cur->upper);
900 edge->node[LOWER] = cur;
901 edge->node[UPPER] = upper;
904 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
906 btrfs_print_v0_err(rc->extent_root->fs_info);
907 btrfs_handle_fs_error(rc->extent_root->fs_info, err,
910 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
915 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
916 * means the root objectid. We need to search the tree to get
917 * its parent bytenr.
919 root = read_fs_root(rc->extent_root->fs_info, key.offset);
925 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
928 if (btrfs_root_level(&root->root_item) == cur->level) {
930 ASSERT(btrfs_root_bytenr(&root->root_item) ==
932 if (should_ignore_root(root)) {
933 btrfs_put_root(root);
934 list_add(&cur->list, &useless);
941 level = cur->level + 1;
943 /* Search the tree to find parent blocks referring to the block. */
944 path2->search_commit_root = 1;
945 path2->skip_locking = 1;
946 path2->lowest_level = level;
947 ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
948 path2->lowest_level = 0;
950 btrfs_put_root(root);
954 if (ret > 0 && path2->slots[level] > 0)
955 path2->slots[level]--;
957 eb = path2->nodes[level];
958 if (btrfs_node_blockptr(eb, path2->slots[level]) !=
960 btrfs_err(root->fs_info,
961 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
962 cur->bytenr, level - 1,
963 root->root_key.objectid,
964 node_key->objectid, node_key->type,
966 btrfs_put_root(root);
973 /* Add all nodes and edges in the path */
974 for (; level < BTRFS_MAX_LEVEL; level++) {
975 if (!path2->nodes[level]) {
976 ASSERT(btrfs_root_bytenr(&root->root_item) ==
978 if (should_ignore_root(root)) {
979 btrfs_put_root(root);
980 list_add(&lower->list, &useless);
987 edge = alloc_backref_edge(cache);
989 btrfs_put_root(root);
994 eb = path2->nodes[level];
995 rb_node = tree_search(&cache->rb_root, eb->start);
997 upper = alloc_backref_node(cache);
999 btrfs_put_root(root);
1000 free_backref_edge(cache, edge);
1004 upper->bytenr = eb->start;
1005 upper->owner = btrfs_header_owner(eb);
1006 upper->level = lower->level + 1;
1007 if (!test_bit(BTRFS_ROOT_REF_COWS,
1012 * if we know the block isn't shared
1013 * we can avoid checking its backrefs.
1015 if (btrfs_block_can_be_shared(root, eb))
1021 * add the block to the pending list if we
1022 * need to check its backrefs; we only do this once
1023 * while walking up a tree, as we will catch
1024 * anything else later on.
1026 if (!upper->checked && need_check) {
1028 list_add_tail(&edge->list[UPPER],
1033 INIT_LIST_HEAD(&edge->list[UPPER]);
1036 upper = rb_entry(rb_node, struct backref_node,
1038 ASSERT(upper->checked);
1039 INIT_LIST_HEAD(&edge->list[UPPER]);
1041 upper->owner = btrfs_header_owner(eb);
1043 list_add_tail(&edge->list[LOWER], &lower->upper);
1044 edge->node[LOWER] = lower;
1045 edge->node[UPPER] = upper;
1048 btrfs_put_root(root);
1054 btrfs_release_path(path2);
1057 ptr += btrfs_extent_inline_ref_size(key.type);
1067 btrfs_release_path(path1);
1072 /* the pending list isn't empty, take the first block to process */
1073 if (!list_empty(&list)) {
1074 edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1075 list_del_init(&edge->list[UPPER]);
1076 cur = edge->node[UPPER];
1081 * everything goes well, connect backref nodes and insert backref nodes
1082 * into the cache.
1084 ASSERT(node->checked);
1085 cowonly = node->cowonly;
1087 rb_node = tree_insert(&cache->rb_root, node->bytenr,
1090 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1091 list_add_tail(&node->lower, &cache->leaves);
1094 list_for_each_entry(edge, &node->upper, list[LOWER])
1095 list_add_tail(&edge->list[UPPER], &list);
1097 while (!list_empty(&list)) {
1098 edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1099 list_del_init(&edge->list[UPPER]);
1100 upper = edge->node[UPPER];
1101 if (upper->detached) {
1102 list_del(&edge->list[LOWER]);
1103 lower = edge->node[LOWER];
1104 free_backref_edge(cache, edge);
1105 if (list_empty(&lower->upper))
1106 list_add(&lower->list, &useless);
1110 if (!RB_EMPTY_NODE(&upper->rb_node)) {
1111 if (upper->lowest) {
1112 list_del_init(&upper->lower);
1116 list_add_tail(&edge->list[UPPER], &upper->lower);
1120 if (!upper->checked) {
1122 * Still want to blow up for developers since this is a
1123 * logic bug.
1129 if (cowonly != upper->cowonly) {
1136 rb_node = tree_insert(&cache->rb_root, upper->bytenr,
1139 backref_tree_panic(rb_node, -EEXIST,
1143 list_add_tail(&edge->list[UPPER], &upper->lower);
1145 list_for_each_entry(edge, &upper->upper, list[LOWER])
1146 list_add_tail(&edge->list[UPPER], &list);
1149 * process useless backref nodes. backref nodes for tree leaves
1150 * are deleted from the cache. backref nodes for upper level
1151 * tree blocks are left in the cache to avoid unnecessary backref
1152 * lookups.
1154 while (!list_empty(&useless)) {
1155 upper = list_entry(useless.next, struct backref_node, list);
1156 list_del_init(&upper->list);
1157 ASSERT(list_empty(&upper->upper));
1160 if (upper->lowest) {
1161 list_del_init(&upper->lower);
1164 while (!list_empty(&upper->lower)) {
1165 edge = list_entry(upper->lower.next,
1166 struct backref_edge, list[UPPER]);
1167 list_del(&edge->list[UPPER]);
1168 list_del(&edge->list[LOWER]);
1169 lower = edge->node[LOWER];
1170 free_backref_edge(cache, edge);
1172 if (list_empty(&lower->upper))
1173 list_add(&lower->list, &useless);
1175 __mark_block_processed(rc, upper);
1176 if (upper->level > 0) {
1177 list_add(&upper->list, &cache->detached);
1178 upper->detached = 1;
1180 rb_erase(&upper->rb_node, &cache->rb_root);
1181 free_backref_node(cache, upper);
1185 btrfs_free_path(path1);
1186 btrfs_free_path(path2);
1188 while (!list_empty(&useless)) {
1189 lower = list_entry(useless.next,
1190 struct backref_node, list);
1191 list_del_init(&lower->list);
1193 while (!list_empty(&list)) {
1194 edge = list_first_entry(&list, struct backref_edge,
1196 list_del(&edge->list[UPPER]);
1197 list_del(&edge->list[LOWER]);
1198 lower = edge->node[LOWER];
1199 upper = edge->node[UPPER];
1200 free_backref_edge(cache, edge);
1203 * Lower is no longer linked to any upper backref nodes
1204 * and isn't in the cache, so we can free it ourselves.
1206 if (list_empty(&lower->upper) &&
1207 RB_EMPTY_NODE(&lower->rb_node))
1208 list_add(&lower->list, &useless);
1210 if (!RB_EMPTY_NODE(&upper->rb_node))
1213 /* Add this node's upper edges to the list to process */
1214 list_for_each_entry(edge, &upper->upper, list[LOWER])
1215 list_add_tail(&edge->list[UPPER], &list);
1216 if (list_empty(&upper->upper))
1217 list_add(&upper->list, &useless);
1220 while (!list_empty(&useless)) {
1221 lower = list_entry(useless.next,
1222 struct backref_node, list);
1223 list_del_init(&lower->list);
1226 free_backref_node(cache, lower);
1229 remove_backref_node(cache, node);
1230 return ERR_PTR(err);
1232 ASSERT(!node || !node->detached);
1237 * helper to add a backref node for the newly created snapshot.
1238 * the backref node is created by cloning the backref node that
1239 * corresponds to the root of the source tree.
1241 static int clone_backref_node(struct btrfs_trans_handle *trans,
1242 struct reloc_control *rc,
1243 struct btrfs_root *src,
1244 struct btrfs_root *dest)
1246 struct btrfs_root *reloc_root = src->reloc_root;
1247 struct backref_cache *cache = &rc->backref_cache;
1248 struct backref_node *node = NULL;
1249 struct backref_node *new_node;
1250 struct backref_edge *edge;
1251 struct backref_edge *new_edge;
1252 struct rb_node *rb_node;
1254 if (cache->last_trans > 0)
1255 update_backref_cache(trans, cache);
1257 rb_node = tree_search(&cache->rb_root, src->commit_root->start);
1259 node = rb_entry(rb_node, struct backref_node, rb_node);
1263 BUG_ON(node->new_bytenr != reloc_root->node->start);
1267 rb_node = tree_search(&cache->rb_root,
1268 reloc_root->commit_root->start);
1270 node = rb_entry(rb_node, struct backref_node,
1272 BUG_ON(node->detached);
1279 new_node = alloc_backref_node(cache);
1283 new_node->bytenr = dest->node->start;
1284 new_node->level = node->level;
1285 new_node->lowest = node->lowest;
1286 new_node->checked = 1;
1287 new_node->root = btrfs_grab_root(dest);
1288 ASSERT(new_node->root);
1290 if (!node->lowest) {
1291 list_for_each_entry(edge, &node->lower, list[UPPER]) {
1292 new_edge = alloc_backref_edge(cache);
1296 new_edge->node[UPPER] = new_node;
1297 new_edge->node[LOWER] = edge->node[LOWER];
1298 list_add_tail(&new_edge->list[UPPER],
1302 list_add_tail(&new_node->lower, &cache->leaves);
1305 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
1306 &new_node->rb_node);
1308 backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
1310 if (!new_node->lowest) {
1311 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
1312 list_add_tail(&new_edge->list[LOWER],
1313 &new_edge->node[LOWER]->upper);
1318 while (!list_empty(&new_node->lower)) {
1319 new_edge = list_entry(new_node->lower.next,
1320 struct backref_edge, list[UPPER]);
1321 list_del(&new_edge->list[UPPER]);
1322 free_backref_edge(cache, new_edge);
1324 free_backref_node(cache, new_node);
1329 * helper to add 'address of tree root -> reloc tree' mapping
1331 static int __must_check __add_reloc_root(struct btrfs_root *root)
1333 struct btrfs_fs_info *fs_info = root->fs_info;
1334 struct rb_node *rb_node;
1335 struct mapping_node *node;
1336 struct reloc_control *rc = fs_info->reloc_ctl;
1338 node = kmalloc(sizeof(*node), GFP_NOFS);
1342 node->bytenr = root->commit_root->start;
1345 spin_lock(&rc->reloc_root_tree.lock);
1346 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1347 node->bytenr, &node->rb_node);
1348 spin_unlock(&rc->reloc_root_tree.lock);
1350 btrfs_panic(fs_info, -EEXIST,
1351 "Duplicate root found for start=%llu while inserting into relocation tree",
1355 list_add_tail(&root->root_list, &rc->reloc_roots);
1360 * helper to delete the 'address of tree root -> reloc tree'
1361 * mapping
1363 static void __del_reloc_root(struct btrfs_root *root)
1365 struct btrfs_fs_info *fs_info = root->fs_info;
1366 struct rb_node *rb_node;
1367 struct mapping_node *node = NULL;
1368 struct reloc_control *rc = fs_info->reloc_ctl;
1369 bool put_ref = false;
1371 if (rc && root->node) {
1372 spin_lock(&rc->reloc_root_tree.lock);
1373 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1374 root->commit_root->start);
1376 node = rb_entry(rb_node, struct mapping_node, rb_node);
1377 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1378 RB_CLEAR_NODE(&node->rb_node);
1380 spin_unlock(&rc->reloc_root_tree.lock);
1383 BUG_ON((struct btrfs_root *)node->data != root);
1387 * We only put the reloc root here if it's on the list. There are a lot
1388 * of places where the pattern is to splice the rc->reloc_roots, process
1389 * the reloc roots, and then add the reloc root back onto
1390 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
1391 * list we don't want the reference being dropped, because the guy
1392 * messing with the list is in charge of the reference.
1394 spin_lock(&fs_info->trans_lock);
1395 if (!list_empty(&root->root_list)) {
1397 list_del_init(&root->root_list);
1399 spin_unlock(&fs_info->trans_lock);
1401 btrfs_put_root(root);
1406 * helper to update the 'address of tree root -> reloc tree'
1407 * mapping
1409 static int __update_reloc_root(struct btrfs_root *root)
1411 struct btrfs_fs_info *fs_info = root->fs_info;
1412 struct rb_node *rb_node;
1413 struct mapping_node *node = NULL;
1414 struct reloc_control *rc = fs_info->reloc_ctl;
1416 spin_lock(&rc->reloc_root_tree.lock);
1417 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1418 root->commit_root->start);
1420 node = rb_entry(rb_node, struct mapping_node, rb_node);
1421 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1423 spin_unlock(&rc->reloc_root_tree.lock);
1427 BUG_ON((struct btrfs_root *)node->data != root);
1429 spin_lock(&rc->reloc_root_tree.lock);
1430 node->bytenr = root->node->start;
1431 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1432 node->bytenr, &node->rb_node);
1433 spin_unlock(&rc->reloc_root_tree.lock);
1435 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1439 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
1440 struct btrfs_root *root, u64 objectid)
1442 struct btrfs_fs_info *fs_info = root->fs_info;
1443 struct btrfs_root *reloc_root;
1444 struct extent_buffer *eb;
1445 struct btrfs_root_item *root_item;
1446 struct btrfs_key root_key;
1449 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
1452 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
1453 root_key.type = BTRFS_ROOT_ITEM_KEY;
1454 root_key.offset = objectid;
1456 if (root->root_key.objectid == objectid) {
1457 u64 commit_root_gen;
1459 /* called by btrfs_init_reloc_root */
1460 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
1461 BTRFS_TREE_RELOC_OBJECTID);
1464 * Set the last_snapshot field to the generation of the commit
1465 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
1466 * correctly (returns true) when the relocation root is created
1467 * either inside the critical section of a transaction commit
1468 * (through transaction.c:qgroup_account_snapshot()) or when
1469 * it's created before the transaction commit is started.
1471 commit_root_gen = btrfs_header_generation(root->commit_root);
1472 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
1475 * called by btrfs_reloc_post_snapshot_hook.
1476 * the source tree is a reloc tree, all tree blocks
1477 * modified after it was created have RELOC flag
1478 * set in their headers. so it's OK to not update
1479 * the 'last_snapshot'.
1481 ret = btrfs_copy_root(trans, root, root->node, &eb,
1482 BTRFS_TREE_RELOC_OBJECTID);
1486 memcpy(root_item, &root->root_item, sizeof(*root_item));
1487 btrfs_set_root_bytenr(root_item, eb->start);
1488 btrfs_set_root_level(root_item, btrfs_header_level(eb));
1489 btrfs_set_root_generation(root_item, trans->transid);
1491 if (root->root_key.objectid == objectid) {
1492 btrfs_set_root_refs(root_item, 0);
1493 memset(&root_item->drop_progress, 0,
1494 sizeof(struct btrfs_disk_key));
1495 root_item->drop_level = 0;
1498 btrfs_tree_unlock(eb);
1499 free_extent_buffer(eb);
1501 ret = btrfs_insert_root(trans, fs_info->tree_root,
1502 &root_key, root_item);
1506 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
1507 BUG_ON(IS_ERR(reloc_root));
1508 set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state);
1509 reloc_root->last_trans = trans->transid;
1514 * create reloc tree for a given fs tree. reloc tree is just a
1515 * snapshot of the fs tree with special root objectid.
1517 * The reloc_root comes out of here with two references, one for
1518 * root->reloc_root, and another for being on the rc->reloc_roots list.
1520 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1521 struct btrfs_root *root)
1523 struct btrfs_fs_info *fs_info = root->fs_info;
1524 struct btrfs_root *reloc_root;
1525 struct reloc_control *rc = fs_info->reloc_ctl;
1526 struct btrfs_block_rsv *rsv;
1534 * The subvolume has reloc tree but the swap is finished, no need to
1535 * create/update the dead reloc tree
1537 if (reloc_root_is_dead(root))
1541 * This is subtle but important. We do not do
1542 * record_root_in_transaction for reloc roots, instead we record their
1543 * corresponding fs root, and then here we update the last trans for the
1544 * reloc root. This means that we have to do this for the entire life
1545 * of the reloc root, regardless of which stage of the relocation we are
1546 * in.
1548 if (root->reloc_root) {
1549 reloc_root = root->reloc_root;
1550 reloc_root->last_trans = trans->transid;
1555 * We are merging reloc roots, we do not need new reloc trees. Also
1556 * reloc trees never need their own reloc tree.
1558 if (!rc->create_reloc_tree ||
1559 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1562 if (!trans->reloc_reserved) {
1563 rsv = trans->block_rsv;
1564 trans->block_rsv = rc->block_rsv;
1567 reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
1569 trans->block_rsv = rsv;
1571 ret = __add_reloc_root(reloc_root);
1573 root->reloc_root = btrfs_grab_root(reloc_root);
1578 * update root item of reloc tree
1580 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1581 struct btrfs_root *root)
1583 struct btrfs_fs_info *fs_info = root->fs_info;
1584 struct btrfs_root *reloc_root;
1585 struct btrfs_root_item *root_item;
1588 if (!have_reloc_root(root))
1591 reloc_root = root->reloc_root;
1592 root_item = &reloc_root->root_item;
1595 * We are probably ok here, but __del_reloc_root() will drop its ref of
1596 * the root. We have the ref for root->reloc_root, but just in case
1597 * hold it while we update the reloc root.
1599 btrfs_grab_root(reloc_root);
1601 /* root->reloc_root will stay until current relocation finished */
1602 if (fs_info->reloc_ctl->merge_reloc_tree &&
1603 btrfs_root_refs(root_item) == 0) {
1604 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1606 * Mark the tree as dead before we change reloc_root so
1607 * have_reloc_root will not touch it from now on.
1610 __del_reloc_root(reloc_root);
1613 if (reloc_root->commit_root != reloc_root->node) {
1614 __update_reloc_root(reloc_root);
1615 btrfs_set_root_node(root_item, reloc_root->node);
1616 free_extent_buffer(reloc_root->commit_root);
1617 reloc_root->commit_root = btrfs_root_node(reloc_root);
1620 ret = btrfs_update_root(trans, fs_info->tree_root,
1621 &reloc_root->root_key, root_item);
1623 btrfs_put_root(reloc_root);
1629 * helper to find first cached inode with inode number >= objectid
1632 static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
1634 struct rb_node *node;
1635 struct rb_node *prev;
1636 struct btrfs_inode *entry;
1637 struct inode *inode;
1639 spin_lock(&root->inode_lock);
1641 node = root->inode_tree.rb_node;
1645 entry = rb_entry(node, struct btrfs_inode, rb_node);
1647 if (objectid < btrfs_ino(entry))
1648 node = node->rb_left;
1649 else if (objectid > btrfs_ino(entry))
1650 node = node->rb_right;
1656 entry = rb_entry(prev, struct btrfs_inode, rb_node);
1657 if (objectid <= btrfs_ino(entry)) {
1661 prev = rb_next(prev);
1665 entry = rb_entry(node, struct btrfs_inode, rb_node);
1666 inode = igrab(&entry->vfs_inode);
1668 spin_unlock(&root->inode_lock);
1672 objectid = btrfs_ino(entry) + 1;
1673 if (cond_resched_lock(&root->inode_lock))
1676 node = rb_next(node);
1678 spin_unlock(&root->inode_lock);
1682 static int in_block_group(u64 bytenr, struct btrfs_block_group *block_group)
1684 if (bytenr >= block_group->start &&
1685 bytenr < block_group->start + block_group->length)
1691 * get new location of data
1693 static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1694 u64 bytenr, u64 num_bytes)
1696 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1697 struct btrfs_path *path;
1698 struct btrfs_file_extent_item *fi;
1699 struct extent_buffer *leaf;
1702 path = btrfs_alloc_path();
1706 bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1707 ret = btrfs_lookup_file_extent(NULL, root, path,
1708 btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
1716 leaf = path->nodes[0];
1717 fi = btrfs_item_ptr(leaf, path->slots[0],
1718 struct btrfs_file_extent_item);
1720 BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1721 btrfs_file_extent_compression(leaf, fi) ||
1722 btrfs_file_extent_encryption(leaf, fi) ||
1723 btrfs_file_extent_other_encoding(leaf, fi));
1725 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1730 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1733 btrfs_free_path(path);
1738 * update file extent items in the tree leaf to point to
1739 * the new locations.
1741 static noinline_for_stack
1742 int replace_file_extents(struct btrfs_trans_handle *trans,
1743 struct reloc_control *rc,
1744 struct btrfs_root *root,
1745 struct extent_buffer *leaf)
1747 struct btrfs_fs_info *fs_info = root->fs_info;
1748 struct btrfs_key key;
1749 struct btrfs_file_extent_item *fi;
1750 struct inode *inode = NULL;
1762 if (rc->stage != UPDATE_DATA_PTRS)
1765 /* reloc trees always use full backref */
1766 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1767 parent = leaf->start;
1771 nritems = btrfs_header_nritems(leaf);
1772 for (i = 0; i < nritems; i++) {
1773 struct btrfs_ref ref = { 0 };
1776 btrfs_item_key_to_cpu(leaf, &key, i);
1777 if (key.type != BTRFS_EXTENT_DATA_KEY)
1779 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1780 if (btrfs_file_extent_type(leaf, fi) ==
1781 BTRFS_FILE_EXTENT_INLINE)
1783 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1784 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1787 if (!in_block_group(bytenr, rc->block_group))
1791 * if we are modifying a block in the fs tree, wait for readpage
1792 * to complete and drop the extent cache
1794 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1796 inode = find_next_inode(root, key.objectid);
1798 } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
1799 btrfs_add_delayed_iput(inode);
1800 inode = find_next_inode(root, key.objectid);
1802 if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
1804 btrfs_file_extent_num_bytes(leaf, fi);
1805 WARN_ON(!IS_ALIGNED(key.offset,
1806 fs_info->sectorsize));
1807 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1809 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1814 btrfs_drop_extent_cache(BTRFS_I(inode),
1815 key.offset, end, 1);
1816 unlock_extent(&BTRFS_I(inode)->io_tree,
1821 ret = get_new_location(rc->data_inode, &new_bytenr,
1825 * Don't have to abort since we've not changed anything
1826 * in the file extent yet.
1831 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1834 key.offset -= btrfs_file_extent_offset(leaf, fi);
1835 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1837 ref.real_root = root->root_key.objectid;
1838 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1839 key.objectid, key.offset);
1840 ret = btrfs_inc_extent_ref(trans, &ref);
1842 btrfs_abort_transaction(trans, ret);
1846 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1848 ref.real_root = root->root_key.objectid;
1849 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1850 key.objectid, key.offset);
1851 ret = btrfs_free_extent(trans, &ref);
1853 btrfs_abort_transaction(trans, ret);
1858 btrfs_mark_buffer_dirty(leaf);
1860 btrfs_add_delayed_iput(inode);
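/*
 * Compare the key of the pointer at @slot in @eb with the key of the
 * pointer @path points at on @level.  A return value of 0 means the two
 * blocks cover the same key range and are candidates for being swapped
 * by replace_path().
 */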
1864 static noinline_for_stack
1865 int memcmp_node_keys(struct extent_buffer *eb, int slot,
1866 struct btrfs_path *path, int level)
1868 struct btrfs_disk_key key1;
1869 struct btrfs_disk_key key2;
1870 btrfs_node_key(eb, &key1, slot);
1871 btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1872 return memcmp(&key1, &key2, sizeof(key1));
1876 * try to replace tree blocks in the fs tree with the new blocks
1877 * in the reloc tree. tree blocks that haven't been modified since the
1878 * reloc tree was created can be replaced.
1880 * if a block was replaced, level of the block + 1 is returned.
1881 * if no block got replaced, 0 is returned. if there are other
1882 * errors, a negative error number is returned.
1884 static noinline_for_stack
1885 int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
1886 struct btrfs_root *dest, struct btrfs_root *src,
1887 struct btrfs_path *path, struct btrfs_key *next_key,
1888 int lowest_level, int max_level)
1890 struct btrfs_fs_info *fs_info = dest->fs_info;
1891 struct extent_buffer *eb;
1892 struct extent_buffer *parent;
1893 struct btrfs_ref ref = { 0 };
1894 struct btrfs_key key;
1906 BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1907 BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1909 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1911 slot = path->slots[lowest_level];
1912 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1914 eb = btrfs_lock_root_node(dest);
1915 btrfs_set_lock_blocking_write(eb);
1916 level = btrfs_header_level(eb);
1918 if (level < lowest_level) {
1919 btrfs_tree_unlock(eb);
1920 free_extent_buffer(eb);
1925 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
1928 btrfs_set_lock_blocking_write(eb);
1931 next_key->objectid = (u64)-1;
1932 next_key->type = (u8)-1;
1933 next_key->offset = (u64)-1;
1938 struct btrfs_key first_key;
1940 level = btrfs_header_level(parent);
1941 BUG_ON(level < lowest_level);
1943 ret = btrfs_bin_search(parent, &key, level, &slot);
1946 if (ret && slot > 0)
1949 if (next_key && slot + 1 < btrfs_header_nritems(parent))
1950 btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1952 old_bytenr = btrfs_node_blockptr(parent, slot);
1953 blocksize = fs_info->nodesize;
1954 old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1955 btrfs_node_key_to_cpu(parent, &first_key, slot);
1957 if (level <= max_level) {
1958 eb = path->nodes[level];
1959 new_bytenr = btrfs_node_blockptr(eb,
1960 path->slots[level]);
1961 new_ptr_gen = btrfs_node_ptr_generation(eb,
1962 path->slots[level]);
1968 if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1973 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1974 memcmp_node_keys(parent, slot, path, level)) {
1975 if (level <= lowest_level) {
1980 eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen,
1981 level - 1, &first_key);
1985 } else if (!extent_buffer_uptodate(eb)) {
1987 free_extent_buffer(eb);
1990 btrfs_tree_lock(eb);
1992 ret = btrfs_cow_block(trans, dest, eb, parent,
1996 btrfs_set_lock_blocking_write(eb);
1998 btrfs_tree_unlock(parent);
1999 free_extent_buffer(parent);
2006 btrfs_tree_unlock(parent);
2007 free_extent_buffer(parent);
2012 btrfs_node_key_to_cpu(path->nodes[level], &key,
2013 path->slots[level]);
2014 btrfs_release_path(path);
2016 path->lowest_level = level;
2017 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
2018 path->lowest_level = 0;
2022 * Inform qgroup to trace both subtrees.
2024 * We must trace both trees.
2025 * 1) Tree reloc subtree
2026 * If not traced, we will leak data numbers
2027 * 2) Fs subtree
2028 * If not traced, we will double count old data
2030 * We don't scan the subtree right now, but only record
2031 * the swapped tree blocks.
2032 * The real subtree rescan is delayed until we have new
2033 * CoW on the subtree root node before transaction commit.
2035 ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
2036 rc->block_group, parent, slot,
2037 path->nodes[level], path->slots[level],
2042 * swap blocks in fs tree and reloc tree.
2044 btrfs_set_node_blockptr(parent, slot, new_bytenr);
2045 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
2046 btrfs_mark_buffer_dirty(parent);
2048 btrfs_set_node_blockptr(path->nodes[level],
2049 path->slots[level], old_bytenr);
2050 btrfs_set_node_ptr_generation(path->nodes[level],
2051 path->slots[level], old_ptr_gen);
2052 btrfs_mark_buffer_dirty(path->nodes[level]);
2054 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
2055 blocksize, path->nodes[level]->start);
2056 ref.skip_qgroup = true;
2057 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
2058 ret = btrfs_inc_extent_ref(trans, &ref);
2060 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
2062 ref.skip_qgroup = true;
2063 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
2064 ret = btrfs_inc_extent_ref(trans, &ref);
2067 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
2068 blocksize, path->nodes[level]->start);
2069 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
2070 ref.skip_qgroup = true;
2071 ret = btrfs_free_extent(trans, &ref);
2074 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
2076 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
2077 ref.skip_qgroup = true;
2078 ret = btrfs_free_extent(trans, &ref);
2081 btrfs_unlock_up_safe(path, 0);
2086 btrfs_tree_unlock(parent);
2087 free_extent_buffer(parent);
2092 * helper to find next relocated block in reloc tree
2094 static noinline_for_stack
2095 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
2098 struct extent_buffer *eb;
2103 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2105 for (i = 0; i < *level; i++) {
2106 free_extent_buffer(path->nodes[i]);
2107 path->nodes[i] = NULL;
2110 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
2111 eb = path->nodes[i];
2112 nritems = btrfs_header_nritems(eb);
2113 while (path->slots[i] + 1 < nritems) {
2115 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
2122 free_extent_buffer(path->nodes[i]);
2123 path->nodes[i] = NULL;
2129 * walk down reloc tree to find relocated block of lowest level
2131 static noinline_for_stack
2132 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
2135 struct btrfs_fs_info *fs_info = root->fs_info;
2136 struct extent_buffer *eb = NULL;
2143 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2145 for (i = *level; i > 0; i--) {
2146 struct btrfs_key first_key;
2148 eb = path->nodes[i];
2149 nritems = btrfs_header_nritems(eb);
2150 while (path->slots[i] < nritems) {
2151 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
2152 if (ptr_gen > last_snapshot)
2156 if (path->slots[i] >= nritems) {
2167 bytenr = btrfs_node_blockptr(eb, path->slots[i]);
2168 btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]);
2169 eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1,
2173 } else if (!extent_buffer_uptodate(eb)) {
2174 free_extent_buffer(eb);
2177 BUG_ON(btrfs_header_level(eb) != i - 1);
2178 path->nodes[i - 1] = eb;
2179 path->slots[i - 1] = 0;
2185 * invalidate extent cache for file extents whose key is in the range of
2186 * [min_key, max_key)
2188 static int invalidate_extent_cache(struct btrfs_root *root,
2189 struct btrfs_key *min_key,
2190 struct btrfs_key *max_key)
2192 struct btrfs_fs_info *fs_info = root->fs_info;
2193 struct inode *inode = NULL;
2198 objectid = min_key->objectid;
2203 if (objectid > max_key->objectid)
2206 inode = find_next_inode(root, objectid);
2209 ino = btrfs_ino(BTRFS_I(inode));
2211 if (ino > max_key->objectid) {
2217 if (!S_ISREG(inode->i_mode))
2220 if (unlikely(min_key->objectid == ino)) {
2221 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
2223 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
2226 start = min_key->offset;
2227 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
2233 if (unlikely(max_key->objectid == ino)) {
2234 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
2236 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
2239 if (max_key->offset == 0)
2241 end = max_key->offset;
2242 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
2249 /* the lock_extent waits for readpage to complete */
2250 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2251 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
2252 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2257 static int find_next_key(struct btrfs_path *path, int level,
2258 struct btrfs_key *key)
2261 while (level < BTRFS_MAX_LEVEL) {
2262 if (!path->nodes[level])
2264 if (path->slots[level] + 1 <
2265 btrfs_header_nritems(path->nodes[level])) {
2266 btrfs_node_key_to_cpu(path->nodes[level], key,
2267 path->slots[level] + 1);
2276 * Insert current subvolume into reloc_control::dirty_subvol_roots
2278 static void insert_dirty_subvol(struct btrfs_trans_handle *trans,
2279 struct reloc_control *rc,
2280 struct btrfs_root *root)
2282 struct btrfs_root *reloc_root = root->reloc_root;
2283 struct btrfs_root_item *reloc_root_item;
2285 /* @root must be a subvolume tree root with a valid reloc tree */
2286 ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
2289 reloc_root_item = &reloc_root->root_item;
2290 memset(&reloc_root_item->drop_progress, 0,
2291 sizeof(reloc_root_item->drop_progress));
2292 reloc_root_item->drop_level = 0;
2293 btrfs_set_root_refs(reloc_root_item, 0);
2294 btrfs_update_reloc_root(trans, root);
2296 if (list_empty(&root->reloc_dirty_list)) {
2297 btrfs_grab_root(root);
2298 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
2302 static int clean_dirty_subvols(struct reloc_control *rc)
2304 struct btrfs_root *root;
2305 struct btrfs_root *next;
2309 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
2311 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2312 /* Merged subvolume, cleanup its reloc root */
2313 struct btrfs_root *reloc_root = root->reloc_root;
2315 list_del_init(&root->reloc_dirty_list);
2316 root->reloc_root = NULL;
2318 * Need barrier to ensure clear_bit() only happens after
2319 * root->reloc_root = NULL. Pairs with have_reloc_root.
2322 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
2325 * btrfs_drop_snapshot drops the ref we hold for
2326 * ->reloc_root. If it fails, however, we must
2327 * drop the ref ourselves.
2329 ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
2331 btrfs_put_root(reloc_root);
2336 btrfs_put_root(root);
2338 /* Orphan reloc tree, just clean it up */
2339 ret2 = btrfs_drop_snapshot(root, 0, 1);
2341 btrfs_put_root(root);
2351 * merge the relocated tree blocks in a reloc tree with the corresponding
2352 * fs tree.
2354 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
2355 struct btrfs_root *root)
2357 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2358 struct btrfs_key key;
2359 struct btrfs_key next_key;
2360 struct btrfs_trans_handle *trans = NULL;
2361 struct btrfs_root *reloc_root;
2362 struct btrfs_root_item *root_item;
2363 struct btrfs_path *path;
2364 struct extent_buffer *leaf;
2372 path = btrfs_alloc_path();
2375 path->reada = READA_FORWARD;
2377 reloc_root = root->reloc_root;
2378 root_item = &reloc_root->root_item;
2380 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2381 level = btrfs_root_level(root_item);
2382 atomic_inc(&reloc_root->node->refs);
2383 path->nodes[level] = reloc_root->node;
2384 path->slots[level] = 0;
2386 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2388 level = root_item->drop_level;
2390 path->lowest_level = level;
2391 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
2392 path->lowest_level = 0;
2394 btrfs_free_path(path);
2398 btrfs_node_key_to_cpu(path->nodes[level], &next_key,
2399 path->slots[level]);
2400 WARN_ON(memcmp(&key, &next_key, sizeof(key)));
2402 btrfs_unlock_up_safe(path, 0);
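/*
 * Reserve enough metadata space to COW one path of maximal height in
 * both the fs tree and the reloc tree (hence BTRFS_MAX_LEVEL - 1 nodes,
 * times two) on every iteration of the loop below.
 */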
2405 min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2406 memset(&next_key, 0, sizeof(next_key));
2409 ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
2410 BTRFS_RESERVE_FLUSH_ALL);
2415 trans = btrfs_start_transaction(root, 0);
2416 if (IS_ERR(trans)) {
2417 err = PTR_ERR(trans);
2423 * At this point we no longer have a reloc_control, so we can't
2424 * depend on btrfs_init_reloc_root to update our last_trans.
2426 * But that's ok, we started the trans handle on our
2427 * corresponding fs_root, which means it's been added to the
2428 * dirty list. At commit time we'll still call
2429 * btrfs_update_reloc_root() and update our root item
2430 * appropriately.
2432 reloc_root->last_trans = trans->transid;
2433 trans->block_rsv = rc->block_rsv;
2438 ret = walk_down_reloc_tree(reloc_root, path, &level);
2446 if (!find_next_key(path, level, &key) &&
2447 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
2450 ret = replace_path(trans, rc, root, reloc_root, path,
2451 &next_key, level, max_level);
2460 btrfs_node_key_to_cpu(path->nodes[level], &key,
2461 path->slots[level]);
2465 ret = walk_up_reloc_tree(reloc_root, path, &level);
2471 * save the merging progress in the drop_progress.
2472 * this is OK since root refs == 1 in this case.
2474 btrfs_node_key(path->nodes[level], &root_item->drop_progress,
2475 path->slots[level]);
2476 root_item->drop_level = level;
2478 btrfs_end_transaction_throttle(trans);
2481 btrfs_btree_balance_dirty(fs_info);
2483 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2484 invalidate_extent_cache(root, &key, &next_key);
2488 * handle the case where only one block in the fs tree needs to be
2489 * relocated and that block is the tree root.
2491 leaf = btrfs_lock_root_node(root);
2492 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
2493 btrfs_tree_unlock(leaf);
2494 free_extent_buffer(leaf);
2498 btrfs_free_path(path);
2501 insert_dirty_subvol(trans, rc, root);
2504 btrfs_end_transaction_throttle(trans);
2506 btrfs_btree_balance_dirty(fs_info);
2508 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2509 invalidate_extent_cache(root, &key, &next_key);
2514 static noinline_for_stack
2515 int prepare_to_merge(struct reloc_control *rc, int err)
2517 struct btrfs_root *root = rc->extent_root;
2518 struct btrfs_fs_info *fs_info = root->fs_info;
2519 struct btrfs_root *reloc_root;
2520 struct btrfs_trans_handle *trans;
2521 LIST_HEAD(reloc_roots);
2525 mutex_lock(&fs_info->reloc_mutex);
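/*
 * Size the reservation for merging the reloc trees: one maximal-height
 * tree path for both the fs tree and the reloc tree, plus roughly twice
 * the amount of tree blocks already relocated, since merging may COW
 * blocks in both trees.
 */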
2526 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2527 rc->merging_rsv_size += rc->nodes_relocated * 2;
2528 mutex_unlock(&fs_info->reloc_mutex);
2532 num_bytes = rc->merging_rsv_size;
2533 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
2534 BTRFS_RESERVE_FLUSH_ALL);
2539 trans = btrfs_join_transaction(rc->extent_root);
2540 if (IS_ERR(trans)) {
2542 btrfs_block_rsv_release(fs_info, rc->block_rsv,
2544 return PTR_ERR(trans);
2548 if (num_bytes != rc->merging_rsv_size) {
2549 btrfs_end_transaction(trans);
2550 btrfs_block_rsv_release(fs_info, rc->block_rsv,
2556 rc->merge_reloc_tree = 1;
2558 while (!list_empty(&rc->reloc_roots)) {
2559 reloc_root = list_entry(rc->reloc_roots.next,
2560 struct btrfs_root, root_list);
2561 list_del_init(&reloc_root->root_list);
2563 root = read_fs_root(fs_info, reloc_root->root_key.offset);
2564 BUG_ON(IS_ERR(root));
2565 BUG_ON(root->reloc_root != reloc_root);
2568 * set reference count to 1, so btrfs_recover_relocation
2569 * knows it should resume merging
2572 btrfs_set_root_refs(&reloc_root->root_item, 1);
2573 btrfs_update_reloc_root(trans, root);
2575 list_add(&reloc_root->root_list, &reloc_roots);
2576 btrfs_put_root(root);
2579 list_splice(&reloc_roots, &rc->reloc_roots);
2582 btrfs_commit_transaction(trans);
2584 btrfs_end_transaction(trans);
2588 static noinline_for_stack
2589 void free_reloc_roots(struct list_head *list)
2591 struct btrfs_root *reloc_root;
2593 while (!list_empty(list)) {
2594 reloc_root = list_entry(list->next, struct btrfs_root,
2596 __del_reloc_root(reloc_root);
2600 static noinline_for_stack
2601 void merge_reloc_roots(struct reloc_control *rc)
2603 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2604 struct btrfs_root *root;
2605 struct btrfs_root *reloc_root;
2606 LIST_HEAD(reloc_roots);
2610 root = rc->extent_root;
2613 * this serializes us with btrfs_record_root_in_transaction,
2614 * we have to make sure nobody is in the middle of
2615 * adding their roots to the list while we are doing this splice
2618 mutex_lock(&fs_info->reloc_mutex);
2619 list_splice_init(&rc->reloc_roots, &reloc_roots);
2620 mutex_unlock(&fs_info->reloc_mutex);
2622 while (!list_empty(&reloc_roots)) {
2624 reloc_root = list_entry(reloc_roots.next,
2625 struct btrfs_root, root_list);
2627 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
2628 root = read_fs_root(fs_info,
2629 reloc_root->root_key.offset);
2630 BUG_ON(IS_ERR(root));
2631 BUG_ON(root->reloc_root != reloc_root);
2633 ret = merge_reloc_root(rc, root);
2634 btrfs_put_root(root);
2636 if (list_empty(&reloc_root->root_list))
2637 list_add_tail(&reloc_root->root_list,
2642 list_del_init(&reloc_root->root_list);
2643 /* Don't forget to queue this reloc root for cleanup */
2644 list_add_tail(&reloc_root->reloc_dirty_list,
2645 &rc->dirty_subvol_roots);
2655 btrfs_handle_fs_error(fs_info, ret, NULL);
2656 if (!list_empty(&reloc_roots))
2657 free_reloc_roots(&reloc_roots);
2659 /* new reloc root may be added */
2660 mutex_lock(&fs_info->reloc_mutex);
2661 list_splice_init(&rc->reloc_roots, &reloc_roots);
2662 mutex_unlock(&fs_info->reloc_mutex);
2663 if (!list_empty(&reloc_roots))
2664 free_reloc_roots(&reloc_roots);
 * We used to have
 *
2670 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
2672 * here, but it's wrong. If we fail to start the transaction in
2673 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
2674 * have actually been removed from the reloc_root_tree rb tree. This is
2675 * fine because we're bailing here, and we hold a reference on the root
2676 * for the list that holds it, so these roots will be cleaned up when we
2677 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
2678 * will be cleaned up on unmount.
2680 * The remaining nodes will be cleaned up by free_reloc_control.
2684 static void free_block_list(struct rb_root *blocks)
2686 struct tree_block *block;
2687 struct rb_node *rb_node;
2688 while ((rb_node = rb_first(blocks))) {
2689 block = rb_entry(rb_node, struct tree_block, rb_node);
2690 rb_erase(rb_node, blocks);
2695 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2696 struct btrfs_root *reloc_root)
2698 struct btrfs_fs_info *fs_info = reloc_root->fs_info;
2699 struct btrfs_root *root;
2702 if (reloc_root->last_trans == trans->transid)
2705 root = read_fs_root(fs_info, reloc_root->root_key.offset);
2706 BUG_ON(IS_ERR(root));
2707 BUG_ON(root->reloc_root != reloc_root);
2708 ret = btrfs_record_root_in_trans(trans, root);
2709 btrfs_put_root(root);
2714 static noinline_for_stack
2715 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2716 struct reloc_control *rc,
2717 struct backref_node *node,
2718 struct backref_edge *edges[])
2720 struct backref_node *next;
2721 struct btrfs_root *root;
2727 next = walk_up_backref(next, edges, &index);
2730 BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));
2732 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2733 record_reloc_root_in_trans(trans, root);
2737 btrfs_record_root_in_trans(trans, root);
2738 root = root->reloc_root;
2740 if (next->new_bytenr != root->node->start) {
2741 BUG_ON(next->new_bytenr);
2742 BUG_ON(!list_empty(&next->list));
2743 next->new_bytenr = root->node->start;
2744 btrfs_put_root(next->root);
2745 next->root = btrfs_grab_root(root);
2747 list_add_tail(&next->list,
2748 &rc->backref_cache.changed);
2749 __mark_block_processed(rc, next);
2755 next = walk_down_backref(edges, &index);
2756 if (!next || next->level <= node->level)
2763 /* setup backref node path for btrfs_reloc_cow_block */
2765 rc->backref_cache.path[next->level] = next;
2768 next = edges[index]->node[UPPER];
2774 * select a tree root for relocation. return NULL if the block
2775 * is reference counted. we should use do_relocation() in this
2776 * case. return a tree root pointer if the block isn't reference
2777 * counted. return -ENOENT if the block is root of reloc tree.
2779 static noinline_for_stack
2780 struct btrfs_root *select_one_root(struct backref_node *node)
2782 struct backref_node *next;
2783 struct btrfs_root *root;
2784 struct btrfs_root *fs_root = NULL;
2785 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2791 next = walk_up_backref(next, edges, &index);
2795 /* no other choice for a non-reference counted tree */
2796 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
2799 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2805 next = walk_down_backref(edges, &index);
2806 if (!next || next->level <= node->level)
2811 return ERR_PTR(-ENOENT);
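/*
 * Estimate how much metadata relocating @node may dirty: walk up the
 * backref edges from @node and count one nodesize for the block and for
 * each not yet processed upper level block.  Used by
 * reserve_metadata_space() below to size the reservation.
 */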
2815 static noinline_for_stack
2816 u64 calcu_metadata_size(struct reloc_control *rc,
2817 struct backref_node *node, int reserve)
2819 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2820 struct backref_node *next = node;
2821 struct backref_edge *edge;
2822 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2826 BUG_ON(reserve && node->processed);
2831 if (next->processed && (reserve || next != node))
2834 num_bytes += fs_info->nodesize;
2836 if (list_empty(&next->upper))
2839 edge = list_entry(next->upper.next,
2840 struct backref_edge, list[LOWER]);
2841 edges[index++] = edge;
2842 next = edge->node[UPPER];
2844 next = walk_down_backref(edges, &index);
2849 static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2850 struct reloc_control *rc,
2851 struct backref_node *node)
2853 struct btrfs_root *root = rc->extent_root;
2854 struct btrfs_fs_info *fs_info = root->fs_info;
2859 num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2861 trans->block_rsv = rc->block_rsv;
2862 rc->reserved_bytes += num_bytes;
2865 * We are under a transaction here so we can only do limited flushing.
2866 * If we get an enospc just kick back -EAGAIN so we know to drop the
2867 * transaction and try to refill when we can flush all the things.
2869 ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
2870 BTRFS_RESERVE_FLUSH_LIMIT);
2872 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2873 while (tmp <= rc->reserved_bytes)
2876 * only one thread can access block_rsv at this point,
2877 * so we don't need to hold a lock to protect block_rsv.
2878 * We expand the reservation size here to allow enough
2879 * space for relocation, and we will return early in the enospc case.
2882 rc->block_rsv->size = tmp + fs_info->nodesize *
2883 RELOCATION_RESERVED_NODES;
2891 * relocate a tree block, and then update pointers in upper level
2892 * blocks that reference the block to point to the new location.
2894 * if called by link_to_upper, the block has already been relocated.
2895 * in that case this function just updates pointers.
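 *
 * Roughly: for each upper level block referencing @node (walked via the
 * backref edges), pick the matching reloc root, locate the slot that
 * still points at the old bytenr, then either COW the child down from
 * the reloc tree (lowest, not yet relocated block) or rewrite the block
 * pointer and move the extent references with
 * btrfs_inc_extent_ref()/btrfs_drop_subtree().
 */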
2897 static int do_relocation(struct btrfs_trans_handle *trans,
2898 struct reloc_control *rc,
2899 struct backref_node *node,
2900 struct btrfs_key *key,
2901 struct btrfs_path *path, int lowest)
2903 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2904 struct backref_node *upper;
2905 struct backref_edge *edge;
2906 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2907 struct btrfs_root *root;
2908 struct extent_buffer *eb;
2916 BUG_ON(lowest && node->eb);
2918 path->lowest_level = node->level + 1;
2919 rc->backref_cache.path[node->level] = node;
2920 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2921 struct btrfs_key first_key;
2922 struct btrfs_ref ref = { 0 };
2926 upper = edge->node[UPPER];
2927 root = select_reloc_root(trans, rc, upper, edges);
2930 if (upper->eb && !upper->locked) {
2932 ret = btrfs_bin_search(upper->eb, key,
2933 upper->level, &slot);
2939 bytenr = btrfs_node_blockptr(upper->eb, slot);
2940 if (node->eb->start == bytenr)
2943 drop_node_buffer(upper);
2947 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2954 btrfs_release_path(path);
2959 upper->eb = path->nodes[upper->level];
2960 path->nodes[upper->level] = NULL;
2962 BUG_ON(upper->eb != path->nodes[upper->level]);
2966 path->locks[upper->level] = 0;
2968 slot = path->slots[upper->level];
2969 btrfs_release_path(path);
2971 ret = btrfs_bin_search(upper->eb, key, upper->level,
2980 bytenr = btrfs_node_blockptr(upper->eb, slot);
2982 if (bytenr != node->bytenr) {
2983 btrfs_err(root->fs_info,
2984 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2985 bytenr, node->bytenr, slot,
2991 if (node->eb->start == bytenr)
2995 blocksize = root->fs_info->nodesize;
2996 generation = btrfs_node_ptr_generation(upper->eb, slot);
2997 btrfs_node_key_to_cpu(upper->eb, &first_key, slot);
2998 eb = read_tree_block(fs_info, bytenr, generation,
2999 upper->level - 1, &first_key);
3003 } else if (!extent_buffer_uptodate(eb)) {
3004 free_extent_buffer(eb);
3008 btrfs_tree_lock(eb);
3009 btrfs_set_lock_blocking_write(eb);
3012 ret = btrfs_cow_block(trans, root, eb, upper->eb,
3014 btrfs_tree_unlock(eb);
3015 free_extent_buffer(eb);
3020 BUG_ON(node->eb != eb);
3022 btrfs_set_node_blockptr(upper->eb, slot,
3024 btrfs_set_node_ptr_generation(upper->eb, slot,
3026 btrfs_mark_buffer_dirty(upper->eb);
3028 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
3029 node->eb->start, blocksize,
3031 ref.real_root = root->root_key.objectid;
3032 btrfs_init_tree_ref(&ref, node->level,
3033 btrfs_header_owner(upper->eb));
3034 ret = btrfs_inc_extent_ref(trans, &ref);
3037 ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
3041 if (!upper->pending)
3042 drop_node_buffer(upper);
3044 unlock_node_buffer(upper);
3049 if (!err && node->pending) {
3050 drop_node_buffer(node);
3051 list_move_tail(&node->list, &rc->backref_cache.changed);
3055 path->lowest_level = 0;
3056 BUG_ON(err == -ENOSPC);
3060 static int link_to_upper(struct btrfs_trans_handle *trans,
3061 struct reloc_control *rc,
3062 struct backref_node *node,
3063 struct btrfs_path *path)
3065 struct btrfs_key key;
3067 btrfs_node_key_to_cpu(node->eb, &key, 0);
3068 return do_relocation(trans, rc, node, &key, path, 0);
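/*
 * Pending blocks have been COWed but their parents may still point at
 * the old location.  Walk the per-level pending lists from the bottom up
 * and call link_to_upper() on each node so the parent pointers are
 * updated too.
 */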
3071 static int finish_pending_nodes(struct btrfs_trans_handle *trans,
3072 struct reloc_control *rc,
3073 struct btrfs_path *path, int err)
3076 struct backref_cache *cache = &rc->backref_cache;
3077 struct backref_node *node;
3081 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
3082 while (!list_empty(&cache->pending[level])) {
3083 node = list_entry(cache->pending[level].next,
3084 struct backref_node, list);
3085 list_move_tail(&node->list, &list);
3086 BUG_ON(!node->pending);
3089 ret = link_to_upper(trans, rc, node, path);
3094 list_splice_init(&list, &cache->pending[level]);
3099 static void mark_block_processed(struct reloc_control *rc,
3100 u64 bytenr, u32 blocksize)
3102 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
3106 static void __mark_block_processed(struct reloc_control *rc,
3107 struct backref_node *node)
3110 if (node->level == 0 ||
3111 in_block_group(node->bytenr, rc->block_group)) {
3112 blocksize = rc->extent_root->fs_info->nodesize;
3113 mark_block_processed(rc, node->bytenr, blocksize);
3115 node->processed = 1;
3119 * mark a block and all blocks that directly/indirectly reference it as processed.
3122 static void update_processed_blocks(struct reloc_control *rc,
3123 struct backref_node *node)
3125 struct backref_node *next = node;
3126 struct backref_edge *edge;
3127 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
3133 if (next->processed)
3136 __mark_block_processed(rc, next);
3138 if (list_empty(&next->upper))
3141 edge = list_entry(next->upper.next,
3142 struct backref_edge, list[LOWER]);
3143 edges[index++] = edge;
3144 next = edge->node[UPPER];
3146 next = walk_down_backref(edges, &index);
3150 static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
3152 u32 blocksize = rc->extent_root->fs_info->nodesize;
3154 if (test_range_bit(&rc->processed_blocks, bytenr,
3155 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
3160 static int get_tree_block_key(struct btrfs_fs_info *fs_info,
3161 struct tree_block *block)
3163 struct extent_buffer *eb;
3165 eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
3166 block->level, NULL);
3169 } else if (!extent_buffer_uptodate(eb)) {
3170 free_extent_buffer(eb);
3173 if (block->level == 0)
3174 btrfs_item_key_to_cpu(eb, &block->key, 0);
3176 btrfs_node_key_to_cpu(eb, &block->key, 0);
3177 free_extent_buffer(eb);
3178 block->key_ready = 1;
3183 * helper function to relocate a tree block
3185 static int relocate_tree_block(struct btrfs_trans_handle *trans,
3186 struct reloc_control *rc,
3187 struct backref_node *node,
3188 struct btrfs_key *key,
3189 struct btrfs_path *path)
3191 struct btrfs_root *root;
3198 * If we fail here we want to drop our backref_node because we are going
3199 * to start over and regenerate the tree for it.
3201 ret = reserve_metadata_space(trans, rc, node);
3205 BUG_ON(node->processed);
3206 root = select_one_root(node);
3207 if (root == ERR_PTR(-ENOENT)) {
3208 update_processed_blocks(rc, node);
3213 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
3214 BUG_ON(node->new_bytenr);
3215 BUG_ON(!list_empty(&node->list));
3216 btrfs_record_root_in_trans(trans, root);
3217 root = root->reloc_root;
3218 node->new_bytenr = root->node->start;
3219 btrfs_put_root(node->root);
3220 node->root = btrfs_grab_root(root);
3222 list_add_tail(&node->list, &rc->backref_cache.changed);
3224 path->lowest_level = node->level;
3225 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
3226 btrfs_release_path(path);
3231 update_processed_blocks(rc, node);
3233 ret = do_relocation(trans, rc, node, key, path, 1);
3236 if (ret || node->level == 0 || node->cowonly)
3237 remove_backref_node(&rc->backref_cache, node);
3242 * relocate a list of blocks
3244 static noinline_for_stack
3245 int relocate_tree_blocks(struct btrfs_trans_handle *trans,
3246 struct reloc_control *rc, struct rb_root *blocks)
3248 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3249 struct backref_node *node;
3250 struct btrfs_path *path;
3251 struct tree_block *block;
3252 struct tree_block *next;
3256 path = btrfs_alloc_path();
3259 goto out_free_blocks;
3262 /* Kick in readahead for tree blocks with missing keys */
3263 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
3264 if (!block->key_ready)
3265 readahead_tree_block(fs_info, block->bytenr);
3268 /* Get first keys */
3269 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
3270 if (!block->key_ready) {
3271 err = get_tree_block_key(fs_info, block);
3277 /* Do tree relocation */
3278 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
3279 node = build_backref_tree(rc, &block->key,
3280 block->level, block->bytenr);
3282 err = PTR_ERR(node);
3286 ret = relocate_tree_block(trans, rc, node, &block->key,
3294 err = finish_pending_nodes(trans, rc, path, err);
3297 btrfs_free_path(path);
3299 free_block_list(blocks);
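/*
 * Preallocate file extents in the data reloc inode covering the cluster.
 * Data space is reserved for the whole cluster range up front; the gaps
 * between cluster boundaries are handed back with
 * btrfs_free_reserved_data_space().
 */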
3303 static noinline_for_stack
3304 int prealloc_file_extent_cluster(struct inode *inode,
3305 struct file_extent_cluster *cluster)
3310 u64 offset = BTRFS_I(inode)->index_cnt;
3314 u64 prealloc_start = cluster->start - offset;
3315 u64 prealloc_end = cluster->end - offset;
3317 struct extent_changeset *data_reserved = NULL;
3319 BUG_ON(cluster->start != cluster->boundary[0]);
3322 ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start,
3323 prealloc_end + 1 - prealloc_start);
3327 cur_offset = prealloc_start;
3328 while (nr < cluster->nr) {
3329 start = cluster->boundary[nr] - offset;
3330 if (nr + 1 < cluster->nr)
3331 end = cluster->boundary[nr + 1] - 1 - offset;
3333 end = cluster->end - offset;
3335 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
3336 num_bytes = end + 1 - start;
3337 if (cur_offset < start)
3338 btrfs_free_reserved_data_space(inode, data_reserved,
3339 cur_offset, start - cur_offset);
3340 ret = btrfs_prealloc_file_range(inode, 0, start,
3341 num_bytes, num_bytes,
3342 end + 1, &alloc_hint);
3343 cur_offset = end + 1;
3344 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
3349 if (cur_offset < prealloc_end)
3350 btrfs_free_reserved_data_space(inode, data_reserved,
3351 cur_offset, prealloc_end + 1 - cur_offset);
3353 inode_unlock(inode);
3354 extent_changeset_free(data_reserved);
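/*
 * Insert a pinned extent map for [start, end] pointing at @block_start,
 * the current disk location of the cluster, dropping any overlapping
 * cached mappings.  This way reading the data reloc inode's pages pulls
 * in the existing data, which writeback then copies into the
 * preallocated extents.
 */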
3358 static noinline_for_stack
3359 int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
3362 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3363 struct extent_map *em;
3366 em = alloc_extent_map();
3371 em->len = end + 1 - start;
3372 em->block_len = em->len;
3373 em->block_start = block_start;
3374 set_bit(EXTENT_FLAG_PINNED, &em->flags);
3376 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
3378 write_lock(&em_tree->lock);
3379 ret = add_extent_mapping(em_tree, em, 0);
3380 write_unlock(&em_tree->lock);
3381 if (ret != -EEXIST) {
3382 free_extent_map(em);
3385 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
3387 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
3392 * Allow error injection to test balance cancellation
3394 int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
3396 return atomic_read(&fs_info->balance_cancel_req);
3398 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
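/*
 * Copy one cluster through the page cache of the data reloc inode:
 * preallocate the target range, map the pages to the old disk location,
 * then for every page reserve delalloc metadata, read it in, mark it
 * delalloc and dirty it.  Cluster boundaries get the EXTENT_BOUNDARY bit
 * so the original extent boundaries are preserved when the dirty pages
 * are written back into the preallocated space.
 */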
3400 static int relocate_file_extent_cluster(struct inode *inode,
3401 struct file_extent_cluster *cluster)
3403 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3406 u64 offset = BTRFS_I(inode)->index_cnt;
3407 unsigned long index;
3408 unsigned long last_index;
3410 struct file_ra_state *ra;
3411 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
3418 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3422 ret = prealloc_file_extent_cluster(inode, cluster);
3426 file_ra_state_init(ra, inode->i_mapping);
3428 ret = setup_extent_mapping(inode, cluster->start - offset,
3429 cluster->end - offset, cluster->start);
3433 index = (cluster->start - offset) >> PAGE_SHIFT;
3434 last_index = (cluster->end - offset) >> PAGE_SHIFT;
3435 while (index <= last_index) {
3436 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
3441 page = find_lock_page(inode->i_mapping, index);
3443 page_cache_sync_readahead(inode->i_mapping,
3445 last_index + 1 - index);
3446 page = find_or_create_page(inode->i_mapping, index,
3449 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3451 btrfs_delalloc_release_extents(BTRFS_I(inode),
3458 if (PageReadahead(page)) {
3459 page_cache_async_readahead(inode->i_mapping,
3460 ra, NULL, page, index,
3461 last_index + 1 - index);
3464 if (!PageUptodate(page)) {
3465 btrfs_readpage(NULL, page);
3467 if (!PageUptodate(page)) {
3470 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3472 btrfs_delalloc_release_extents(BTRFS_I(inode),
3479 page_start = page_offset(page);
3480 page_end = page_start + PAGE_SIZE - 1;
3482 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
3484 set_page_extent_mapped(page);
3486 if (nr < cluster->nr &&
3487 page_start + offset == cluster->boundary[nr]) {
3488 set_extent_bits(&BTRFS_I(inode)->io_tree,
3489 page_start, page_end,
3494 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
3499 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3501 btrfs_delalloc_release_extents(BTRFS_I(inode),
3504 clear_extent_bits(&BTRFS_I(inode)->io_tree,
3505 page_start, page_end,
3506 EXTENT_LOCKED | EXTENT_BOUNDARY);
3510 set_page_dirty(page);
3512 unlock_extent(&BTRFS_I(inode)->io_tree,
3513 page_start, page_end);
3518 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
3519 balance_dirty_pages_ratelimited(inode->i_mapping);
3520 btrfs_throttle(fs_info);
3521 if (btrfs_should_cancel_balance(fs_info)) {
3526 WARN_ON(nr != cluster->nr);
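/*
 * Group data extents into clusters.  A new extent is appended to the
 * current cluster as long as it is physically contiguous with it;
 * otherwise, or once MAX_EXTENTS boundaries have been collected, the
 * cluster is relocated via relocate_file_extent_cluster() and a new one
 * is started.
 */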
3532 static noinline_for_stack
3533 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3534 struct file_extent_cluster *cluster)
3538 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3539 ret = relocate_file_extent_cluster(inode, cluster);
3546 cluster->start = extent_key->objectid;
3548 BUG_ON(cluster->nr >= MAX_EXTENTS);
3549 cluster->end = extent_key->objectid + extent_key->offset - 1;
3550 cluster->boundary[cluster->nr] = extent_key->objectid;
3553 if (cluster->nr >= MAX_EXTENTS) {
3554 ret = relocate_file_extent_cluster(inode, cluster);
3563 * helper to add a tree block to the list.
3564 * the major work is getting the generation and level of the block
3566 static int add_tree_block(struct reloc_control *rc,
3567 struct btrfs_key *extent_key,
3568 struct btrfs_path *path,
3569 struct rb_root *blocks)
3571 struct extent_buffer *eb;
3572 struct btrfs_extent_item *ei;
3573 struct btrfs_tree_block_info *bi;
3574 struct tree_block *block;
3575 struct rb_node *rb_node;
3580 eb = path->nodes[0];
3581 item_size = btrfs_item_size_nr(eb, path->slots[0]);
3583 if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3584 item_size >= sizeof(*ei) + sizeof(*bi)) {
3585 ei = btrfs_item_ptr(eb, path->slots[0],
3586 struct btrfs_extent_item);
3587 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3588 bi = (struct btrfs_tree_block_info *)(ei + 1);
3589 level = btrfs_tree_block_level(eb, bi);
3591 level = (int)extent_key->offset;
3593 generation = btrfs_extent_generation(eb, ei);
3594 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3595 btrfs_print_v0_err(eb->fs_info);
3596 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
3602 btrfs_release_path(path);
3604 BUG_ON(level == -1);
3606 block = kmalloc(sizeof(*block), GFP_NOFS);
3610 block->bytenr = extent_key->objectid;
3611 block->key.objectid = rc->extent_root->fs_info->nodesize;
3612 block->key.offset = generation;
3613 block->level = level;
3614 block->key_ready = 0;
3616 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
3618 backref_tree_panic(rb_node, -EEXIST, block->bytenr);
3624 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3626 static int __add_tree_block(struct reloc_control *rc,
3627 u64 bytenr, u32 blocksize,
3628 struct rb_root *blocks)
3630 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3631 struct btrfs_path *path;
3632 struct btrfs_key key;
3634 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3636 if (tree_block_processed(bytenr, rc))
3639 if (tree_search(blocks, bytenr))
3642 path = btrfs_alloc_path();
3646 key.objectid = bytenr;
3648 key.type = BTRFS_METADATA_ITEM_KEY;
3649 key.offset = (u64)-1;
3651 key.type = BTRFS_EXTENT_ITEM_KEY;
3652 key.offset = blocksize;
3655 path->search_commit_root = 1;
3656 path->skip_locking = 1;
3657 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3661 if (ret > 0 && skinny) {
3662 if (path->slots[0]) {
3664 btrfs_item_key_to_cpu(path->nodes[0], &key,
3666 if (key.objectid == bytenr &&
3667 (key.type == BTRFS_METADATA_ITEM_KEY ||
3668 (key.type == BTRFS_EXTENT_ITEM_KEY &&
3669 key.offset == blocksize)))
3675 btrfs_release_path(path);
3681 btrfs_print_leaf(path->nodes[0]);
3683 "tree block extent item (%llu) is not found in extent tree",
3690 ret = add_tree_block(rc, &key, path, blocks);
3692 btrfs_free_path(path);
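/*
 * Drop the v1 free space cache inode of @block_group (looked up by key
 * if @inode is not supplied) by truncating it in a joined transaction,
 * so the cache's data extents don't get in the way of relocating the
 * block group.
 */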
3696 static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3697 struct btrfs_block_group *block_group,
3698 struct inode *inode,
3701 struct btrfs_key key;
3702 struct btrfs_root *root = fs_info->tree_root;
3703 struct btrfs_trans_handle *trans;
3710 key.type = BTRFS_INODE_ITEM_KEY;
3713 inode = btrfs_iget(fs_info->sb, &key, root);
3718 ret = btrfs_check_trunc_cache_free_space(fs_info,
3719 &fs_info->global_block_rsv);
3723 trans = btrfs_join_transaction(root);
3724 if (IS_ERR(trans)) {
3725 ret = PTR_ERR(trans);
3729 ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3731 btrfs_end_transaction(trans);
3732 btrfs_btree_balance_dirty(fs_info);
3739 * Locate the free space cache EXTENT_DATA in root tree leaf and delete the
3740 * cache inode, to avoid free space cache data extent blocking data relocation.
3742 static int delete_v1_space_cache(struct extent_buffer *leaf,
3743 struct btrfs_block_group *block_group,
3746 u64 space_cache_ino;
3747 struct btrfs_file_extent_item *ei;
3748 struct btrfs_key key;
3753 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3756 for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3757 btrfs_item_key_to_cpu(leaf, &key, i);
3758 if (key.type != BTRFS_EXTENT_DATA_KEY)
3760 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3761 if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_REG &&
3762 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3764 space_cache_ino = key.objectid;
3770 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3776 * helper to find all tree blocks that reference a given data extent
3778 static noinline_for_stack
3779 int add_data_references(struct reloc_control *rc,
3780 struct btrfs_key *extent_key,
3781 struct btrfs_path *path,
3782 struct rb_root *blocks)
3784 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3785 struct ulist *leaves = NULL;
3786 struct ulist_iterator leaf_uiter;
3787 struct ulist_node *ref_node = NULL;
3788 const u32 blocksize = fs_info->nodesize;
3791 btrfs_release_path(path);
3792 ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid,
3793 0, &leaves, NULL, true);
3797 ULIST_ITER_INIT(&leaf_uiter);
3798 while ((ref_node = ulist_next(leaves, &leaf_uiter))) {
3799 struct extent_buffer *eb;
3801 eb = read_tree_block(fs_info, ref_node->val, 0, 0, NULL);
3806 ret = delete_v1_space_cache(eb, rc->block_group,
3807 extent_key->objectid);
3808 free_extent_buffer(eb);
3811 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3816 free_block_list(blocks);
3822 * helper to find next unprocessed extent
3824 static noinline_for_stack
3825 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3826 struct btrfs_key *extent_key)
3828 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3829 struct btrfs_key key;
3830 struct extent_buffer *leaf;
3831 u64 start, end, last;
3834 last = rc->block_group->start + rc->block_group->length;
3837 if (rc->search_start >= last) {
3842 key.objectid = rc->search_start;
3843 key.type = BTRFS_EXTENT_ITEM_KEY;
3846 path->search_commit_root = 1;
3847 path->skip_locking = 1;
3848 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3853 leaf = path->nodes[0];
3854 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3855 ret = btrfs_next_leaf(rc->extent_root, path);
3858 leaf = path->nodes[0];
3861 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3862 if (key.objectid >= last) {
3867 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3868 key.type != BTRFS_METADATA_ITEM_KEY) {
3873 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3874 key.objectid + key.offset <= rc->search_start) {
3879 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3880 key.objectid + fs_info->nodesize <=
3886 ret = find_first_extent_bit(&rc->processed_blocks,
3887 key.objectid, &start, &end,
3888 EXTENT_DIRTY, NULL);
3890 if (ret == 0 && start <= key.objectid) {
3891 btrfs_release_path(path);
3892 rc->search_start = end + 1;
3894 if (key.type == BTRFS_EXTENT_ITEM_KEY)
3895 rc->search_start = key.objectid + key.offset;
3897 rc->search_start = key.objectid +
3899 memcpy(extent_key, &key, sizeof(key));
3903 btrfs_release_path(path);
3907 static void set_reloc_control(struct reloc_control *rc)
3909 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3911 mutex_lock(&fs_info->reloc_mutex);
3912 fs_info->reloc_ctl = rc;
3913 mutex_unlock(&fs_info->reloc_mutex);
3916 static void unset_reloc_control(struct reloc_control *rc)
3918 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3920 mutex_lock(&fs_info->reloc_mutex);
3921 fs_info->reloc_ctl = NULL;
3922 mutex_unlock(&fs_info->reloc_mutex);
3925 static int check_extent_flags(u64 flags)
3927 if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3928 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3930 if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
3931 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3933 if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3934 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
3939 static noinline_for_stack
3940 int prepare_to_relocate(struct reloc_control *rc)
3942 struct btrfs_trans_handle *trans;
3945 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3946 BTRFS_BLOCK_RSV_TEMP);
3950 memset(&rc->cluster, 0, sizeof(rc->cluster));
3951 rc->search_start = rc->block_group->start;
3952 rc->extents_found = 0;
3953 rc->nodes_relocated = 0;
3954 rc->merging_rsv_size = 0;
3955 rc->reserved_bytes = 0;
3956 rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3957 RELOCATION_RESERVED_NODES;
3958 ret = btrfs_block_rsv_refill(rc->extent_root,
3959 rc->block_rsv, rc->block_rsv->size,
3960 BTRFS_RESERVE_FLUSH_ALL);
3964 rc->create_reloc_tree = 1;
3965 set_reloc_control(rc);
3967 trans = btrfs_join_transaction(rc->extent_root);
3968 if (IS_ERR(trans)) {
3969 unset_reloc_control(rc);
3971 * extent tree is not a ref_cow tree and has no reloc_root to
3972 * cleanup. And callers are responsible for freeing the above block rsv.
3975 return PTR_ERR(trans);
3977 btrfs_commit_transaction(trans);
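/*
 * Main relocation loop: refill the block reserve, start a transaction,
 * find the next unprocessed extent in the block group, queue the tree
 * blocks that reference it (add_tree_block()/add_data_references()) and
 * relocate them, and for data extents build clusters that are copied
 * through the data reloc inode.  Once no extents are left,
 * prepare_to_merge() and merge_reloc_roots() swap the reloc trees back
 * into the fs trees.
 */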
3981 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3983 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3984 struct rb_root blocks = RB_ROOT;
3985 struct btrfs_key key;
3986 struct btrfs_trans_handle *trans = NULL;
3987 struct btrfs_path *path;
3988 struct btrfs_extent_item *ei;
3995 path = btrfs_alloc_path();
3998 path->reada = READA_FORWARD;
4000 ret = prepare_to_relocate(rc);
4007 rc->reserved_bytes = 0;
4008 ret = btrfs_block_rsv_refill(rc->extent_root,
4009 rc->block_rsv, rc->block_rsv->size,
4010 BTRFS_RESERVE_FLUSH_ALL);
4016 trans = btrfs_start_transaction(rc->extent_root, 0);
4017 if (IS_ERR(trans)) {
4018 err = PTR_ERR(trans);
4023 if (update_backref_cache(trans, &rc->backref_cache)) {
4024 btrfs_end_transaction(trans);
4029 ret = find_next_extent(rc, path, &key);
4035 rc->extents_found++;
4037 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
4038 struct btrfs_extent_item);
4039 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
4040 if (item_size >= sizeof(*ei)) {
4041 flags = btrfs_extent_flags(path->nodes[0], ei);
4042 ret = check_extent_flags(flags);
4044 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
4046 btrfs_print_v0_err(trans->fs_info);
4047 btrfs_abort_transaction(trans, err);
4053 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
4054 ret = add_tree_block(rc, &key, path, &blocks);
4055 } else if (rc->stage == UPDATE_DATA_PTRS &&
4056 (flags & BTRFS_EXTENT_FLAG_DATA)) {
4057 ret = add_data_references(rc, &key, path, &blocks);
4059 btrfs_release_path(path);
4067 if (!RB_EMPTY_ROOT(&blocks)) {
4068 ret = relocate_tree_blocks(trans, rc, &blocks);
4070 if (ret != -EAGAIN) {
4074 rc->extents_found--;
4075 rc->search_start = key.objectid;
4079 btrfs_end_transaction_throttle(trans);
4080 btrfs_btree_balance_dirty(fs_info);
4083 if (rc->stage == MOVE_DATA_EXTENTS &&
4084 (flags & BTRFS_EXTENT_FLAG_DATA)) {
4085 rc->found_file_extent = 1;
4086 ret = relocate_data_extent(rc->data_inode,
4087 &key, &rc->cluster);
4093 if (btrfs_should_cancel_balance(fs_info)) {
4098 if (trans && progress && err == -ENOSPC) {
4099 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
4107 btrfs_release_path(path);
4108 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
4111 btrfs_end_transaction_throttle(trans);
4112 btrfs_btree_balance_dirty(fs_info);
4116 ret = relocate_file_extent_cluster(rc->data_inode,
4122 rc->create_reloc_tree = 0;
4123 set_reloc_control(rc);
4125 backref_cache_cleanup(&rc->backref_cache);
4126 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
4129 * Even in the case when the relocation is cancelled, we should still go
4130 * through prepare_to_merge() and merge_reloc_roots().
4132 * For error (including cancelled balance), prepare_to_merge() will
4133 * mark all reloc trees orphan, then queue them for cleanup in
4134 * merge_reloc_roots()
4136 err = prepare_to_merge(rc, err);
4138 merge_reloc_roots(rc);
4140 rc->merge_reloc_tree = 0;
4141 unset_reloc_control(rc);
4142 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
4144 /* get rid of pinned extents */
4145 trans = btrfs_join_transaction(rc->extent_root);
4146 if (IS_ERR(trans)) {
4147 err = PTR_ERR(trans);
4150 btrfs_commit_transaction(trans);
4152 ret = clean_dirty_subvols(rc);
4153 if (ret < 0 && !err)
4155 btrfs_free_block_rsv(fs_info, rc->block_rsv);
4156 btrfs_free_path(path);
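/*
 * Insert a minimal inode item (regular file, mode 0600, size 0, with the
 * NOCOMPRESS and PREALLOC flags) that will back the data relocation
 * inode created below.
 */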
4160 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
4161 struct btrfs_root *root, u64 objectid)
4163 struct btrfs_path *path;
4164 struct btrfs_inode_item *item;
4165 struct extent_buffer *leaf;
4168 path = btrfs_alloc_path();
4172 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
4176 leaf = path->nodes[0];
4177 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
4178 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
4179 btrfs_set_inode_generation(leaf, item, 1);
4180 btrfs_set_inode_size(leaf, item, 0);
4181 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
4182 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
4183 BTRFS_INODE_PREALLOC);
4184 btrfs_mark_buffer_dirty(leaf);
4186 btrfs_free_path(path);
4191 * helper to create inode for data relocation.
4192 * the inode is in data relocation tree and its link count is 0
4194 static noinline_for_stack
4195 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
4196 struct btrfs_block_group *group)
4198 struct inode *inode = NULL;
4199 struct btrfs_trans_handle *trans;
4200 struct btrfs_root *root;
4201 struct btrfs_key key;
4205 root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
4207 return ERR_CAST(root);
4209 trans = btrfs_start_transaction(root, 6);
4210 if (IS_ERR(trans)) {
4211 btrfs_put_root(root);
4212 return ERR_CAST(trans);
4215 err = btrfs_find_free_objectid(root, &objectid);
4219 err = __insert_orphan_inode(trans, root, objectid);
4222 key.objectid = objectid;
4223 key.type = BTRFS_INODE_ITEM_KEY;
4225 inode = btrfs_iget(fs_info->sb, &key, root);
4226 BUG_ON(IS_ERR(inode));
4227 BTRFS_I(inode)->index_cnt = group->start;
4229 err = btrfs_orphan_add(trans, BTRFS_I(inode));
4231 btrfs_put_root(root);
4232 btrfs_end_transaction(trans);
4233 btrfs_btree_balance_dirty(fs_info);
4237 inode = ERR_PTR(err);
4242 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
4244 struct reloc_control *rc;
4246 rc = kzalloc(sizeof(*rc), GFP_NOFS);
4250 INIT_LIST_HEAD(&rc->reloc_roots);
4251 INIT_LIST_HEAD(&rc->dirty_subvol_roots);
4252 backref_cache_init(&rc->backref_cache);
4253 mapping_tree_init(&rc->reloc_root_tree);
4254 extent_io_tree_init(fs_info, &rc->processed_blocks,
4255 IO_TREE_RELOC_BLOCKS, NULL);
4259 static void free_reloc_control(struct reloc_control *rc)
4261 struct mapping_node *node, *tmp;
4263 free_reloc_roots(&rc->reloc_roots);
4264 rbtree_postorder_for_each_entry_safe(node, tmp,
4265 &rc->reloc_root_tree.rb_root, rb_node)
4272 * Print the block group being relocated
4274 static void describe_relocation(struct btrfs_fs_info *fs_info,
4275 struct btrfs_block_group *block_group)
4277 char buf[128] = {'\0'};
4279 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
4282 "relocating block group %llu flags %s",
4283 block_group->start, buf);
4286 static const char *stage_to_string(int stage)
4288 if (stage == MOVE_DATA_EXTENTS)
4289 return "move data extents";
4290 if (stage == UPDATE_DATA_PTRS)
4291 return "update data pointers";
4296 * function to relocate all extents in a block group.
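 *
 * Relocation runs in up to two passes: MOVE_DATA_EXTENTS copies the data
 * and relocates tree blocks, then UPDATE_DATA_PTRS relocates the
 * metadata that still points at the old data location.
 */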
4298 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
4300 struct btrfs_block_group *bg;
4301 struct btrfs_root *extent_root = fs_info->extent_root;
4302 struct reloc_control *rc;
4303 struct inode *inode;
4304 struct btrfs_path *path;
4309 bg = btrfs_lookup_block_group(fs_info, group_start);
4313 if (btrfs_pinned_by_swapfile(fs_info, bg)) {
4314 btrfs_put_block_group(bg);
4318 rc = alloc_reloc_control(fs_info);
4320 btrfs_put_block_group(bg);
4324 rc->extent_root = extent_root;
4325 rc->block_group = bg;
4327 ret = btrfs_inc_block_group_ro(rc->block_group, true);
4334 path = btrfs_alloc_path();
4340 inode = lookup_free_space_inode(rc->block_group, path);
4341 btrfs_free_path(path);
4344 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
4346 ret = PTR_ERR(inode);
4348 if (ret && ret != -ENOENT) {
4353 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4354 if (IS_ERR(rc->data_inode)) {
4355 err = PTR_ERR(rc->data_inode);
4356 rc->data_inode = NULL;
4360 describe_relocation(fs_info, rc->block_group);
4362 btrfs_wait_block_group_reservations(rc->block_group);
4363 btrfs_wait_nocow_writers(rc->block_group);
4364 btrfs_wait_ordered_roots(fs_info, U64_MAX,
4365 rc->block_group->start,
4366 rc->block_group->length);
4371 mutex_lock(&fs_info->cleaner_mutex);
4372 ret = relocate_block_group(rc);
4373 mutex_unlock(&fs_info->cleaner_mutex);
4377 finishes_stage = rc->stage;
4379 * We may have gotten ENOSPC after we already dirtied some
4380 * extents. If writeout happens while we're relocating a
4381 * different block group we could end up hitting the
4382 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4383 * btrfs_reloc_cow_block. Make sure we write everything out
4384 * properly so we don't trip over this problem, and then break
4385 * out of the loop if we hit an error.
4387 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4388 ret = btrfs_wait_ordered_range(rc->data_inode, 0,
4392 invalidate_mapping_pages(rc->data_inode->i_mapping,
4394 rc->stage = UPDATE_DATA_PTRS;
4400 if (rc->extents_found == 0)
4403 btrfs_info(fs_info, "found %llu extents, stage: %s",
4404 rc->extents_found, stage_to_string(finishes_stage));
4407 WARN_ON(rc->block_group->pinned > 0);
4408 WARN_ON(rc->block_group->reserved > 0);
4409 WARN_ON(rc->block_group->used > 0);
4412 btrfs_dec_block_group_ro(rc->block_group);
4413 iput(rc->data_inode);
4414 btrfs_put_block_group(rc->block_group);
4415 free_reloc_control(rc);
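/*
 * An interrupted relocation may leave a reloc root whose fs root no
 * longer exists.  Clear its refs and drop_progress in the root item so
 * the recovery code below treats it as garbage to be deleted instead of
 * a tree to be merged.
 */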
4419 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4421 struct btrfs_fs_info *fs_info = root->fs_info;
4422 struct btrfs_trans_handle *trans;
4425 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4427 return PTR_ERR(trans);
4429 memset(&root->root_item.drop_progress, 0,
4430 sizeof(root->root_item.drop_progress));
4431 root->root_item.drop_level = 0;
4432 btrfs_set_root_refs(&root->root_item, 0);
4433 ret = btrfs_update_root(trans, fs_info->tree_root,
4434 &root->root_key, &root->root_item);
4436 err = btrfs_end_transaction(trans);
4443 * recover relocation interrupted by system crash.
4445 * this function resumes merging reloc trees with corresponding fs trees.
4446 * this is important for keeping the sharing of tree blocks
4448 int btrfs_recover_relocation(struct btrfs_root *root)
4450 struct btrfs_fs_info *fs_info = root->fs_info;
4451 LIST_HEAD(reloc_roots);
4452 struct btrfs_key key;
4453 struct btrfs_root *fs_root;
4454 struct btrfs_root *reloc_root;
4455 struct btrfs_path *path;
4456 struct extent_buffer *leaf;
4457 struct reloc_control *rc = NULL;
4458 struct btrfs_trans_handle *trans;
4462 path = btrfs_alloc_path();
4465 path->reada = READA_BACK;
4467 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4468 key.type = BTRFS_ROOT_ITEM_KEY;
4469 key.offset = (u64)-1;
4472 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4479 if (path->slots[0] == 0)
4483 leaf = path->nodes[0];
4484 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4485 btrfs_release_path(path);
4487 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4488 key.type != BTRFS_ROOT_ITEM_KEY)
4491 reloc_root = btrfs_read_tree_root(root, &key);
4492 if (IS_ERR(reloc_root)) {
4493 err = PTR_ERR(reloc_root);
4497 set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state);
4498 list_add(&reloc_root->root_list, &reloc_roots);
4500 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4501 fs_root = read_fs_root(fs_info,
4502 reloc_root->root_key.offset);
4503 if (IS_ERR(fs_root)) {
4504 ret = PTR_ERR(fs_root);
4505 if (ret != -ENOENT) {
4509 ret = mark_garbage_root(reloc_root);
4515 btrfs_put_root(fs_root);
4519 if (key.offset == 0)
4524 btrfs_release_path(path);
4526 if (list_empty(&reloc_roots))
4529 rc = alloc_reloc_control(fs_info);
4535 rc->extent_root = fs_info->extent_root;
4537 set_reloc_control(rc);
4539 trans = btrfs_join_transaction(rc->extent_root);
4540 if (IS_ERR(trans)) {
4541 err = PTR_ERR(trans);
4545 rc->merge_reloc_tree = 1;
4547 while (!list_empty(&reloc_roots)) {
4548 reloc_root = list_entry(reloc_roots.next,
4549 struct btrfs_root, root_list);
4550 list_del(&reloc_root->root_list);
4552 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4553 list_add_tail(&reloc_root->root_list,
4558 fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
4559 if (IS_ERR(fs_root)) {
4560 err = PTR_ERR(fs_root);
4561 list_add_tail(&reloc_root->root_list, &reloc_roots);
4565 err = __add_reloc_root(reloc_root);
4566 BUG_ON(err < 0); /* -ENOMEM or logic error */
4567 fs_root->reloc_root = btrfs_grab_root(reloc_root);
4568 btrfs_put_root(fs_root);
4571 err = btrfs_commit_transaction(trans);
4575 merge_reloc_roots(rc);
4577 unset_reloc_control(rc);
4579 trans = btrfs_join_transaction(rc->extent_root);
4580 if (IS_ERR(trans)) {
4581 err = PTR_ERR(trans);
4584 err = btrfs_commit_transaction(trans);
4586 ret = clean_dirty_subvols(rc);
4587 if (ret < 0 && !err)
4590 unset_reloc_control(rc);
4591 free_reloc_control(rc);
4593 if (!list_empty(&reloc_roots))
4594 free_reloc_roots(&reloc_roots);
4596 btrfs_free_path(path);
4599 /* cleanup orphan inode in data relocation tree */
4600 fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
4601 if (IS_ERR(fs_root)) {
4602 err = PTR_ERR(fs_root);
4604 err = btrfs_orphan_cleanup(fs_root);
4605 btrfs_put_root(fs_root);
4612 * helper to add ordered checksum for data relocation.
4614 * cloning the existing checksums properly handles the nodatasum extents.
4615 * it also saves the CPU time of re-calculating the checksums.
4617 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4619 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4620 struct btrfs_ordered_sum *sums;
4621 struct btrfs_ordered_extent *ordered;
4627 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4628 BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
4630 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
4631 ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
4632 disk_bytenr + len - 1, &list, 0);
4636 while (!list_empty(&list)) {
4637 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4638 list_del_init(&sums->list);
4641 * We need to offset the new_bytenr based on where the csum is.
4642 * We need to do this because we will read in entire prealloc
4643 * extents but we may have written to say the middle of the
4644 * prealloc extent, so we need to make sure the csum goes with
4645 * the right disk offset.
4647 * We can do this because the data reloc inode refers strictly
4648 * to the on disk bytes, so we don't have to worry about
4649 * disk_len vs real len like with real inodes since it's all
4652 new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
4653 sums->bytenr = new_bytenr;
4655 btrfs_add_ordered_sum(ordered, sums);
4658 btrfs_put_ordered_extent(ordered);
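/*
 * Hook called whenever a tree block is COWed while relocation is active.
 * For reloc tree blocks the new bytenr is recorded in the backref cache
 * and the node is queued as pending so its parent pointers get fixed up
 * later.  For leaves COWed for the first time during the UPDATE_DATA_PTRS
 * stage, replace_file_extents() rewrites the file extent items to point
 * at the relocated data.
 */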
4662 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4663 struct btrfs_root *root, struct extent_buffer *buf,
4664 struct extent_buffer *cow)
4666 struct btrfs_fs_info *fs_info = root->fs_info;
4667 struct reloc_control *rc;
4668 struct backref_node *node;
4673 rc = fs_info->reloc_ctl;
4677 BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
4678 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
4680 level = btrfs_header_level(buf);
4681 if (btrfs_header_generation(buf) <=
4682 btrfs_root_last_snapshot(&root->root_item))
4685 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4686 rc->create_reloc_tree) {
4687 WARN_ON(!first_cow && level == 0);
4689 node = rc->backref_cache.path[level];
4690 BUG_ON(node->bytenr != buf->start &&
4691 node->new_bytenr != buf->start);
4693 drop_node_buffer(node);
4694 atomic_inc(&cow->refs);
4696 node->new_bytenr = cow->start;
4698 if (!node->pending) {
4699 list_move_tail(&node->list,
4700 &rc->backref_cache.pending[level]);
4705 __mark_block_processed(rc, node);
4707 if (first_cow && level > 0)
4708 rc->nodes_relocated += buf->len;
4711 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4712 ret = replace_file_extents(trans, rc, root, cow);
4717 * called before creating snapshot. it calculates metadata reservation
4718 * required for relocating tree blocks in the snapshot
4720 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4721 u64 *bytes_to_reserve)
4723 struct btrfs_root *root = pending->root;
4724 struct reloc_control *rc = root->fs_info->reloc_ctl;
4726 if (!rc || !have_reloc_root(root))
4729 if (!rc->merge_reloc_tree)
4732 root = root->reloc_root;
4733 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4735 * relocation is in the stage of merging trees. the space
4736 * used by merging a reloc tree is twice the size of
4737 * relocated tree nodes in the worst case. half for cowing
4738 * the reloc tree, half for cowing the fs tree. the space
4739 * used by cowing the reloc tree will be freed after the
4740 * tree is dropped. if we create snapshot, cowing the fs
4741 * tree may use more space than it frees, so we need to
4742 * reserve extra space.
4744 *bytes_to_reserve += rc->nodes_relocated;
4748 * called after snapshot is created. migrate block reservation
4749 * and create reloc root for the newly created snapshot
4751 * This is similar to btrfs_init_reloc_root(), we come out of here with two
4752 * references held on the reloc_root, one for root->reloc_root and one for
4755 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4756 struct btrfs_pending_snapshot *pending)
4758 struct btrfs_root *root = pending->root;
4759 struct btrfs_root *reloc_root;
4760 struct btrfs_root *new_root;
4761 struct reloc_control *rc = root->fs_info->reloc_ctl;
4764 if (!rc || !have_reloc_root(root))
4767 rc = root->fs_info->reloc_ctl;
4768 rc->merging_rsv_size += rc->nodes_relocated;
4770 if (rc->merge_reloc_tree) {
4771 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4773 rc->nodes_relocated, true);
4778 new_root = pending->snap;
4779 reloc_root = create_reloc_root(trans, root->reloc_root,
4780 new_root->root_key.objectid);
4781 if (IS_ERR(reloc_root))
4782 return PTR_ERR(reloc_root);
4784 ret = __add_reloc_root(reloc_root);
4786 new_root->reloc_root = btrfs_grab_root(reloc_root);
4788 if (rc->create_reloc_tree)
4789 ret = clone_backref_node(trans, rc, root, reloc_root);