// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "qgroup.h"
#include "tree-mod-log.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);

static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);

	/*
	 * csum type is validated at mount time
	 */
	return btrfs_csums[t].size;
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name.
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}
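/*
 * Illustrative sketch (not part of the original file): how the csum helpers
 * above are typically consumed. The superblock's csum type indexes the
 * btrfs_csums table, e.g. for BTRFS_CSUM_TYPE_XXHASH:
 *
 *	u16 t = btrfs_super_csum_type(sb);
 *
 *	btrfs_super_csum_size(sb);	returns 8
 *	btrfs_super_csum_name(t);	returns "xxhash64"
 *	btrfs_super_csum_driver(t);	returns "xxhash64" (no .driver set)
 */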
struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
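/*
 * Typical calling pattern for the path API (a sketch, not from this file;
 * error handling trimmed):
 *
 *	struct btrfs_path *path;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	... use path->nodes[0] / path->slots[0] ...
 *	btrfs_free_path(path);	releases locks, refs and the path itself
 */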
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree. A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree. See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear. It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
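/*
 * Caller sketch (illustrative only): the returned buffer is guaranteed to
 * stay allocated, but not to remain the root, so callers that need a stable
 * root must lock it instead (btrfs_lock_root_node):
 *
 *	struct extent_buffer *eb = btrfs_root_node(root);
 *
 *	... read-only peeking at eb ...
 *	free_extent_buffer(eb);		drop the reference we took
 */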
/*
 * Cowonly roots (not-shareable trees, everything that is not a subvolume or
 * reloc root) just get put onto a simple dirty list. The transaction walks
 * this list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid. The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}
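/*
 * Example (hedged, for illustration): in a subvolume that was snapshotted at
 * generation 100, a block with btrfs_header_generation() == 90 may still be
 * referenced by both the subvolume and the snapshot, so it can be shared; a
 * block written at generation 110 (and not created by relocation) is known
 * to belong to this tree alone.
 */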
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
	}

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	else
		flags = 0;

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, buf,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clean_tree_block(buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block. The parent block (if
 * supplied) is updated to point to the new cow copy. The new buffer is marked
 * dirty and returned locked. If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow. This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct extent_buffer *buf,
				      struct extent_buffer *parent, int parent_slot,
				      struct extent_buffer **cow_ret,
				      u64 search_start, u64 empty_size,
				      enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     root->root_key.objectid, &disk_key, level,
				     search_start, empty_size, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		atomic_inc(&cow->refs);
		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_tree_mod_log_insert_key(parent, parent_slot,
					      BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = btrfs_tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_tree_unlock(cow);
				free_extent_buffer(cow);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}
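/*
 * Concrete case (illustrative): a block that was already COWed earlier in
 * this transaction has btrfs_header_generation(buf) == trans->transid and no
 * WRITTEN flag yet, so unless the root forces COW it can be modified in
 * place. This is what keeps each block COWed at most once per transaction.
 */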
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
		btrfs_err(fs_info,
			  "COW'ing blocks on a fs root that's being dropped");

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     trans->transid,
		     fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also, we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = __btrfs_cow_block(trans, root, buf, parent,
				parent_slot, cow_ret, search_start, 0, nest);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
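/*
 * Example (illustrative): with a 16K nodesize, close_blocks(0, 16384, 16384)
 * returns 1 because the gap between the two blocks is 16384 - (0 + 16384) = 0,
 * well under the 32K threshold; close_blocks(0, 1048576, 16384) returns 0.
 */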
#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is same as CPU order and
 * we can avoid the conversion.
 */
static int comp_keys(const struct btrfs_disk_key *disk_key,
		     const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
#endif

/*
 * same as comp_keys only with two btrfs_key's
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
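/*
 * Example (illustrative): keys order first by objectid, then type, then
 * offset, so (256 DIR_ITEM 0) < (256 EXTENT_DATA 0) < (257 INODE_ITEM 0),
 * because DIR_ITEM (84) < EXTENT_DATA (108) and objectid 256 < 257 dominates
 * the type comparison in the last pair.
 */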
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_read_node_slot(parent, i);
		if (IS_ERR(cur))
			return PTR_ERR(cur);
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize),
					BTRFS_NESTING_COW);
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * search for key in the extent_buffer. The items start at offset p,
 * and they are item_size apart. There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p, int item_size,
				       const struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	while (low < high) {
		unsigned long oip;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oip = offset_in_page(offset);

		if (oip + key_size <= PAGE_SIZE) {
			const unsigned long idx = get_eb_page_index(offset);
			char *kaddr = page_address(eb->pages[idx]);

			oip = get_eb_offset_in_page(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oip);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int *slot)
{
	if (btrfs_header_level(eb) == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}
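/*
 * Usage sketch (not part of the original file): btrfs_bin_search() returns 0
 * with *slot set to the match, or 1 with *slot set to the insertion position
 * (which may equal nritems when the key is bigger than all keys):
 *
 *	int slot;
 *	int ret = btrfs_bin_search(eb, &key, &slot);
 *
 *	if (ret == 0)
 *		... key found at slot ...
 *	else if (ret > 0)
 *		... key not found, would be inserted at slot ...
 */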
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the blocks it points to. The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;
	struct btrfs_key first_key;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	BUG_ON(level == 0);

	btrfs_node_key_to_cpu(parent, &first_key, slot);
	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     btrfs_header_owner(parent),
			     btrfs_node_ptr_generation(parent, slot),
			     level - 1, &first_key);
	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = ERR_PTR(-EIO);
	}

	return eb;
}
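/*
 * Caller sketch (illustrative): the child comes back referenced but
 * unlocked, so a typical caller locks it before use and must handle the
 * error pointer case:
 *
 *	struct extent_buffer *child = btrfs_read_node_slot(parent, slot);
 *
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	btrfs_tree_lock(child);
 *	... use child ...
 *	btrfs_tree_unlock(child);
 *	free_extent_buffer(child);
 */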
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion. We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = btrfs_read_node_slot(mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}

		btrfs_tree_lock(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		btrfs_clean_tree_block(mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	left = btrfs_read_node_slot(parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	if (left) {
		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left,
				       BTRFS_NESTING_LEFT_COW);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	right = btrfs_read_node_slot(parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	if (right) {
		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right,
				       BTRFS_NESTING_RIGHT_COW);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			btrfs_clean_tree_block(right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete. A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}
		wret = balance_node_right(trans, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		btrfs_clean_tree_block(mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
				BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
		BUG_ON(ret < 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			atomic_inc(&left->refs);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion. Here we only split or push nodes around
 * when they are completely full. This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = btrfs_read_node_slot(parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left,
					      BTRFS_NESTING_LEFT_COW);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = btrfs_read_node_slot(parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right, BTRFS_NESTING_RIGHT_COW);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 nread_max;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	/*
	 * Since the time between visiting leaves is much shorter than the time
	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
	 * much IO at once (possibly random).
	 */
	if (path->reada == READA_FORWARD_ALWAYS) {
		if (level > 1)
			nread_max = node->fs_info->nodesize;
		else
			nread_max = SZ_128K;
	} else {
		nread_max = SZ_64K;
	}

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	eb = find_extent_buffer(fs_info, search);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD ||
			   path->reada == READA_FORWARD_ALWAYS) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if (path->reada == READA_FORWARD_ALWAYS ||
		    (search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			btrfs_readahead_node_child(node, nr);
			nread += blocksize;
		}
		nscan++;
		if (nread > nread_max || nscan > 32)
			break;
	}
}

static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
	struct extent_buffer *parent;
	int slot;
	int nritems;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0)
		btrfs_readahead_node_child(parent, slot - 1);
	if (slot + 1 < nritems)
		btrfs_readahead_node_child(parent, slot + 1);
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree. The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block. This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
/*
 * helper function for btrfs_search_slot. The goal is to find a block
 * in cache without setting the path to blocking. If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada. -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 blocknr;
	u64 gen;
	struct extent_buffer *tmp;
	struct btrfs_key first_key;
	int ret;
	int parent_level;

	blocknr = btrfs_node_blockptr(*eb_ret, slot);
	gen = btrfs_node_ptr_generation(*eb_ret, slot);
	parent_level = btrfs_header_level(*eb_ret);
	btrfs_node_key_to_cpu(*eb_ret, &first_key, slot);

	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		if (p->reada == READA_FORWARD_ALWAYS)
			reada_for_search(fs_info, p, level, slot, key->objectid);

		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp,
					parent_level - 1, &first_key, gen)) {
				free_extent_buffer(tmp);
				return -EUCLEAN;
			}
			*eb_ret = tmp;
			return 0;
		}

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
		if (!ret) {
			*eb_ret = tmp;
			return 0;
		}
		free_extent_buffer(tmp);
		btrfs_release_path(p);
		return -EIO;
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read. Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, level, slot, key->objectid);

	ret = -EAGAIN;
	tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid,
			      gen, parent_level - 1, &first_key);
	if (!IS_ERR(tmp)) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date. Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!extent_buffer_uptodate(tmp))
			ret = -EIO;
		free_extent_buffer(tmp);
	} else {
		ret = PTR_ERR(tmp);
	}

	btrfs_release_path(p);
	return ret;
}
/*
 * helper function for btrfs_search_slot. This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned. If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = split_node(trans, root, p, level);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = balance_level(trans, root, p, level);
		if (ret)
			return ret;

		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			return -EAGAIN;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return ret;
}
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 iobjectid, u64 ioff, u8 key_type,
		    struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
	    found_key->objectid != key.objectid)
		return 1;

	return 0;
}
static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *b;
	int root_lock;
	int level = 0;

	/* We try very hard to do read locks on the root */
	root_lock = BTRFS_READ_LOCK;

	if (p->search_commit_root) {
		/*
		 * The commit roots are read only so we always do read locks,
		 * and we always must hold the commit_root_sem when doing
		 * searches on them, the only exception is send where we don't
		 * want to block transaction commits for a long time, so
		 * we need to clone the commit root in order to avoid races
		 * with transaction commits that create a snapshot of one of
		 * the roots used by a send operation.
		 */
		if (p->need_commit_sem) {
			down_read(&fs_info->commit_root_sem);
			b = btrfs_clone_extent_buffer(root->commit_root);
			up_read(&fs_info->commit_root_sem);
			if (!b)
				return ERR_PTR(-ENOMEM);

		} else {
			b = root->commit_root;
			atomic_inc(&b->refs);
		}
		level = btrfs_header_level(b);
		/*
		 * Ensure that all callers have set skip_locking when
		 * p->search_commit_root = 1.
		 */
		ASSERT(p->skip_locking == 1);

		goto out;
	}

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);
		goto out;
	}

	/*
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
	 */
	if (write_lock_level < BTRFS_MAX_LEVEL) {
		/*
		 * We don't know the level of the root node until we actually
		 * have it read locked
		 */
		b = btrfs_read_lock_root_node(root);
		level = btrfs_header_level(b);
		if (level > write_lock_level)
			goto out;

		/* Whoops, must trade for write lock */
		btrfs_tree_read_unlock(b);
		free_extent_buffer(b);
	}

	b = btrfs_lock_root_node(root);
	root_lock = BTRFS_WRITE_LOCK;

	/* The level might have changed, check again */
	level = btrfs_header_level(b);

out:
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;
	/*
	 * Callers are responsible for dropping b's references.
	 */
	return b;
}
/*
 * btrfs_search_slot - look for a key in a tree and perform necessary
 * modifications to preserve tree invariants.
 *
 * @trans:	Handle of transaction, used when modifying the tree
 * @p:		Holds all btree nodes along the search path
 * @root:	The root node of the tree
 * @key:	The key we are looking for
 * @ins_len:	Indicates purpose of search:
 *              >0  for inserts it's size of item inserted (*)
 *              <0  for deletions
 *               0  for plain searches, not modifying the tree
 *
 *              (*) If size of item inserted doesn't include
 *              sizeof(struct btrfs_item), then p->search_for_extension must
 *              be set.
 * @cow:	boolean should CoW operations be performed. Must always be 1
 *		when modifying the tree.
 *
 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
 *
 * If @key is found, 0 is returned and you can find the item in the leaf level
 * of the path (level 0)
 *
 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
 * points to the slot where it should be inserted
 *
 * If an error is encountered while searching the tree a negative error number
 * is returned.
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;
	int prev_cmp;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);
	BUG_ON(!cow && ins_len);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers.  Make sure we keep write
		 * locks for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

again:
	prev_cmp = -1;
	b = btrfs_search_slot_get_root(root, p, write_lock_level);
	if (IS_ERR(b)) {
		ret = PTR_ERR(b);
		goto done;
	}

	while (b) {
		int dec = 0;

		level = btrfs_header_level(b);

		if (cow) {
			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));

			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level > write_lock_level ||
			    (level + 1 > write_lock_level &&
			    level + 1 < BTRFS_MAX_LEVEL &&
			    p->nodes[level + 1])) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			if (last_level)
				err = btrfs_cow_block(trans, root, b, NULL, 0,
						      &b,
						      BTRFS_NESTING_COW);
			else
				err = btrfs_cow_block(trans, root, b,
						      p->nodes[level + 1],
						      p->slots[level + 1], &b,
						      BTRFS_NESTING_COW);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		p->nodes[level] = b;
		/*
		 * Leave path with blocking locks to avoid massive
		 * lock context switch, this is made on purpose.
		 */

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If we're inserting or deleting (ins_len != 0), then we might
		 * be changing slot zero, which may require changing the parent.
		 * So, we can't drop the lock until after we know which slot
		 * we're operating on.
		 */
		if (!ins_len && !p->keep_locks) {
			int u = level + 1;

			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
				p->locks[u] = 0;
			}
		}

		/*
		 * If btrfs_bin_search returns an exact match (prev_cmp == 0)
		 * we can safely assume the target key will always be in slot 0
		 * on lower levels due to the invariants BTRFS' btree provides,
		 * namely that a btrfs_key_ptr entry always points to the
		 * lowest key in the child node, thus we can skip searching
		 * lower levels
		 */
		if (prev_cmp == 0) {
			slot = 0;
			ret = 0;
		} else {
			ret = btrfs_bin_search(b, key, &slot);
			prev_cmp = ret;
			if (ret < 0)
				goto done;
		}

		if (level == 0) {
			p->slots[level] = slot;
			/*
			 * Item key already exists. In this case, if we are
			 * allowed to insert the item (for example, in dir_item
			 * case, item key collision is allowed), it will be
			 * merged with the original item. Only the item size
			 * grows, no new btrfs item will be added. If
			 * search_for_extension is not set, ins_len already
			 * accounts the size btrfs_item, deduct it here so leaf
			 * space check will be correct.
			 */
			if (ret == 0 && ins_len > 0 && !p->search_for_extension) {
				ASSERT(ins_len >= sizeof(struct btrfs_item));
				ins_len -= sizeof(struct btrfs_item);
			}
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);
					goto again;
				}

				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock,
					  min_write_lock_level, NULL);
			goto done;
		}
		if (ret && slot > 0) {
			dec = 1;
			slot--;
		}
		p->slots[level] = slot;
		err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
					     &write_lock_level);
		if (err == -EAGAIN)
			goto again;
		if (err) {
			ret = err;
			goto done;
		}
		b = p->nodes[level];
		slot = p->slots[level];

		/*
		 * Slot 0 is special, if we change the key we have to update
		 * the parent pointer which means we must have a write lock on
		 * the parent
		 */
		if (slot == 0 && ins_len && write_lock_level < level + 1) {
			write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		unlock_up(p, level, lowest_unlock, min_write_lock_level,
			  &write_lock_level);

		if (level == lowest_level) {
			if (dec)
				p->slots[level]++;
			goto done;
		}

		err = read_block_for_search(root, p, &b, level, slot, key);
		if (err == -EAGAIN)
			goto again;
		if (err) {
			ret = err;
			goto done;
		}

		if (!p->skip_locking) {
			level = btrfs_header_level(b);
			if (level <= write_lock_level) {
				btrfs_tree_lock(b);
				p->locks[level] = BTRFS_WRITE_LOCK;
			} else {
				btrfs_tree_read_lock(b);
				p->locks[level] = BTRFS_READ_LOCK;
			}
			p->nodes[level] = b;
		}
	}
	ret = 1;
done:
	if (ret < 0 && !p->skip_release_on_error)
		btrfs_release_path(p);
	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
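/*
 * Usage sketch for btrfs_search_slot() (illustrative, error handling
 * trimmed). Searching for an inode item without modifying the tree uses
 * ins_len == 0 and cow == 0:
 *
 *	struct btrfs_key key;
 *	struct btrfs_path *path;
 *
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *
 *	path = btrfs_alloc_path();
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		... item is at path->nodes[0], path->slots[0] ...
 *	else if (ret == 1)
 *		... not found, slot is the insert position ...
 *	btrfs_free_path(path);
 */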
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(p->nodes[0] != NULL);

	if (p->search_commit_root) {
		BUG_ON(time_seq);
		return btrfs_search_slot(NULL, root, key, p, 0, 0);
	}

again:
	b = btrfs_get_old_root(root, time_seq);
	if (!b) {
		ret = -EIO;
		goto done;
	}
	level = btrfs_header_level(b);
	p->locks[level] = BTRFS_READ_LOCK;

	while (b) {
		int dec = 0;

		level = btrfs_header_level(b);
		p->nodes[level] = b;

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 */
		btrfs_unlock_up_safe(p, level + 1);

		ret = btrfs_bin_search(b, key, &slot);
		if (ret < 0)
			goto done;

		if (level == 0) {
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);
			goto done;
		}

		if (ret && slot > 0) {
			dec = 1;
			slot--;
		}
		p->slots[level] = slot;
		unlock_up(p, level, lowest_unlock, 0, NULL);

		if (level == lowest_level) {
			if (dec)
				p->slots[level]++;
			goto done;
		}

		err = read_block_for_search(root, p, &b, level, slot, key);
		if (err == -EAGAIN)
			goto again;
		if (err) {
			ret = err;
			goto done;
		}

		level = btrfs_header_level(b);
		btrfs_tree_read_lock(b);
		b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
		if (!b) {
			ret = -ENOMEM;
			goto done;
		}
		p->locks[level] = BTRFS_READ_LOCK;
		p->nodes[level] = b;
	}
	ret = 1;
done:
	if (ret < 0)
		btrfs_release_path(p);

	return ret;
}

/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is found,
 * return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error
 */
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any)
{
	int ret;
	struct extent_buffer *leaf;

again:
	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
	if (ret <= 0)
		return ret;
	/*
	 * a return value of 1 means the path is at the position where the
	 * item should be inserted. Normally this is the next bigger item,
	 * but in case the previous item is the last in a leaf, path points
	 * to the first free slot in the previous leaf, i.e. at an invalid
	 * item.
	 */
	leaf = p->nodes[0];

	if (find_higher) {
		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, p);
			if (ret <= 0)
				return ret;
			if (!return_any)
				return 1;
			/*
			 * no higher item found, return the next
			 * lower instead
			 */
			return_any = 0;
			find_higher = 0;
			btrfs_release_path(p);
			goto again;
		}
	} else {
		if (p->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, p);
			if (ret < 0)
				return ret;
			if (ret == 0) {
				leaf = p->nodes[0];
				if (p->slots[0] == btrfs_header_nritems(leaf))
					p->slots[0]--;
				return 0;
			}
			if (!return_any)
				return 1;
			/*
			 * no lower item found, return the next
			 * higher instead
			 */
			return_any = 0;
			find_higher = 1;
			btrfs_release_path(p);
			goto again;
		} else {
			--p->slots[0];
		}
	}
	return 0;
}
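/*
 * Example of the semantics above (illustrative): searching for key
 * (256 DIR_INDEX 5) with find_higher set returns the smallest item >= the
 * key; if nothing higher exists and return_any is set, the largest lower
 * item is returned instead, so the function only returns 1 on an empty tree.
 */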
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 */
static void fixup_low_keys(struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;
	int ret;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];

		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		ret = btrfs_tree_mod_log_insert_key(t, tslot,
				BTRFS_MOD_LOG_KEY_REPLACE, GFP_ATOMIC);
		BUG_ON(ret < 0);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
}

/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
			btrfs_crit(fs_info,
		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
				   slot, btrfs_disk_key_objectid(&disk_key),
				   btrfs_disk_key_type(&disk_key),
				   btrfs_disk_key_offset(&disk_key),
				   new_key->objectid, new_key->type,
				   new_key->offset);
			btrfs_print_leaf(eb);
			BUG();
		}
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
			btrfs_crit(fs_info,
		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
				   slot, btrfs_disk_key_objectid(&disk_key),
				   btrfs_disk_key_type(&disk_key),
				   btrfs_disk_key_offset(&disk_key),
				   new_key->objectid, new_key->type,
				   new_key->offset);
			btrfs_print_leaf(eb);
			BUG();
		}
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(path, &disk_key, 1);
}
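/*
 * Illustrative use (hedged): callers shift a key in place when the new key
 * still sorts between its neighbours, e.g. bumping the offset of a file
 * extent item after punching away its front:
 *
 *	struct btrfs_key new_key = { .objectid = ino,
 *				     .type = BTRFS_EXTENT_DATA_KEY,
 *				     .offset = new_start };
 *
 *	btrfs_set_item_key_safe(fs_info, path, &new_key);
 */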
2189 * Check key order of two sibling extent buffers.
2191 * Return true if something is wrong.
2192 * Return false if everything is fine.
2194 * Tree-checker only works inside one tree block, thus the following
2195 * corruption can not be detected by tree-checker:
2197 * Leaf @left | Leaf @right
2198 * --------------------------------------------------------------
2199 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 |
2201 * Key f6 in leaf @left itself is valid, but not valid when the next
2202 * key in leaf @right is 7.
2203 * This can only be checked at tree block merge time.
2204 * And since tree checker has ensured all key order in each tree block
2205 * is correct, we only need to bother the last key of @left and the first
2208 static bool check_sibling_keys(struct extent_buffer *left,
2209 struct extent_buffer *right)
2211 struct btrfs_key left_last;
2212 struct btrfs_key right_first;
2213 int level = btrfs_header_level(left);
2214 int nr_left = btrfs_header_nritems(left);
2215 int nr_right = btrfs_header_nritems(right);
2217 /* No key to check in one of the tree blocks */
2218 if (!nr_left || !nr_right)
2222 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
2223 btrfs_node_key_to_cpu(right, &right_first, 0);
2225 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
2226 btrfs_item_key_to_cpu(right, &right_first, 0);
2229 if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
2230 btrfs_crit(left->fs_info,
2231 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
2232 left_last.objectid, left_last.type,
2233 left_last.offset, right_first.objectid,
2234 right_first.type, right_first.offset);
2241 * try to push data from one node into the next node left in the
2244 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2245 * error, and > 0 if there was no room in the left hand block.
2247 static int push_node_left(struct btrfs_trans_handle *trans,
2248 struct extent_buffer *dst,
2249 struct extent_buffer *src, int empty)
2251 struct btrfs_fs_info *fs_info = trans->fs_info;
2257 src_nritems = btrfs_header_nritems(src);
2258 dst_nritems = btrfs_header_nritems(dst);
2259 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2260 WARN_ON(btrfs_header_generation(src) != trans->transid);
2261 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2263 if (!empty && src_nritems <= 8)
2266 if (push_items <= 0)
2270 push_items = min(src_nritems, push_items);
2271 if (push_items < src_nritems) {
2272 /* leave at least 8 pointers in the node if
2273 * we aren't going to empty it
2275 if (src_nritems - push_items < 8) {
2276 if (push_items <= 8)
2282 push_items = min(src_nritems - 8, push_items);
2284 /* dst is the left eb, src is the middle eb */
2285 if (check_sibling_keys(dst, src)) {
2287 btrfs_abort_transaction(trans, ret);
2290 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
2292 btrfs_abort_transaction(trans, ret);
2295 copy_extent_buffer(dst, src,
2296 btrfs_node_key_ptr_offset(dst_nritems),
2297 btrfs_node_key_ptr_offset(0),
2298 push_items * sizeof(struct btrfs_key_ptr));
2300 if (push_items < src_nritems) {
2302 * Don't call btrfs_tree_mod_log_insert_move() here, key removal
2303 * was already fully logged by btrfs_tree_mod_log_eb_copy() above.
2305 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2306 btrfs_node_key_ptr_offset(push_items),
2307 (src_nritems - push_items) *
2308 sizeof(struct btrfs_key_ptr));
2310 btrfs_set_header_nritems(src, src_nritems - push_items);
2311 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2312 btrfs_mark_buffer_dirty(src);
2313 btrfs_mark_buffer_dirty(dst);
2319 * try to push data from one node into the next node right in the
2322 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2323 * error, and > 0 if there was no room in the right hand block.
2325 * this will only push up to 1/2 the contents of the left node over
2327 static int balance_node_right(struct btrfs_trans_handle *trans,
2328 struct extent_buffer *dst,
2329 struct extent_buffer *src)
2331 struct btrfs_fs_info *fs_info = trans->fs_info;
2338 WARN_ON(btrfs_header_generation(src) != trans->transid);
2339 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2341 src_nritems = btrfs_header_nritems(src);
2342 dst_nritems = btrfs_header_nritems(dst);
2343 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2344 if (push_items <= 0)
2347 if (src_nritems < 4)
2350 max_push = src_nritems / 2 + 1;
2351 /* don't try to empty the node */
2352 if (max_push >= src_nritems)
2355 if (max_push < push_items)
2356 push_items = max_push;
2358 /* dst is the right eb, src is the middle eb */
2359 if (check_sibling_keys(src, dst)) {
2361 btrfs_abort_transaction(trans, ret);
2364 ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
2366 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2367 btrfs_node_key_ptr_offset(0),
2369 sizeof(struct btrfs_key_ptr));
2371 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
2374 btrfs_abort_transaction(trans, ret);
2377 copy_extent_buffer(dst, src,
2378 btrfs_node_key_ptr_offset(0),
2379 btrfs_node_key_ptr_offset(src_nritems - push_items),
2380 push_items * sizeof(struct btrfs_key_ptr));
2382 btrfs_set_header_nritems(src, src_nritems - push_items);
2383 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2385 btrfs_mark_buffer_dirty(src);
2386 btrfs_mark_buffer_dirty(dst);
2392 * helper function to insert a new root level in the tree.
2393 * A new node is allocated, and a single item is inserted to
2394 * point to the existing root
2396 * returns zero on success or < 0 on failure.
2398 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2399 struct btrfs_root *root,
2400 struct btrfs_path *path, int level)
2402 struct btrfs_fs_info *fs_info = root->fs_info;
2404 struct extent_buffer *lower;
2405 struct extent_buffer *c;
2406 struct extent_buffer *old;
2407 struct btrfs_disk_key lower_key;
2410 BUG_ON(path->nodes[level]);
2411 BUG_ON(path->nodes[level-1] != root->node);
2413 lower = path->nodes[level-1];
2415 btrfs_item_key(lower, &lower_key, 0);
2417 btrfs_node_key(lower, &lower_key, 0);
2419 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2420 &lower_key, level, root->node->start, 0,
2421 BTRFS_NESTING_NEW_ROOT);
2425 root_add_used(root, fs_info->nodesize);
2427 btrfs_set_header_nritems(c, 1);
2428 btrfs_set_node_key(c, &lower_key, 0);
2429 btrfs_set_node_blockptr(c, 0, lower->start);
2430 lower_gen = btrfs_header_generation(lower);
2431 WARN_ON(lower_gen != trans->transid);
2433 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2435 btrfs_mark_buffer_dirty(c);
2438 ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
2440 rcu_assign_pointer(root->node, c);
2442 /* the super has an extra ref to root->node */
2443 free_extent_buffer(old);
2445 add_root_to_dirty_list(root);
2446 atomic_inc(&c->refs);
2447 path->nodes[level] = c;
2448 path->locks[level] = BTRFS_WRITE_LOCK;
2449 path->slots[level] = 0;
2450 return 0;
2451 }
2453 /*
2454 * worker function to insert a single pointer in a node.
2455 * the node should have enough room for the pointer already
2457 * slot and level indicate where you want the key to go, and
2458 * blocknr is the block the key points to.
2459 */
2460 static void insert_ptr(struct btrfs_trans_handle *trans,
2461 struct btrfs_path *path,
2462 struct btrfs_disk_key *key, u64 bytenr,
2463 int slot, int level)
2464 {
2465 struct extent_buffer *lower;
2466 int nritems;
2467 int ret;
2469 BUG_ON(!path->nodes[level]);
2470 btrfs_assert_tree_locked(path->nodes[level]);
2471 lower = path->nodes[level];
2472 nritems = btrfs_header_nritems(lower);
2473 BUG_ON(slot > nritems);
2474 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
2475 if (slot != nritems) {
2476 if (level) {
2477 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
2478 slot, nritems - slot);
2479 BUG_ON(ret < 0);
2480 }
2481 memmove_extent_buffer(lower,
2482 btrfs_node_key_ptr_offset(slot + 1),
2483 btrfs_node_key_ptr_offset(slot),
2484 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2485 }
2486 if (level) {
2487 ret = btrfs_tree_mod_log_insert_key(lower, slot,
2488 BTRFS_MOD_LOG_KEY_ADD, GFP_NOFS);
2489 BUG_ON(ret < 0);
2490 }
2491 btrfs_set_node_key(lower, key, slot);
2492 btrfs_set_node_blockptr(lower, slot, bytenr);
2493 WARN_ON(trans->transid == 0);
2494 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2495 btrfs_set_header_nritems(lower, nritems + 1);
2496 btrfs_mark_buffer_dirty(lower);
2497 }
2499 /*
2500 * split the node at the specified level in path in two.
2501 * The path is corrected to point to the appropriate node after the split
2503 * Before splitting this tries to make some room in the node by pushing
2504 * left and right, if either one works, it returns right away.
2506 * returns 0 on success and < 0 on failure
2507 */
2508 static noinline int split_node(struct btrfs_trans_handle *trans,
2509 struct btrfs_root *root,
2510 struct btrfs_path *path, int level)
2511 {
2512 struct btrfs_fs_info *fs_info = root->fs_info;
2513 struct extent_buffer *c;
2514 struct extent_buffer *split;
2515 struct btrfs_disk_key disk_key;
2516 int mid;
2517 int ret;
2518 u32 c_nritems;
2520 c = path->nodes[level];
2521 WARN_ON(btrfs_header_generation(c) != trans->transid);
2522 if (c == root->node) {
2523 /*
2524 * trying to split the root, let's make a new one
2526 * tree mod log: We don't log the removal of the old root in
2527 * insert_new_root, because that root buffer will be kept as a
2528 * normal node. We are going to log removal of half of the
2529 * elements below with btrfs_tree_mod_log_eb_copy(). We're
2530 * holding a tree lock on the buffer, which is why we cannot
2531 * race with other tree_mod_log users.
2532 */
2533 ret = insert_new_root(trans, root, path, level + 1);
2534 if (ret)
2535 return ret;
2536 } else {
2537 ret = push_nodes_for_insert(trans, root, path, level);
2538 c = path->nodes[level];
2539 if (!ret && btrfs_header_nritems(c) <
2540 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
2541 return 0;
2542 if (ret < 0)
2543 return ret;
2544 }
2546 c_nritems = btrfs_header_nritems(c);
2547 mid = (c_nritems + 1) / 2;
2548 btrfs_node_key(c, &disk_key, mid);
2550 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2551 &disk_key, level, c->start, 0,
2552 BTRFS_NESTING_SPLIT);
2553 if (IS_ERR(split))
2554 return PTR_ERR(split);
2556 root_add_used(root, fs_info->nodesize);
2557 ASSERT(btrfs_header_level(c) == level);
2559 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
2560 if (ret) {
2561 btrfs_abort_transaction(trans, ret);
2562 return ret;
2563 }
2564 copy_extent_buffer(split, c,
2565 btrfs_node_key_ptr_offset(0),
2566 btrfs_node_key_ptr_offset(mid),
2567 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2568 btrfs_set_header_nritems(split, c_nritems - mid);
2569 btrfs_set_header_nritems(c, mid);
2571 btrfs_mark_buffer_dirty(c);
2572 btrfs_mark_buffer_dirty(split);
2574 insert_ptr(trans, path, &disk_key, split->start,
2575 path->slots[level + 1] + 1, level + 1);
2577 if (path->slots[level] >= mid) {
2578 path->slots[level] -= mid;
2579 btrfs_tree_unlock(c);
2580 free_extent_buffer(c);
2581 path->nodes[level] = split;
2582 path->slots[level + 1] += 1;
2583 } else {
2584 btrfs_tree_unlock(split);
2585 free_extent_buffer(split);
2586 }
2587 return 0;
2588 }
2590 /*
2591 * how many bytes are required to store the items in a leaf. start
2592 * and nr indicate which items in the leaf to check. This totals up the
2593 * space used both by the item structs and the item data
2594 */
2595 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2596 {
2597 struct btrfs_item *start_item;
2598 struct btrfs_item *end_item;
2599 int data_len;
2600 int nritems = btrfs_header_nritems(l);
2601 int end = min(nritems, start + nr) - 1;
2603 if (!nr)
2604 return 0;
2605 start_item = btrfs_item_nr(start);
2606 end_item = btrfs_item_nr(end);
2607 data_len = btrfs_item_offset(l, start_item) +
2608 btrfs_item_size(l, start_item);
2609 data_len = data_len - btrfs_item_offset(l, end_item);
2610 data_len += sizeof(struct btrfs_item) * nr;
2611 WARN_ON(data_len < 0);
2612 return data_len;
2613 }
2615 /*
2616 * The space between the end of the leaf items and
2617 * the start of the leaf data. IOW, how much room
2618 * the leaf has left for both items and data
2619 */
2620 noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
2621 {
2622 struct btrfs_fs_info *fs_info = leaf->fs_info;
2623 int nritems = btrfs_header_nritems(leaf);
2624 int ret;
2626 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
2627 if (ret < 0) {
2628 btrfs_crit(fs_info,
2629 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
2630 ret,
2631 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
2632 leaf_space_used(leaf, 0, nritems), nritems);
2633 }
2634 return ret;
2635 }
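/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that wants to add an item with `size` bytes of data must also account for
 * the struct btrfs_item header, mirroring the check split_leaf() performs.
 * The value 100 is a hypothetical item body size:
 *
 *	u32 size = 100;
 *	int free = btrfs_leaf_free_space(leaf);
 *
 *	if (free < 0 || free < size + sizeof(struct btrfs_item))
 *		;	// not enough room; a push or split is needed first
 */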
2637 /*
2638 * min slot controls the lowest index we're willing to push to the
2639 * right. We'll push up to and including min_slot, but no lower
2640 */
2641 static noinline int __push_leaf_right(struct btrfs_path *path,
2642 int data_size, int empty,
2643 struct extent_buffer *right,
2644 int free_space, u32 left_nritems,
2647 struct btrfs_fs_info *fs_info = right->fs_info;
2648 struct extent_buffer *left = path->nodes[0];
2649 struct extent_buffer *upper = path->nodes[1];
2650 struct btrfs_map_token token;
2651 struct btrfs_disk_key disk_key;
2656 struct btrfs_item *item;
2665 nr = max_t(u32, 1, min_slot);
2667 if (path->slots[0] >= left_nritems)
2668 push_space += data_size;
2670 slot = path->slots[1];
2671 i = left_nritems - 1;
2673 item = btrfs_item_nr(i);
2675 if (!empty && push_items > 0) {
2676 if (path->slots[0] > i)
2678 if (path->slots[0] == i) {
2679 int space = btrfs_leaf_free_space(left);
2681 if (space + push_space * 2 > free_space)
2686 if (path->slots[0] == i)
2687 push_space += data_size;
2689 this_item_size = btrfs_item_size(left, item);
2690 if (this_item_size + sizeof(*item) + push_space > free_space)
2694 push_space += this_item_size + sizeof(*item);
2700 if (push_items == 0)
2703 WARN_ON(!empty && push_items == left_nritems);
2705 /* push left to right */
2706 right_nritems = btrfs_header_nritems(right);
2708 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
2709 push_space -= leaf_data_end(left);
2711 /* make room in the right data area */
2712 data_end = leaf_data_end(right);
2713 memmove_extent_buffer(right,
2714 BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
2715 BTRFS_LEAF_DATA_OFFSET + data_end,
2716 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
2718 /* copy from the left data area */
2719 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
2720 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
2721 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
2724 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2725 btrfs_item_nr_offset(0),
2726 right_nritems * sizeof(struct btrfs_item));
2728 /* copy the items from left to right */
2729 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2730 btrfs_item_nr_offset(left_nritems - push_items),
2731 push_items * sizeof(struct btrfs_item));
2733 /* update the item pointers */
2734 btrfs_init_map_token(&token, right);
2735 right_nritems += push_items;
2736 btrfs_set_header_nritems(right, right_nritems);
2737 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
2738 for (i = 0; i < right_nritems; i++) {
2739 item = btrfs_item_nr(i);
2740 push_space -= btrfs_token_item_size(&token, item);
2741 btrfs_set_token_item_offset(&token, item, push_space);
2744 left_nritems -= push_items;
2745 btrfs_set_header_nritems(left, left_nritems);
2747 if (left_nritems)
2748 btrfs_mark_buffer_dirty(left);
2749 else
2750 btrfs_clean_tree_block(left);
2752 btrfs_mark_buffer_dirty(right);
2754 btrfs_item_key(right, &disk_key, 0);
2755 btrfs_set_node_key(upper, &disk_key, slot + 1);
2756 btrfs_mark_buffer_dirty(upper);
2758 /* then fixup the leaf pointer in the path */
2759 if (path->slots[0] >= left_nritems) {
2760 path->slots[0] -= left_nritems;
2761 if (btrfs_header_nritems(path->nodes[0]) == 0)
2762 btrfs_clean_tree_block(path->nodes[0]);
2763 btrfs_tree_unlock(path->nodes[0]);
2764 free_extent_buffer(path->nodes[0]);
2765 path->nodes[0] = right;
2766 path->slots[1] += 1;
2767 } else {
2768 btrfs_tree_unlock(right);
2769 free_extent_buffer(right);
2770 }
2771 return 0;
2773 out_unlock:
2774 btrfs_tree_unlock(right);
2775 free_extent_buffer(right);
2776 return 1;
2777 }
2779 /*
2780 * push some data in the path leaf to the right, trying to free up at
2781 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2783 * returns 1 if the push failed because the other node didn't have enough
2784 * room, 0 if everything worked out and < 0 if there were major errors.
2786 * this will push starting from min_slot to the end of the leaf. It won't
2787 * push any slot lower than min_slot
2788 */
2789 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2790 *root, struct btrfs_path *path,
2791 int min_data_size, int data_size,
2792 int empty, u32 min_slot)
2794 struct extent_buffer *left = path->nodes[0];
2795 struct extent_buffer *right;
2796 struct extent_buffer *upper;
2802 if (!path->nodes[1])
2805 slot = path->slots[1];
2806 upper = path->nodes[1];
2807 if (slot >= btrfs_header_nritems(upper) - 1)
2810 btrfs_assert_tree_locked(path->nodes[1]);
2812 right = btrfs_read_node_slot(upper, slot + 1);
2813 /*
2814 * slot + 1 is not valid or we fail to read the right node,
2815 * no big deal, just return.
2816 */
2817 if (IS_ERR(right))
2818 return 1;
2820 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
2822 free_space = btrfs_leaf_free_space(right);
2823 if (free_space < data_size)
2824 goto out_unlock;
2826 /* cow and double check */
2827 ret = btrfs_cow_block(trans, root, right, upper,
2828 slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
2829 if (ret)
2830 goto out_unlock;
2832 free_space = btrfs_leaf_free_space(right);
2833 if (free_space < data_size)
2834 goto out_unlock;
2836 left_nritems = btrfs_header_nritems(left);
2837 if (left_nritems == 0)
2838 goto out_unlock;
2840 if (check_sibling_keys(left, right)) {
2841 ret = -EUCLEAN;
2842 btrfs_tree_unlock(right);
2843 free_extent_buffer(right);
2844 return ret;
2845 }
2846 if (path->slots[0] == left_nritems && !empty) {
2847 /* Key greater than all keys in the leaf, right neighbor has
2848 * enough room for it and we're not emptying our leaf to delete
2849 * it, therefore use right neighbor to insert the new item and
2850 * no need to touch/dirty our left leaf. */
2851 btrfs_tree_unlock(left);
2852 free_extent_buffer(left);
2853 path->nodes[0] = right;
2854 path->slots[0] = 0;
2855 path->slots[1]++;
2856 return 0;
2857 }
2859 return __push_leaf_right(path, min_data_size, empty,
2860 right, free_space, left_nritems, min_slot);
2862 btrfs_tree_unlock(right);
2863 free_extent_buffer(right);
2864 return 1;
2865 }
2867 /*
2868 * push some data in the path leaf to the left, trying to free up at
2869 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2871 * max_slot can put a limit on how far into the leaf we'll push items. The
2872 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
2873 * items
2874 */
2875 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
2876 int empty, struct extent_buffer *left,
2877 int free_space, u32 right_nritems,
2880 struct btrfs_fs_info *fs_info = left->fs_info;
2881 struct btrfs_disk_key disk_key;
2882 struct extent_buffer *right = path->nodes[0];
2886 struct btrfs_item *item;
2887 u32 old_left_nritems;
2891 u32 old_left_item_size;
2892 struct btrfs_map_token token;
2895 nr = min(right_nritems, max_slot);
2897 nr = min(right_nritems - 1, max_slot);
2899 for (i = 0; i < nr; i++) {
2900 item = btrfs_item_nr(i);
2902 if (!empty && push_items > 0) {
2903 if (path->slots[0] < i)
2905 if (path->slots[0] == i) {
2906 int space = btrfs_leaf_free_space(right);
2908 if (space + push_space * 2 > free_space)
2913 if (path->slots[0] == i)
2914 push_space += data_size;
2916 this_item_size = btrfs_item_size(right, item);
2917 if (this_item_size + sizeof(*item) + push_space > free_space)
2921 push_space += this_item_size + sizeof(*item);
2924 if (push_items == 0) {
2928 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
2930 /* push data from right to left */
2931 copy_extent_buffer(left, right,
2932 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2933 btrfs_item_nr_offset(0),
2934 push_items * sizeof(struct btrfs_item));
2936 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
2937 btrfs_item_offset_nr(right, push_items - 1);
2939 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
2940 leaf_data_end(left) - push_space,
2941 BTRFS_LEAF_DATA_OFFSET +
2942 btrfs_item_offset_nr(right, push_items - 1),
2944 old_left_nritems = btrfs_header_nritems(left);
2945 BUG_ON(old_left_nritems <= 0);
2947 btrfs_init_map_token(&token, left);
2948 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2949 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2952 item = btrfs_item_nr(i);
2954 ioff = btrfs_token_item_offset(&token, item);
2955 btrfs_set_token_item_offset(&token, item,
2956 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
2958 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2960 /* fixup right node */
2961 if (push_items > right_nritems)
2962 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
2965 if (push_items < right_nritems) {
2966 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2967 leaf_data_end(right);
2968 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
2969 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
2970 BTRFS_LEAF_DATA_OFFSET +
2971 leaf_data_end(right), push_space);
2973 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2974 btrfs_item_nr_offset(push_items),
2975 (btrfs_header_nritems(right) - push_items) *
2976 sizeof(struct btrfs_item));
2979 btrfs_init_map_token(&token, right);
2980 right_nritems -= push_items;
2981 btrfs_set_header_nritems(right, right_nritems);
2982 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
2983 for (i = 0; i < right_nritems; i++) {
2984 item = btrfs_item_nr(i);
2986 push_space = push_space - btrfs_token_item_size(&token, item);
2987 btrfs_set_token_item_offset(&token, item, push_space);
2990 btrfs_mark_buffer_dirty(left);
2991 if (right_nritems)
2992 btrfs_mark_buffer_dirty(right);
2993 else
2994 btrfs_clean_tree_block(right);
2996 btrfs_item_key(right, &disk_key, 0);
2997 fixup_low_keys(path, &disk_key, 1);
2999 /* then fixup the leaf pointer in the path */
3000 if (path->slots[0] < push_items) {
3001 path->slots[0] += old_left_nritems;
3002 btrfs_tree_unlock(path->nodes[0]);
3003 free_extent_buffer(path->nodes[0]);
3004 path->nodes[0] = left;
3005 path->slots[1] -= 1;
3007 btrfs_tree_unlock(left);
3008 free_extent_buffer(left);
3009 path->slots[0] -= push_items;
3011 BUG_ON(path->slots[0] < 0);
3014 btrfs_tree_unlock(left);
3015 free_extent_buffer(left);
3016 return ret;
3017 }
3019 /*
3020 * push some data in the path leaf to the left, trying to free up at
3021 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3023 * max_slot can put a limit on how far into the leaf we'll push items. The
3024 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3025 * items
3026 */
3027 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3028 *root, struct btrfs_path *path, int min_data_size,
3029 int data_size, int empty, u32 max_slot)
3031 struct extent_buffer *right = path->nodes[0];
3032 struct extent_buffer *left;
3038 slot = path->slots[1];
3041 if (!path->nodes[1])
3044 right_nritems = btrfs_header_nritems(right);
3045 if (right_nritems == 0)
3048 btrfs_assert_tree_locked(path->nodes[1]);
3050 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
3051 /*
3052 * slot - 1 is not valid or we fail to read the left node,
3053 * no big deal, just return.
3054 */
3055 if (IS_ERR(left))
3056 return 1;
3058 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
3060 free_space = btrfs_leaf_free_space(left);
3061 if (free_space < data_size) {
3062 ret = 1;
3063 goto out;
3064 }
3066 /* cow and double check */
3067 ret = btrfs_cow_block(trans, root, left,
3068 path->nodes[1], slot - 1, &left,
3069 BTRFS_NESTING_LEFT_COW);
3070 if (ret) {
3071 /* we hit -ENOSPC, but it isn't fatal here */
3072 if (ret == -ENOSPC)
3073 ret = 1;
3074 goto out;
3075 }
3077 free_space = btrfs_leaf_free_space(left);
3078 if (free_space < data_size) {
3079 ret = 1;
3080 goto out;
3081 }
3083 if (check_sibling_keys(left, right)) {
3084 ret = -EUCLEAN;
3085 goto out;
3086 }
3087 return __push_leaf_left(path, min_data_size,
3088 empty, left, free_space, right_nritems,
3089 max_slot);
3090 out:
3091 btrfs_tree_unlock(left);
3092 free_extent_buffer(left);
3093 return ret;
3094 }
3096 /*
3097 * split the path's leaf in two, making sure there is at least data_size
3098 * available for the resulting leaf level of the path.
3099 */
3100 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3101 struct btrfs_path *path,
3102 struct extent_buffer *l,
3103 struct extent_buffer *right,
3104 int slot, int mid, int nritems)
3106 struct btrfs_fs_info *fs_info = trans->fs_info;
3110 struct btrfs_disk_key disk_key;
3111 struct btrfs_map_token token;
3113 nritems = nritems - mid;
3114 btrfs_set_header_nritems(right, nritems);
3115 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
3117 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3118 btrfs_item_nr_offset(mid),
3119 nritems * sizeof(struct btrfs_item));
3121 copy_extent_buffer(right, l,
3122 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
3123 data_copy_size, BTRFS_LEAF_DATA_OFFSET +
3124 leaf_data_end(l), data_copy_size);
3126 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
3128 btrfs_init_map_token(&token, right);
3129 for (i = 0; i < nritems; i++) {
3130 struct btrfs_item *item = btrfs_item_nr(i);
3133 ioff = btrfs_token_item_offset(&token, item);
3134 btrfs_set_token_item_offset(&token, item, ioff + rt_data_off);
3137 btrfs_set_header_nritems(l, mid);
3138 btrfs_item_key(right, &disk_key, 0);
3139 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
3141 btrfs_mark_buffer_dirty(right);
3142 btrfs_mark_buffer_dirty(l);
3143 BUG_ON(path->slots[0] != slot);
3146 btrfs_tree_unlock(path->nodes[0]);
3147 free_extent_buffer(path->nodes[0]);
3148 path->nodes[0] = right;
3149 path->slots[0] -= mid;
3150 path->slots[1] += 1;
3152 btrfs_tree_unlock(right);
3153 free_extent_buffer(right);
3156 BUG_ON(path->slots[0] < 0);
3157 }
3159 /*
3160 * double splits happen when we need to insert a big item in the middle
3161 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3162 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3163 *          A                 B                 C
3165 * We avoid this by trying to push the items on either side of our target
3166 * into the adjacent leaves. If all goes well we can avoid the double split
3167 * completely.
3168 */
3169 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3170 struct btrfs_root *root,
3171 struct btrfs_path *path,
3178 int space_needed = data_size;
3180 slot = path->slots[0];
3181 if (slot < btrfs_header_nritems(path->nodes[0]))
3182 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3185 * try to push all the items after our slot into the
3188 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
3195 nritems = btrfs_header_nritems(path->nodes[0]);
3196 /*
3197 * our goal is to get our slot at the start or end of a leaf. If
3198 * we've done so we're done
3199 */
3200 if (path->slots[0] == 0 || path->slots[0] == nritems)
3201 return 0;
3203 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3204 return 0;
3206 /* try to push all the items before our slot into the next leaf */
3207 slot = path->slots[0];
3208 space_needed = data_size;
3209 if (slot > 0)
3210 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3211 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
3212 if (ret < 0)
3213 return ret;
3215 if (ret == 0)
3216 progress++;
3218 if (progress)
3219 return 0;
3220 return 1;
3221 }
3223 /*
3224 * split the path's leaf in two, making sure there is at least data_size
3225 * available for the resulting leaf level of the path.
3227 * returns 0 if all went well and < 0 on failure.
3228 */
3229 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3230 struct btrfs_root *root,
3231 const struct btrfs_key *ins_key,
3232 struct btrfs_path *path, int data_size,
3233 int extend)
3234 {
3235 struct btrfs_disk_key disk_key;
3236 struct extent_buffer *l;
3237 u32 nritems;
3238 int mid;
3239 int slot;
3240 struct extent_buffer *right;
3241 struct btrfs_fs_info *fs_info = root->fs_info;
3242 int ret = 0;
3243 int wret;
3244 int split;
3245 int num_doubles = 0;
3246 int tried_avoid_double = 0;
3248 l = path->nodes[0];
3249 slot = path->slots[0];
3250 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3251 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
3252 return -EOVERFLOW;
3254 /* first try to make some room by pushing left and right */
3255 if (data_size && path->nodes[1]) {
3256 int space_needed = data_size;
3258 if (slot < btrfs_header_nritems(l))
3259 space_needed -= btrfs_leaf_free_space(l);
3261 wret = push_leaf_right(trans, root, path, space_needed,
3262 space_needed, 0, 0);
3263 if (wret < 0)
3264 return wret;
3265 if (wret) {
3266 space_needed = data_size;
3267 if (slot > 0)
3268 space_needed -= btrfs_leaf_free_space(l);
3269 wret = push_leaf_left(trans, root, path, space_needed,
3270 space_needed, 0, (u32)-1);
3271 if (wret < 0)
3272 return wret;
3273 }
3274 l = path->nodes[0];
3276 /* did the pushes work? */
3277 if (btrfs_leaf_free_space(l) >= data_size)
3278 return 0;
3279 }
3281 if (!path->nodes[1]) {
3282 ret = insert_new_root(trans, root, path, 1);
3283 if (ret)
3284 return ret;
3285 }
3286 again:
3287 split = 1;
3288 l = path->nodes[0];
3289 slot = path->slots[0];
3290 nritems = btrfs_header_nritems(l);
3291 mid = (nritems + 1) / 2;
3293 if (mid <= slot) {
3294 if (nritems == 1 ||
3295 leaf_space_used(l, mid, nritems - mid) + data_size >
3296 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3297 if (slot >= nritems) {
3298 split = 0;
3299 } else {
3300 mid = slot;
3301 if (mid != nritems &&
3302 leaf_space_used(l, mid, nritems - mid) +
3303 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3304 if (data_size && !tried_avoid_double)
3305 goto push_for_double;
3306 split = 2;
3307 }
3308 }
3309 }
3310 } else {
3311 if (leaf_space_used(l, 0, mid) + data_size >
3312 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3313 if (!extend && data_size && slot == 0) {
3314 split = 0;
3315 } else if ((extend || !data_size) && slot == 0) {
3316 mid = 1;
3317 } else {
3318 mid = slot;
3319 if (mid != nritems &&
3320 leaf_space_used(l, mid, nritems - mid) +
3321 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3322 if (data_size && !tried_avoid_double)
3323 goto push_for_double;
3324 split = 2;
3325 }
3326 }
3327 }
3328 }
3330 if (split == 0)
3331 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3332 else
3333 btrfs_item_key(l, &disk_key, mid);
3335 /*
3336 * We have to worry about BTRFS_NESTING_NEW_ROOT here if we've done a double
3337 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3338 * subclasses, which is 8 at the time of this patch, and we've maxed it
3339 * out. In the future we could add a
3340 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3341 * use BTRFS_NESTING_NEW_ROOT.
3342 */
3343 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3344 &disk_key, 0, l->start, 0,
3345 num_doubles ? BTRFS_NESTING_NEW_ROOT :
3346 BTRFS_NESTING_SPLIT);
3347 if (IS_ERR(right))
3348 return PTR_ERR(right);
3350 root_add_used(root, fs_info->nodesize);
3352 if (split == 0) {
3353 if (mid <= slot) {
3354 btrfs_set_header_nritems(right, 0);
3355 insert_ptr(trans, path, &disk_key,
3356 right->start, path->slots[1] + 1, 1);
3357 btrfs_tree_unlock(path->nodes[0]);
3358 free_extent_buffer(path->nodes[0]);
3359 path->nodes[0] = right;
3360 path->slots[0] = 0;
3361 path->slots[1] += 1;
3362 } else {
3363 btrfs_set_header_nritems(right, 0);
3364 insert_ptr(trans, path, &disk_key,
3365 right->start, path->slots[1], 1);
3366 btrfs_tree_unlock(path->nodes[0]);
3367 free_extent_buffer(path->nodes[0]);
3368 path->nodes[0] = right;
3369 path->slots[0] = 0;
3370 if (path->slots[1] == 0)
3371 fixup_low_keys(path, &disk_key, 1);
3372 }
3373 /*
3374 * We create a new leaf 'right' for the required ins_len and
3375 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
3376 * the content of ins_len to 'right'.
3377 */
3378 return ret;
3379 }
3381 copy_for_split(trans, path, l, right, slot, mid, nritems);
3383 if (split == 2) {
3384 BUG_ON(num_doubles != 0);
3385 num_doubles++;
3386 goto again;
3387 }
3392 push_for_double_split(trans, root, path, data_size);
3393 tried_avoid_double = 1;
3394 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3395 return 0;
3396 goto again;
3397 }
3399 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3400 struct btrfs_root *root,
3401 struct btrfs_path *path, int ins_len)
3403 struct btrfs_key key;
3404 struct extent_buffer *leaf;
3405 struct btrfs_file_extent_item *fi;
3410 leaf = path->nodes[0];
3411 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3413 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3414 key.type != BTRFS_EXTENT_CSUM_KEY);
3416 if (btrfs_leaf_free_space(leaf) >= ins_len)
3417 return 0;
3419 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3420 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3421 fi = btrfs_item_ptr(leaf, path->slots[0],
3422 struct btrfs_file_extent_item);
3423 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3425 btrfs_release_path(path);
3427 path->keep_locks = 1;
3428 path->search_for_split = 1;
3429 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3430 path->search_for_split = 0;
3431 if (ret > 0)
3432 ret = -EAGAIN;
3433 if (ret < 0)
3434 goto err;
3436 ret = -EAGAIN;
3437 leaf = path->nodes[0];
3438 /* if our item isn't there, return now */
3439 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3442 /* the leaf has changed, it now has room. return now */
3443 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
3446 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3447 fi = btrfs_item_ptr(leaf, path->slots[0],
3448 struct btrfs_file_extent_item);
3449 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3453 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3454 if (ret)
3455 goto err;
3457 path->keep_locks = 0;
3458 btrfs_unlock_up_safe(path, 1);
3459 return 0;
3460 err:
3461 path->keep_locks = 0;
3462 return ret;
3463 }
3465 static noinline int split_item(struct btrfs_path *path,
3466 const struct btrfs_key *new_key,
3467 unsigned long split_offset)
3469 struct extent_buffer *leaf;
3470 struct btrfs_item *item;
3471 struct btrfs_item *new_item;
3477 struct btrfs_disk_key disk_key;
3479 leaf = path->nodes[0];
3480 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
3482 item = btrfs_item_nr(path->slots[0]);
3483 orig_offset = btrfs_item_offset(leaf, item);
3484 item_size = btrfs_item_size(leaf, item);
3486 buf = kmalloc(item_size, GFP_NOFS);
3487 if (!buf)
3488 return -ENOMEM;
3490 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3491 path->slots[0]), item_size);
3493 slot = path->slots[0] + 1;
3494 nritems = btrfs_header_nritems(leaf);
3495 if (slot != nritems) {
3496 /* shift the items */
3497 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3498 btrfs_item_nr_offset(slot),
3499 (nritems - slot) * sizeof(struct btrfs_item));
3502 btrfs_cpu_key_to_disk(&disk_key, new_key);
3503 btrfs_set_item_key(leaf, &disk_key, slot);
3505 new_item = btrfs_item_nr(slot);
3507 btrfs_set_item_offset(leaf, new_item, orig_offset);
3508 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3510 btrfs_set_item_offset(leaf, item,
3511 orig_offset + item_size - split_offset);
3512 btrfs_set_item_size(leaf, item, split_offset);
3514 btrfs_set_header_nritems(leaf, nritems + 1);
3516 /* write the data for the start of the original item */
3517 write_extent_buffer(leaf, buf,
3518 btrfs_item_ptr_offset(leaf, path->slots[0]),
3519 split_offset);
3521 /* write the data for the new item */
3522 write_extent_buffer(leaf, buf + split_offset,
3523 btrfs_item_ptr_offset(leaf, slot),
3524 item_size - split_offset);
3525 btrfs_mark_buffer_dirty(leaf);
3527 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
3528 kfree(buf);
3529 return 0;
3530 }
3532 /*
3533 * This function splits a single item into two items,
3534 * giving 'new_key' to the new item and splitting the
3535 * old one at split_offset (from the start of the item).
3537 * The path may be released by this operation. After
3538 * the split, the path is pointing to the old item. The
3539 * new item is going to be in the same node as the old one.
3541 * Note, the item being split must be small enough to live alone on
3542 * a tree block with room for one extra struct btrfs_item
3544 * This allows us to split the item in place, keeping a lock on the
3545 * leaf the entire time.
3546 */
3547 int btrfs_split_item(struct btrfs_trans_handle *trans,
3548 struct btrfs_root *root,
3549 struct btrfs_path *path,
3550 const struct btrfs_key *new_key,
3551 unsigned long split_offset)
3552 {
3553 int ret;
3554 ret = setup_leaf_for_split(trans, root, path,
3555 sizeof(struct btrfs_item));
3556 if (ret)
3557 return ret;
3559 ret = split_item(path, new_key, split_offset);
3560 return ret;
3561 }
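/*
 * Usage sketch (illustrative, not part of the original file): splitting an
 * item so that the second half starts at byte 16 of the original body and
 * carries new_key. `ino` and `split_off` are hypothetical values; the item
 * must be an EXTENT_DATA or EXTENT_CSUM item, per setup_leaf_for_split():
 *
 *	struct btrfs_key new_key = { .objectid = ino,
 *				     .type = BTRFS_EXTENT_DATA_KEY,
 *				     .offset = split_off };
 *
 *	ret = btrfs_split_item(trans, root, path, &new_key, 16);
 *	// on success the path still points at the shortened old item
 */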
3563 /*
3564 * This function duplicates an item, giving 'new_key' to the new item.
3565 * It guarantees both items live in the same tree leaf and the new item
3566 * is contiguous with the original item.
3568 * This allows us to split a file extent in place, keeping a lock on the
3569 * leaf the entire time.
3570 */
3571 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3572 struct btrfs_root *root,
3573 struct btrfs_path *path,
3574 const struct btrfs_key *new_key)
3575 {
3576 struct extent_buffer *leaf;
3577 int ret;
3578 u32 item_size;
3580 leaf = path->nodes[0];
3581 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3582 ret = setup_leaf_for_split(trans, root, path,
3583 item_size + sizeof(struct btrfs_item));
3584 if (ret)
3585 return ret;
3587 path->slots[0]++;
3588 setup_items_for_insert(root, path, new_key, &item_size, 1);
3589 leaf = path->nodes[0];
3590 memcpy_extent_buffer(leaf,
3591 btrfs_item_ptr_offset(leaf, path->slots[0]),
3592 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
3593 item_size);
3594 return 0;
3595 }
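/*
 * Usage sketch (illustrative only): duplicating a file extent item under a
 * new file offset, the way the file-extent splitting code uses this helper.
 * `ino` and `new_off` are hypothetical values:
 *
 *	struct btrfs_key new_key = { .objectid = ino,
 *				     .type = BTRFS_EXTENT_DATA_KEY,
 *				     .offset = new_off };
 *
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *	// on success path->slots[0] points at the new, contiguous copy
 */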
3597 /*
3598 * make the item pointed to by the path smaller. new_size indicates
3599 * how small to make it, and from_end tells us if we just chop bytes
3600 * off the end of the item or if we shift the item to chop bytes off
3601 * the front.
3602 */
3603 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
3606 struct extent_buffer *leaf;
3607 struct btrfs_item *item;
3609 unsigned int data_end;
3610 unsigned int old_data_start;
3611 unsigned int old_size;
3612 unsigned int size_diff;
3614 struct btrfs_map_token token;
3616 leaf = path->nodes[0];
3617 slot = path->slots[0];
3619 old_size = btrfs_item_size_nr(leaf, slot);
3620 if (old_size == new_size)
3621 return;
3623 nritems = btrfs_header_nritems(leaf);
3624 data_end = leaf_data_end(leaf);
3626 old_data_start = btrfs_item_offset_nr(leaf, slot);
3628 size_diff = old_size - new_size;
3631 BUG_ON(slot >= nritems);
3633 /*
3634 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3635 */
3636 /* first correct the data pointers */
3637 btrfs_init_map_token(&token, leaf);
3638 for (i = slot; i < nritems; i++) {
3639 u32 ioff;
3640 item = btrfs_item_nr(i);
3642 ioff = btrfs_token_item_offset(&token, item);
3643 btrfs_set_token_item_offset(&token, item, ioff + size_diff);
3644 }
3646 /* shift the data */
3648 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3649 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3650 data_end, old_data_start + new_size - data_end);
3652 struct btrfs_disk_key disk_key;
3655 btrfs_item_key(leaf, &disk_key, slot);
3657 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3659 struct btrfs_file_extent_item *fi;
3661 fi = btrfs_item_ptr(leaf, slot,
3662 struct btrfs_file_extent_item);
3663 fi = (struct btrfs_file_extent_item *)(
3664 (unsigned long)fi - size_diff);
3666 if (btrfs_file_extent_type(leaf, fi) ==
3667 BTRFS_FILE_EXTENT_INLINE) {
3668 ptr = btrfs_item_ptr_offset(leaf, slot);
3669 memmove_extent_buffer(leaf, ptr,
3671 BTRFS_FILE_EXTENT_INLINE_DATA_START);
3675 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3676 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3677 data_end, old_data_start - data_end);
3679 offset = btrfs_disk_key_offset(&disk_key);
3680 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3681 btrfs_set_item_key(leaf, &disk_key, slot);
3683 fixup_low_keys(path, &disk_key, 1);
3686 item = btrfs_item_nr(slot);
3687 btrfs_set_item_size(leaf, item, new_size);
3688 btrfs_mark_buffer_dirty(leaf);
3690 if (btrfs_leaf_free_space(leaf) < 0) {
3691 btrfs_print_leaf(leaf);
3692 BUG();
3693 }
3694 }
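/*
 * Usage sketch (illustrative): chop the last 8 bytes off the item the path
 * points to, keeping the front of the item in place:
 *
 *	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
 *
 *	btrfs_truncate_item(path, old_size - 8, 1);
 *
 * Passing from_end == 0 instead keeps the tail and shifts the item, which
 * also bumps the key offset (used when trimming the front of csum items).
 */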
3696 /*
3697 * make the item pointed to by the path bigger, data_size is the added size.
3698 */
3699 void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
3702 struct extent_buffer *leaf;
3703 struct btrfs_item *item;
3705 unsigned int data_end;
3706 unsigned int old_data;
3707 unsigned int old_size;
3709 struct btrfs_map_token token;
3711 leaf = path->nodes[0];
3713 nritems = btrfs_header_nritems(leaf);
3714 data_end = leaf_data_end(leaf);
3716 if (btrfs_leaf_free_space(leaf) < data_size) {
3717 btrfs_print_leaf(leaf);
3718 BUG();
3719 }
3720 slot = path->slots[0];
3721 old_data = btrfs_item_end_nr(leaf, slot);
3724 if (slot >= nritems) {
3725 btrfs_print_leaf(leaf);
3726 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
3731 /*
3732 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3733 */
3734 /* first correct the data pointers */
3735 btrfs_init_map_token(&token, leaf);
3736 for (i = slot; i < nritems; i++) {
3737 u32 ioff;
3738 item = btrfs_item_nr(i);
3740 ioff = btrfs_token_item_offset(&token, item);
3741 btrfs_set_token_item_offset(&token, item, ioff - data_size);
3742 }
3744 /* shift the data */
3745 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3746 data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
3747 data_end, old_data - data_end);
3749 data_end = old_data;
3750 old_size = btrfs_item_size_nr(leaf, slot);
3751 item = btrfs_item_nr(slot);
3752 btrfs_set_item_size(leaf, item, old_size + data_size);
3753 btrfs_mark_buffer_dirty(leaf);
3755 if (btrfs_leaf_free_space(leaf) < 0) {
3756 btrfs_print_leaf(leaf);
3757 BUG();
3758 }
3759 }
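/*
 * Usage sketch (illustrative): grow the current item by 8 bytes. The caller
 * must verify there is room in the leaf first, since this BUG()s when the
 * leaf cannot hold the extra data:
 *
 *	if (btrfs_leaf_free_space(path->nodes[0]) >= 8)
 *		btrfs_extend_item(path, 8);
 */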
3761 /**
3762 * setup_items_for_insert - Helper called before inserting one or more items
3763 * to a leaf. Main purpose is to save stack depth by doing the bulk of the work
3764 * in a function that doesn't call btrfs_search_slot
3765 *
3766 * @root: root we are inserting items to
3767 * @path: points to the leaf/slot where we are going to insert new items
3768 * @cpu_key: array of keys for items to be inserted
3769 * @data_size: size of the body of each item we are going to insert
3770 * @nr: size of @cpu_key/@data_size arrays
3771 */
3772 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
3773 const struct btrfs_key *cpu_key, u32 *data_size,
3776 struct btrfs_fs_info *fs_info = root->fs_info;
3777 struct btrfs_item *item;
3780 unsigned int data_end;
3781 struct btrfs_disk_key disk_key;
3782 struct extent_buffer *leaf;
3784 struct btrfs_map_token token;
3788 for (i = 0; i < nr; i++)
3789 total_data += data_size[i];
3790 total_size = total_data + (nr * sizeof(struct btrfs_item));
3792 if (path->slots[0] == 0) {
3793 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3794 fixup_low_keys(path, &disk_key, 1);
3795 }
3796 btrfs_unlock_up_safe(path, 1);
3798 leaf = path->nodes[0];
3799 slot = path->slots[0];
3801 nritems = btrfs_header_nritems(leaf);
3802 data_end = leaf_data_end(leaf);
3804 if (btrfs_leaf_free_space(leaf) < total_size) {
3805 btrfs_print_leaf(leaf);
3806 btrfs_crit(fs_info, "not enough freespace need %u have %d",
3807 total_size, btrfs_leaf_free_space(leaf));
3811 btrfs_init_map_token(&token, leaf);
3812 if (slot != nritems) {
3813 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3815 if (old_data < data_end) {
3816 btrfs_print_leaf(leaf);
3818 "item at slot %d with data offset %u beyond data end of leaf %u",
3819 slot, old_data, data_end);
3823 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3825 /* first correct the data pointers */
3826 for (i = slot; i < nritems; i++) {
3829 item = btrfs_item_nr(i);
3830 ioff = btrfs_token_item_offset(&token, item);
3831 btrfs_set_token_item_offset(&token, item,
3834 /* shift the items */
3835 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3836 btrfs_item_nr_offset(slot),
3837 (nritems - slot) * sizeof(struct btrfs_item));
3839 /* shift the data */
3840 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3841 data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
3842 data_end, old_data - data_end);
3843 data_end = old_data;
3846 /* setup the item for the new data */
3847 for (i = 0; i < nr; i++) {
3848 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3849 btrfs_set_item_key(leaf, &disk_key, slot + i);
3850 item = btrfs_item_nr(slot + i);
3851 data_end -= data_size[i];
3852 btrfs_set_token_item_offset(&token, item, data_end);
3853 btrfs_set_token_item_size(&token, item, data_size[i]);
3856 btrfs_set_header_nritems(leaf, nritems + nr);
3857 btrfs_mark_buffer_dirty(leaf);
3859 if (btrfs_leaf_free_space(leaf) < 0) {
3860 btrfs_print_leaf(leaf);
3861 BUG();
3862 }
3863 }
3865 /*
3866 * Given a key and some data, insert items into the tree.
3867 * This does all the path init required, making room in the tree if needed.
3868 */
3869 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3870 struct btrfs_root *root,
3871 struct btrfs_path *path,
3872 const struct btrfs_key *cpu_key, u32 *data_size,
3881 for (i = 0; i < nr; i++)
3882 total_data += data_size[i];
3884 total_size = total_data + (nr * sizeof(struct btrfs_item));
3885 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3886 if (ret == 0)
3887 return -EEXIST;
3888 if (ret < 0)
3889 return ret;
3891 slot = path->slots[0];
3892 BUG_ON(slot < 0);
3894 setup_items_for_insert(root, path, cpu_key, data_size, nr);
3895 return 0;
3896 }
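/*
 * Usage sketch (illustrative only): insert two empty items in a single leaf
 * operation and fill their bodies afterwards. The keys and sizes here are
 * hypothetical example values:
 *
 *	struct btrfs_key keys[2];	// caller fills in two ordered keys
 *	u32 sizes[2] = { 16, 32 };
 *
 *	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
 *	// on success, path points at the first new (still unwritten) item
 */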
3898 /*
3899 * Given a key and some data, insert an item into the tree.
3900 * This does all the path init required, making room in the tree if needed.
3901 */
3902 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3903 const struct btrfs_key *cpu_key, void *data,
3904 u32 data_size)
3905 {
3906 int ret = 0;
3907 struct btrfs_path *path;
3908 struct extent_buffer *leaf;
3909 unsigned long ptr;
3911 path = btrfs_alloc_path();
3912 if (!path)
3913 return -ENOMEM;
3914 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3915 if (!ret) {
3916 leaf = path->nodes[0];
3917 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3918 write_extent_buffer(leaf, data, ptr, data_size);
3919 btrfs_mark_buffer_dirty(leaf);
3920 }
3921 btrfs_free_path(path);
3922 return ret;
3923 }
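/*
 * Usage sketch (illustrative): insert a fully formed item body in one call.
 * `item` stands in for any on-disk structure and `objectid` is hypothetical:
 *
 *	struct btrfs_key key = { .objectid = objectid,
 *				 .type = BTRFS_ROOT_ITEM_KEY,
 *				 .offset = 0 };
 *
 *	ret = btrfs_insert_item(trans, root, &key, &item, sizeof(item));
 */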
3925 /*
3926 * delete the pointer from a given node.
3928 * the tree should have been previously balanced so the deletion does not
3929 * empty a node.
3930 */
3931 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
3932 int level, int slot)
3934 struct extent_buffer *parent = path->nodes[level];
3938 nritems = btrfs_header_nritems(parent);
3939 if (slot != nritems - 1) {
3941 ret = btrfs_tree_mod_log_insert_move(parent, slot,
3942 slot + 1, nritems - slot - 1);
3945 memmove_extent_buffer(parent,
3946 btrfs_node_key_ptr_offset(slot),
3947 btrfs_node_key_ptr_offset(slot + 1),
3948 sizeof(struct btrfs_key_ptr) *
3949 (nritems - slot - 1));
3951 ret = btrfs_tree_mod_log_insert_key(parent, slot,
3952 BTRFS_MOD_LOG_KEY_REMOVE, GFP_NOFS);
3957 btrfs_set_header_nritems(parent, nritems);
3958 if (nritems == 0 && parent == root->node) {
3959 BUG_ON(btrfs_header_level(root->node) != 1);
3960 /* just turn the root into a leaf and break */
3961 btrfs_set_header_level(root->node, 0);
3962 } else if (slot == 0) {
3963 struct btrfs_disk_key disk_key;
3965 btrfs_node_key(parent, &disk_key, 0);
3966 fixup_low_keys(path, &disk_key, level + 1);
3968 btrfs_mark_buffer_dirty(parent);
3971 /*
3972 * a helper function to delete the leaf pointed to by path->slots[1] and
3973 * path->nodes[1].
3975 * This deletes the pointer in path->nodes[1] and frees the leaf
3976 * block extent. zero is returned if it all worked out, < 0 otherwise.
3978 * The path must have already been setup for deleting the leaf, including
3979 * all the proper balancing. path->nodes[1] must be locked.
3980 */
3981 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
3982 struct btrfs_root *root,
3983 struct btrfs_path *path,
3984 struct extent_buffer *leaf)
3985 {
3986 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3987 del_ptr(root, path, 1, path->slots[1]);
3989 /*
3990 * btrfs_free_extent is expensive, we want to make sure we
3991 * aren't holding any locks when we call it
3992 */
3993 btrfs_unlock_up_safe(path, 0);
3995 root_sub_used(root, leaf->len);
3997 atomic_inc(&leaf->refs);
3998 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3999 free_extent_buffer_stale(leaf);
4000 }
4001 /*
4002 * delete the item at the leaf level in path. If that empties
4003 * the leaf, remove it from the tree
4004 */
4005 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4006 struct btrfs_path *path, int slot, int nr)
4007 {
4008 struct btrfs_fs_info *fs_info = root->fs_info;
4009 struct extent_buffer *leaf;
4010 struct btrfs_item *item;
4011 u32 last_off;
4012 u32 dsize = 0;
4013 int ret = 0;
4014 int wret;
4015 int i;
4016 u32 nritems;
4018 leaf = path->nodes[0];
4019 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4021 for (i = 0; i < nr; i++)
4022 dsize += btrfs_item_size_nr(leaf, slot + i);
4024 nritems = btrfs_header_nritems(leaf);
4026 if (slot + nr != nritems) {
4027 int data_end = leaf_data_end(leaf);
4028 struct btrfs_map_token token;
4030 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4032 BTRFS_LEAF_DATA_OFFSET + data_end,
4033 last_off - data_end);
4035 btrfs_init_map_token(&token, leaf);
4036 for (i = slot + nr; i < nritems; i++) {
4039 item = btrfs_item_nr(i);
4040 ioff = btrfs_token_item_offset(&token, item);
4041 btrfs_set_token_item_offset(&token, item, ioff + dsize);
4044 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4045 btrfs_item_nr_offset(slot + nr),
4046 sizeof(struct btrfs_item) *
4047 (nritems - slot - nr));
4049 btrfs_set_header_nritems(leaf, nritems - nr);
4052 /* delete the leaf if we've emptied it */
4054 if (leaf == root->node) {
4055 btrfs_set_header_level(leaf, 0);
4057 btrfs_clean_tree_block(leaf);
4058 btrfs_del_leaf(trans, root, path, leaf);
4061 int used = leaf_space_used(leaf, 0, nritems);
4063 struct btrfs_disk_key disk_key;
4065 btrfs_item_key(leaf, &disk_key, 0);
4066 fixup_low_keys(path, &disk_key, 1);
4069 /* delete the leaf if it is mostly empty */
4070 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4071 /* push_leaf_left fixes the path.
4072 * make sure the path still points to our leaf
4073 * for possible call to del_ptr below
4074 */
4075 slot = path->slots[1];
4076 atomic_inc(&leaf->refs);
4078 wret = push_leaf_left(trans, root, path, 1, 1,
4080 if (wret < 0 && wret != -ENOSPC)
4083 if (path->nodes[0] == leaf &&
4084 btrfs_header_nritems(leaf)) {
4085 wret = push_leaf_right(trans, root, path, 1,
4087 if (wret < 0 && wret != -ENOSPC)
4091 if (btrfs_header_nritems(leaf) == 0) {
4092 path->slots[1] = slot;
4093 btrfs_del_leaf(trans, root, path, leaf);
4094 free_extent_buffer(leaf);
4097 /* if we're still in the path, make sure
4098 * we're dirty. Otherwise, one of the
4099 * push_leaf functions must have already
4100 * dirtied this buffer
4101 */
4102 if (path->nodes[0] == leaf)
4103 btrfs_mark_buffer_dirty(leaf);
4104 free_extent_buffer(leaf);
4105 }
4106 } else {
4107 btrfs_mark_buffer_dirty(leaf);
4108 }
4109 }
4110 return ret;
4111 }
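/*
 * Usage sketch (illustrative): delete the single item the path points at,
 * which is what the btrfs_del_item() wrapper in ctree.h boils down to:
 *
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 *	if (ret == 0)
 *		btrfs_release_path(path);
 */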
4113 /*
4114 * search the tree again to find a leaf with lesser keys
4115 * returns 0 if it found something or 1 if there are no lesser leaves.
4116 * returns < 0 on io errors.
4118 * This may release the path, and so you may lose any locks held at the
4119 * time you call it.
4120 */
4121 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4122 {
4123 struct btrfs_key key;
4124 struct btrfs_disk_key found_key;
4125 int ret;
4127 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4129 if (key.offset > 0) {
4130 key.offset--;
4131 } else if (key.type > 0) {
4132 key.type--;
4133 key.offset = (u64)-1;
4134 } else if (key.objectid > 0) {
4135 key.objectid--;
4136 key.type = (u8)-1;
4137 key.offset = (u64)-1;
4138 } else {
4139 return 1;
4140 }
4142 btrfs_release_path(path);
4143 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4144 if (ret < 0)
4145 return ret;
4146 btrfs_item_key(path->nodes[0], &found_key, 0);
4147 ret = comp_keys(&found_key, &key);
4148 /*
4149 * We might have had an item with the previous key in the tree right
4150 * before we released our path. And after we released our path, that
4151 * item might have been pushed to the first slot (0) of the leaf we
4152 * were holding due to a tree balance. Alternatively, an item with the
4153 * previous key can exist as the only element of a leaf (big fat item).
4154 * Therefore account for these 2 cases, so that our callers (like
4155 * btrfs_previous_item) don't miss an existing item with a key matching
4156 * the previous key we computed above.
4157 */
4158 if (ret <= 0)
4159 return 0;
4160 return 1;
4161 }
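/*
 * Usage sketch (illustrative): step backwards one leaf while scanning,
 * mirroring what btrfs_previous_item() does when it runs off slot 0:
 *
 *	if (path->slots[0] == 0) {
 *		ret = btrfs_prev_leaf(root, path);
 *		if (ret != 0)
 *			return ret;	// 1: no lesser leaf, < 0: io error
 *	} else {
 *		path->slots[0]--;
 *	}
 */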
4163 /*
4164 * A helper function to walk down the tree starting at min_key, and looking
4165 * for nodes or leaves that have a minimum transaction id.
4166 * This is used by the btree defrag code, and tree logging
4168 * This does not cow, but it does stuff the starting key it finds back
4169 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4170 * key and get a writable path.
4172 * This honors path->lowest_level to prevent descent past a given level
4175 * min_trans indicates the oldest transaction that you are interested
4176 * in walking through. Any nodes or leaves older than min_trans are
4177 * skipped over (without reading them).
4179 * returns zero if something useful was found, < 0 on error and 1 if there
4180 * was nothing in the tree that matched the search criteria.
4181 */
4182 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4183 struct btrfs_path *path,
4184 u64 min_trans)
4185 {
4186 struct extent_buffer *cur;
4187 struct btrfs_key found_key;
4188 int slot;
4189 int sret;
4190 u32 nritems;
4191 int level;
4192 int ret = 1;
4193 int keep_locks = path->keep_locks;
4195 path->keep_locks = 1;
4197 cur = btrfs_read_lock_root_node(root);
4198 level = btrfs_header_level(cur);
4199 WARN_ON(path->nodes[level]);
4200 path->nodes[level] = cur;
4201 path->locks[level] = BTRFS_READ_LOCK;
4203 if (btrfs_header_generation(cur) < min_trans) {
4208 nritems = btrfs_header_nritems(cur);
4209 level = btrfs_header_level(cur);
4210 sret = btrfs_bin_search(cur, min_key, &slot);
4216 /* at the lowest level, we're done, setup the path and exit */
4217 if (level == path->lowest_level) {
4218 if (slot >= nritems)
4221 path->slots[level] = slot;
4222 btrfs_item_key_to_cpu(cur, &found_key, slot);
4225 if (sret && slot > 0)
4226 slot--;
4227 /*
4228 * check this node pointer against the min_trans parameters.
4229 * If it is too old, skip to the next one.
4230 */
4231 while (slot < nritems) {
4234 gen = btrfs_node_ptr_generation(cur, slot);
4235 if (gen < min_trans) {
4241 find_next_key:
4242 /*
4243 * we didn't find a candidate key in this node, walk forward
4244 * and find another one
4245 */
4246 if (slot >= nritems) {
4247 path->slots[level] = slot;
4248 sret = btrfs_find_next_key(root, path, min_key, level,
4251 btrfs_release_path(path);
4257 /* save our key for returning back */
4258 btrfs_node_key_to_cpu(cur, &found_key, slot);
4259 path->slots[level] = slot;
4260 if (level == path->lowest_level) {
4264 cur = btrfs_read_node_slot(cur, slot);
4270 btrfs_tree_read_lock(cur);
4272 path->locks[level - 1] = BTRFS_READ_LOCK;
4273 path->nodes[level - 1] = cur;
4274 unlock_up(path, level, 1, 0, NULL);
4275 }
4276 out:
4277 path->keep_locks = keep_locks;
4278 if (ret == 0) {
4279 btrfs_unlock_up_safe(path, path->lowest_level + 1);
4280 memcpy(min_key, &found_key, sizeof(found_key));
4281 }
4282 return ret;
4283 }
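/*
 * Usage sketch (illustrative): walk every item in a root that is at least
 * as new as transaction `min_trans`, the way the defrag-style scanners
 * drive this helper. The advance logic is deliberately naive; real callers
 * must handle u64 key-offset overflow:
 *
 *	struct btrfs_key min_key = { 0 };	// smallest possible key
 *
 *	while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *		// process the item at path->nodes[0] / path->slots[0]
 *		btrfs_release_path(path);
 *		min_key.offset++;
 *	}
 */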
4285 /*
4286 * this is similar to btrfs_next_leaf, but does not try to preserve
4287 * and fixup the path. It looks for and returns the next key in the
4288 * tree based on the current path and the min_trans parameters.
4290 * 0 is returned if another key is found, < 0 if there are any errors
4291 * and 1 is returned if there are no higher keys in the tree
4293 * path->keep_locks should be set to 1 on the search made before
4294 * calling this function.
4295 */
4296 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4297 struct btrfs_key *key, int level, u64 min_trans)
4300 struct extent_buffer *c;
4302 WARN_ON(!path->keep_locks && !path->skip_locking);
4303 while (level < BTRFS_MAX_LEVEL) {
4304 if (!path->nodes[level])
4307 slot = path->slots[level] + 1;
4308 c = path->nodes[level];
4310 if (slot >= btrfs_header_nritems(c)) {
4313 struct btrfs_key cur_key;
4314 if (level + 1 >= BTRFS_MAX_LEVEL ||
4315 !path->nodes[level + 1])
4318 if (path->locks[level + 1] || path->skip_locking) {
4323 slot = btrfs_header_nritems(c) - 1;
4325 btrfs_item_key_to_cpu(c, &cur_key, slot);
4327 btrfs_node_key_to_cpu(c, &cur_key, slot);
4329 orig_lowest = path->lowest_level;
4330 btrfs_release_path(path);
4331 path->lowest_level = level;
4332 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4334 path->lowest_level = orig_lowest;
4338 c = path->nodes[level];
4339 slot = path->slots[level];
4346 btrfs_item_key_to_cpu(c, key, slot);
4348 u64 gen = btrfs_node_ptr_generation(c, slot);
4350 if (gen < min_trans) {
4351 slot++;
4352 goto next;
4353 }
4354 btrfs_node_key_to_cpu(c, key, slot);
4355 }
4356 return 0;
4357 }
4358 return 1;
4359 }
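/*
 * Usage sketch (illustrative): after a search done with path->keep_locks
 * set, peek at the key that follows the current position without moving
 * the path itself:
 *
 *	struct btrfs_key next_key;
 *
 *	if (btrfs_find_next_key(root, path, &next_key, 0, 0) == 0)
 *		;	// next_key now holds the next level-0 key
 */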
4361 /*
4362 * search the tree again to find a leaf with greater keys
4363 * returns 0 if it found something or 1 if there are no greater leaves.
4364 * returns < 0 on io errors.
4365 */
4366 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4367 {
4368 return btrfs_next_old_leaf(root, path, 0);
4369 }
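/*
 * Usage sketch (illustrative): the canonical full-tree item walk built on
 * btrfs_search_slot() plus btrfs_next_leaf():
 *
 *	struct btrfs_key key;
 *	int ret;
 *
 *	while (1) {
 *		if (path->slots[0] >=
 *		    btrfs_header_nritems(path->nodes[0])) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)	// 1: no more leaves, < 0: error
 *				break;
 *		}
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *		// ... handle the item ...
 *		path->slots[0]++;
 *	}
 */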
4371 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
4372 u64 time_seq)
4373 {
4374 int slot;
4375 int level;
4376 struct extent_buffer *c;
4377 struct extent_buffer *next;
4378 struct btrfs_key key;
4379 u32 nritems;
4380 int ret;
4381 int i;
4383 nritems = btrfs_header_nritems(path->nodes[0]);
4384 if (nritems == 0)
4385 return 1;
4387 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4388 again:
4389 level = 1;
4390 next = NULL;
4391 btrfs_release_path(path);
4393 path->keep_locks = 1;
4395 if (time_seq)
4396 ret = btrfs_search_old_slot(root, &key, path, time_seq);
4397 else
4398 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4399 path->keep_locks = 0;
4401 if (ret < 0)
4402 return ret;
4404 nritems = btrfs_header_nritems(path->nodes[0]);
4406 * by releasing the path above we dropped all our locks. A balance
4407 * could have added more items next to the key that used to be
4408 * at the very end of the block. So, check again here and
4409 * advance the path if there are now more items available.
4411 if (nritems > 0 && path->slots[0] < nritems - 1) {
4412 path->slots[0]++;
4413 ret = 0;
4414 goto done;
4415 }
4417 /*
4418 * So the above check misses one case:
4419 * - after releasing the path above, someone has removed the item that
4420 * used to be at the very end of the block, and balance between leafs
4421 * gets another one with bigger key.offset to replace it.
4423 * This one should be returned as well, or we can get leaf corruption
4424 * later(esp. in __btrfs_drop_extents()).
4426 * And a bit more explanation about this check,
4427 * with ret > 0, the key isn't found, the path points to the slot
4428 * where it should be inserted, so the path->slots[0] item must be the
4429 * bigger one.
4430 */
4431 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
4432 ret = 0;
4433 goto done;
4434 }
4436 while (level < BTRFS_MAX_LEVEL) {
4437 if (!path->nodes[level]) {
4442 slot = path->slots[level] + 1;
4443 c = path->nodes[level];
4444 if (slot >= btrfs_header_nritems(c)) {
4446 if (level == BTRFS_MAX_LEVEL) {
4455 * Our current level is where we're going to start from, and to
4456 * make sure lockdep doesn't complain we need to drop our locks
4457 * and nodes from 0 to our current level.
4459 for (i = 0; i < level; i++) {
4460 if (path->locks[level]) {
4461 btrfs_tree_read_unlock(path->nodes[i]);
4464 free_extent_buffer(path->nodes[i]);
4465 path->nodes[i] = NULL;
4469 ret = read_block_for_search(root, path, &next, level,
4475 btrfs_release_path(path);
4479 if (!path->skip_locking) {
4480 ret = btrfs_try_tree_read_lock(next);
4481 if (!ret && time_seq) {
4483 * If we don't get the lock, we may be racing
4484 * with push_leaf_left, holding that lock while
4485 * itself waiting for the leaf we've currently
4486 * locked. To solve this situation, we give up
4487 * on our lock and cycle.
4489 free_extent_buffer(next);
4490 btrfs_release_path(path);
4495 btrfs_tree_read_lock(next);
4499 path->slots[level] = slot;
4502 path->nodes[level] = next;
4503 path->slots[level] = 0;
4504 if (!path->skip_locking)
4505 path->locks[level] = BTRFS_READ_LOCK;
4509 ret = read_block_for_search(root, path, &next, level,
4515 btrfs_release_path(path);
4519 if (!path->skip_locking)
4520 btrfs_tree_read_lock(next);
4522 ret = 0;
4523 done:
4524 unlock_up(path, 0, 1, 0, NULL);
4526 return ret;
4527 }
4529 /*
4530 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4531 * searching until it gets past min_objectid or finds an item of 'type'
4533 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4534 */
4535 int btrfs_previous_item(struct btrfs_root *root,
4536 struct btrfs_path *path, u64 min_objectid,
4537 int type)
4538 {
4539 struct btrfs_key found_key;
4540 struct extent_buffer *leaf;
4541 u32 nritems;
4542 int ret;
4544 while (1) {
4545 if (path->slots[0] == 0) {
4546 ret = btrfs_prev_leaf(root, path);
4547 if (ret != 0)
4548 return ret;
4549 } else {
4550 path->slots[0]--;
4551 }
4552 leaf = path->nodes[0];
4553 nritems = btrfs_header_nritems(leaf);
4554 if (nritems == 0)
4555 return 1;
4556 if (path->slots[0] == nritems)
4557 path->slots[0]--;
4559 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4560 if (found_key.objectid < min_objectid)
4561 break;
4562 if (found_key.type == type)
4563 return 0;
4564 if (found_key.objectid == min_objectid &&
4565 found_key.type < type)
4566 break;
4567 }
4568 return 1;
4569 }
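/*
 * Usage sketch (illustrative): find the inode item that precedes a known
 * key position. `ino` is a hypothetical objectid:
 *
 *	ret = btrfs_previous_item(root, path, ino, BTRFS_INODE_ITEM_KEY);
 *	if (ret == 0)
 *		;	// path now points at an item of the requested type
 *			// whose objectid is >= ino
 */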
4571 /*
4572 * search in extent tree to find a previous Metadata/Data extent item with
4573 * min objectid.
4575 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4576 */
4577 int btrfs_previous_extent_item(struct btrfs_root *root,
4578 struct btrfs_path *path, u64 min_objectid)
4580 struct btrfs_key found_key;
4581 struct extent_buffer *leaf;
4586 if (path->slots[0] == 0) {
4587 ret = btrfs_prev_leaf(root, path);
4593 leaf = path->nodes[0];
4594 nritems = btrfs_header_nritems(leaf);
4597 if (path->slots[0] == nritems)
4600 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4601 if (found_key.objectid < min_objectid)
4603 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
4604 found_key.type == BTRFS_METADATA_ITEM_KEY)
4606 if (found_key.objectid == min_objectid &&
4607 found_key.type < BTRFS_EXTENT_ITEM_KEY)