// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"
#include "inode-item.h"

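/*
 * Tunables that drive background flushing of delayed items:
 * BTRFS_DELAYED_BACKGROUND is the number of pending items above which
 * btrfs_balance_delayed_items() kicks off async work, BTRFS_DELAYED_WRITEBACK
 * is the point at which callers block and wait for the count to drop, and
 * BTRFS_DELAYED_BATCH is the granularity used when waking waiters as items
 * complete.
 */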
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

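/*
 * Look up the delayed node for an inode: first the pointer cached in the
 * btrfs inode itself, then the per-root xarray. A reference is taken for
 * the caller (and, on the xarray path, a second one for the inode cache).
 */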
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = xa_load(&root->delayed_nodes, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the xarray. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the xarray at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the xarray, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* Cached in the inode and can be accessed */
	refcount_set(&node->refs, 2);

	spin_lock(&root->inode_lock);
	ret = xa_insert(&root->delayed_nodes, ino, node, GFP_NOFS);
	if (ret) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		if (ret != -EBUSY)
			return ERR_PTR(ret);
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		xa_erase(&root->delayed_nodes, delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

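/*
 * A delayed item's payload (e.g. a stack dir item plus its name) lives
 * inline, right after struct btrfs_delayed_item, hence the data_len slack
 * in the kmalloc() below.
 */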
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
					   NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	int cmp;
	bool leftmost = true;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
			delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

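/*
 * Account one finished delayed item and wake any waiter in
 * btrfs_balance_delayed_items(), either when the number of in-flight items
 * drops below the background threshold or once per completed batch.
 */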
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/* Not associated with any delayed_node */
	if (!delayed_item->delayed_node)
		return;
	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction. So no need to
	 * reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

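/*
 * Reserve space for one inode item update in the delayed block reserve.
 * Unlike delayed items, the caller may not have reserved transaction space
 * (e.g. via btrfs_join_transaction()), so this may have to reserve from
 * scratch instead of migrating from the transaction's reserve.
 */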
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
					BTRFS_QGROUP_RSV_META_PREALLOC, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/* NO_FLUSH could only fail with -ENOSPC */
		ASSERT(ret == 0 || ret == -ENOSPC);
		if (ret)
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
	} else {
		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	}

	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      node->inode_id, num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_node *node,
						bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * Insert a single delayed item or a batch of delayed items that have consecutive
 * keys if they exist.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *first_item)
{
	LIST_HEAD(item_list);
	struct btrfs_delayed_item *curr;
	struct btrfs_delayed_item *next;
	const int max_size = BTRFS_LEAF_DATA_SIZE(root->fs_info);
	struct btrfs_item_batch batch;
	int total_size;
	char *ins_data = NULL;
	int ret;

	list_add_tail(&first_item->tree_list, &item_list);
	batch.total_data_size = first_item->data_len;
	batch.nr = 1;
	total_size = first_item->data_len + sizeof(struct btrfs_item);
	curr = first_item;

	while (true) {
		int next_size;

		next = __btrfs_next_delayed_item(curr);
		if (!next || !btrfs_is_continuous_delayed_item(curr, next))
			break;

		next_size = next->data_len + sizeof(struct btrfs_item);
		if (total_size + next_size > max_size)
			break;

		list_add_tail(&next->tree_list, &item_list);
		batch.nr++;
		total_size += next_size;
		batch.total_data_size += next->data_len;
		curr = next;
	}

	if (batch.nr == 1) {
		batch.keys = &first_item->key;
		batch.data_sizes = &first_item->data_len;
	} else {
		struct btrfs_key *ins_keys;
		u32 *ins_sizes;
		int i = 0;

		ins_data = kmalloc(batch.nr * sizeof(u32) +
				   batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
		if (!ins_data) {
			ret = -ENOMEM;
			goto out;
		}
		ins_sizes = (u32 *)ins_data;
		ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
		batch.keys = ins_keys;
		batch.data_sizes = ins_sizes;
		list_for_each_entry(curr, &item_list, tree_list) {
			ins_keys[i] = curr->key;
			ins_sizes[i] = curr->data_len;
			i++;
		}
	}

	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret)
		goto out;

	list_for_each_entry(curr, &item_list, tree_list) {
		char *data_ptr;

		data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
		write_extent_buffer(path->nodes[0], &curr->data,
				    (unsigned long)data_ptr, curr->data_len);
		path->slots[0]++;
	}

	/*
	 * Now release our path before releasing the delayed items and their
	 * metadata reservations, so that we don't block other tasks for more
	 * time than needed.
	 */
	btrfs_release_path(path);

	list_for_each_entry_safe(curr, next, &item_list, tree_list) {
		list_del(&curr->tree_list);
		btrfs_delayed_item_release_metadata(root, curr);
		btrfs_release_delayed_item(curr);
	}
out:
	kfree(ins_data);
	return ret;
}

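/*
 * Flush all pending insertion items of a delayed node. The node's mutex is
 * taken for each batch and dropped between batches so other tasks can make
 * progress.
 */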
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	int ret = 0;

	while (ret == 0) {
		struct btrfs_delayed_item *curr;

		mutex_lock(&node->mutex);
		curr = __btrfs_first_delayed_insertion_item(node);
		if (!curr) {
			mutex_unlock(&node->mutex);
			break;
		}
		ret = btrfs_insert_delayed_item(trans, root, path, curr);
		mutex_unlock(&node->mutex);
	}

	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
		struct btrfs_delayed_root *delayed_root;

		ASSERT(delayed_node->root);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

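/*
 * Copy the in-memory inode item into the inode item in the fs tree and, if
 * BTRFS_DELAYED_NODE_DEL_IREF is set, delete the inode ref/extref in the
 * same pass. Must be called with the delayed node's mutex held.
 */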
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto out;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	/*
	 * If we fail to update the delayed inode we need to abort the
	 * transaction, because we could leave the inode with the improper
	 * counts behind.
	 */
	if (ret && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (TRANS_ABORTED(trans))
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || nr--)) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

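/*
 * Async flushing: btrfs_wq_run_delayed_node() queues one of these work items
 * on fs_info->delayed_workers, and the worker commits prepared delayed nodes
 * until the pending item count falls below BTRFS_DELAYED_BACKGROUND / 2 or
 * the requested number of nodes has been handled.
 */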
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
			NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so a metadata reservation failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->root_key.objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

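/*
 * If the dir index being deleted is still just a delayed insertion item,
 * the two cancel out: drop the insertion item and its metadata reservation
 * instead of queueing a deletion item. Returns 0 if that happened, 1 if
 * there was nothing to cancel.
 */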
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(node->root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
						  &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so a metadata reservation failure is impossible.
	 */
	if (ret < 0) {
		btrfs_err(trans->fs_info,
"metadata reservation failed for delayed dir item deletion, should have been reserved");
		btrfs_release_delayed_item(item);
		goto end;
	}

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->root_key.objectid,
			  node->inode_id, ret);
		btrfs_delayed_item_release_metadata(dir->root, item);
		btrfs_release_delayed_item(item);
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	btrfs_inode_lock(inode, 0);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = fs_ftype_to_dtype(di->type);
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	u64 flags;

	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_stack_inode_flags(inode_item, flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item,
				      &inode->vfs_inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems. This means we also can't do
	 * delayed inode refs
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for the inode who has only
	 *   one link(i_nlink == 1), it means there is only one inode ref.
	 *   And in most case, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   We also needn't worry about enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

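/*
 * Drop every pending insertion/deletion item and the delayed inode update of
 * a node, releasing their metadata reservations. Used when the inode or the
 * whole root is going away and the pending work must not be executed.
 */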
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	unsigned long index = 0;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_node *delayed_nodes[8];

	while (1) {
		int n = 0;

		spin_lock(&root->inode_lock);
		if (xa_empty(&root->delayed_nodes)) {
			spin_unlock(&root->inode_lock);
			return;
		}

		xa_for_each_start(&root->delayed_nodes, index, delayed_node, index) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below
			 */
			if (refcount_inc_not_zero(&delayed_node->refs)) {
				delayed_nodes[n] = delayed_node;
				n++;
			}
			if (n >= ARRAY_SIZE(delayed_nodes))
				break;
		}
		index++;
		spin_unlock(&root->inode_lock);

		for (int i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}