// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"

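/*
 * Thresholds used by btrfs_balance_delayed_items() and finish_one_item():
 * background flushing kicks in once BTRFS_DELAYED_BACKGROUND items are
 * pending, callers block once the backlog reaches BTRFS_DELAYED_WRITEBACK,
 * and completions are accounted in units of BTRFS_DELAYED_BATCH.
 */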
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;

	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

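/*
 * Two delayed items are "continuous" when both are dir index items of the
 * same directory and their index offsets are adjacent.  Such runs can be
 * inserted into or deleted from a leaf in a single batch.
 */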
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree.  In this case, the refcount
		 * was zero and it should never go back to one.  Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero.  If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

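/*
 * Grab the first delayed node on the root's node_list, taking an extra
 * reference that the caller must drop with btrfs_release_delayed_node().
 */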
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up.  We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

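/*
 * Allocate a delayed item together with its payload in one chunk; the
 * item's data[] array provides @data_len bytes of storage right behind
 * the struct.
 */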
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root: the rb-root of the delayed node to search in
 * @key:  the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
					   NULL, NULL);
}

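/*
 * Link @ins into the delayed node's insertion or deletion rb-tree
 * (depending on @action), keyed by its btrfs_key.  For dir index
 * insertions, index_cnt is bumped so that newly allocated index numbers
 * stay monotonically increasing.
 */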
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	int cmp;
	bool leftmost = true;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

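/*
 * Account one finished delayed item and wake up any waiters, either when
 * the backlog drops below the background threshold or on every
 * BTRFS_DELAYED_BATCH-th completion.
 */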
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/* Not associated with any delayed_node */
	if (!delayed_item->delayed_node)
		return;
	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
					struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction.  So no need to
	 * reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta_prealloc(root,
				fs_info->nodesize, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN) {
			ret = -ENOSPC;
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		} else {
			btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node,
						 bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but that might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper can just do simple insertions that needn't extend an item for
 * new data, such as directory name index insertion and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	char *ptr;
	int ret;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

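/*
 * Delete a run of continuous dir index items that all live in the leaf
 * currently pointed to by @path, then release the corresponding delayed
 * items and their metadata reservations.
 */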
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	unsigned int nofs_flag;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	memalloc_nofs_restore(nofs_flag);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

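/*
 * Flush one delayed node: run the pending insertions first, then the
 * pending deletions, and finally write back the delayed inode item.
 */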
static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (TRANS_ABORTED(trans))
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

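/*
 * Work item handed to the delayed_workers workqueue; nr == 0 means keep
 * flushing prepared nodes until the item count falls below
 * BTRFS_DELAYED_BACKGROUND / 2, capped at BTRFS_DELAYED_WRITEBACK nodes.
 */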
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		path->leave_spinning = 1;
		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
			NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

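/*
 * Throttle delayed-item producers: below BTRFS_DELAYED_BACKGROUND nothing
 * is done, at BTRFS_DELAYED_WRITEBACK the caller kicks a full flush and
 * waits for progress, and in between a background flush of
 * BTRFS_DELAYED_BATCH nodes is queued.
 */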
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * we have reserved enough space when we started a new transaction,
	 * so a metadata reservation failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->root_key.objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(node->root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

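/*
 * Deleting a dir index first tries to cancel a matching delayed insertion
 * that never reached disk; only when no such insertion exists do we queue
 * a delayed deletion item for the on-disk index.
 */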
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
						  &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * we have reserved enough space when we started a new transaction,
	 * so a metadata reservation failure is impossible.
	 */
	if (ret < 0) {
		btrfs_err(trans->fs_info,
"metadata reservation failed for delayed dir item deletion, should have been reserved");
		btrfs_release_delayed_item(item);
		goto end;
	}

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->root_key.objectid,
			  node->inode_id, ret);
		btrfs_delayed_item_release_metadata(dir->root, item);
		btrfs_release_delayed_item(item);
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

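/*
 * Readdir helper: return 1 if @index is covered by a pending delayed
 * deletion.  del_list is sorted by index, so we can stop scanning at the
 * first entry beyond @index.
 */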
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = fs_ftype_to_dtype(di->type);
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

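/*
 * Fill the VFS inode from a dirty delayed node instead of the (possibly
 * stale) inode item in the btree.  Returns -ENOENT when no dirty delayed
 * state exists for this inode.
 */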
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about the enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

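/*
 * Throw away all pending work on a delayed node: release the metadata
 * reservation of every queued item and drop the dirty inode/iref state.
 * Used when evicting an inode and when tearing down a root or the fs.
 */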
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;
		for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}