// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"

#define BTRFS_DELAYED_WRITEBACK         512
#define BTRFS_DELAYED_BACKGROUND        128
#define BTRFS_DELAYED_BATCH             16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
        delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
                                        sizeof(struct btrfs_delayed_node),
                                        0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!delayed_node_cache)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
        kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                struct btrfs_root *root, u64 inode_id)
{
        delayed_node->root = root;
        delayed_node->inode_id = inode_id;
        refcount_set(&delayed_node->refs, 0);
        delayed_node->ins_root = RB_ROOT_CACHED;
        delayed_node->del_root = RB_ROOT_CACHED;
        mutex_init(&delayed_node->mutex);
        INIT_LIST_HEAD(&delayed_node->n_list);
        INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
                                        struct btrfs_delayed_item *item1,
                                        struct btrfs_delayed_item *item2)
{
        if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
            item1->key.objectid == item2->key.objectid &&
            item1->key.type == item2->key.type &&
            item1->key.offset + 1 == item2->key.offset)
                return 1;
        return 0;
}

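/*
 * Look up the delayed node of a btrfs inode and take a reference on it.
 *
 * First check the pointer cached in the inode; if it is not set, fall back
 * to the per-root radix tree. Returns NULL if the inode has no delayed node
 * or if the node is concurrently being torn down.
 */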
static struct btrfs_delayed_node *btrfs_get_delayed_node(
                struct btrfs_inode *btrfs_inode)
{
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(btrfs_inode);
        struct btrfs_delayed_node *node;

        node = READ_ONCE(btrfs_inode->delayed_node);
        if (node) {
                refcount_inc(&node->refs);
                return node;
        }

        spin_lock(&root->inode_lock);
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

        if (node) {
                if (btrfs_inode->delayed_node) {
                        refcount_inc(&node->refs);      /* can be accessed */
                        BUG_ON(btrfs_inode->delayed_node != node);
                        spin_unlock(&root->inode_lock);
                        return node;
                }

                /*
                 * It's possible that we're racing into the middle of removing
                 * this node from the radix tree.  In this case, the refcount
                 * was zero and it should never go back to one.  Just return
                 * NULL like it was never in the radix at all; our release
                 * function is in the process of removing it.
                 *
                 * Some implementations of refcount_inc refuse to bump the
                 * refcount once it has hit zero.  If we don't do this dance
                 * here, refcount_inc() may decide to just WARN_ONCE() instead
                 * of actually bumping the refcount.
                 *
                 * If this node is properly in the radix, we want to bump the
                 * refcount twice, once for the inode and once for this get
                 * operation.
                 */
                if (refcount_inc_not_zero(&node->refs)) {
                        refcount_inc(&node->refs);
                        btrfs_inode->delayed_node = node;
                } else {
                        node = NULL;
                }

                spin_unlock(&root->inode_lock);
                return node;
        }
        spin_unlock(&root->inode_lock);

        return NULL;
}

/* Will return either the node or ERR_PTR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
                struct btrfs_inode *btrfs_inode)
{
        struct btrfs_delayed_node *node;
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(btrfs_inode);
        int ret;

again:
        node = btrfs_get_delayed_node(btrfs_inode);
        if (node)
                return node;

        node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
        if (!node)
                return ERR_PTR(-ENOMEM);
        btrfs_init_delayed_node(node, root, ino);

        /* cached in the btrfs inode and can be accessed */
        refcount_set(&node->refs, 2);

        ret = radix_tree_preload(GFP_NOFS);
        if (ret) {
                kmem_cache_free(delayed_node_cache, node);
                return ERR_PTR(ret);
        }

        spin_lock(&root->inode_lock);
        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
        if (ret == -EEXIST) {
                spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, node);
                radix_tree_preload_end();
                goto again;
        }
        btrfs_inode->delayed_node = node;
        spin_unlock(&root->inode_lock);
        radix_tree_preload_end();

        return node;
}

/*
 * Call this while holding delayed_node->mutex.
 *
 * If mod = 1, also add this node to the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
                                     struct btrfs_delayed_node *node,
                                     int mod)
{
        spin_lock(&root->lock);
        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                if (!list_empty(&node->p_list))
                        list_move_tail(&node->p_list, &root->prepare_list);
                else if (mod)
                        list_add_tail(&node->p_list, &root->prepare_list);
        } else {
                list_add_tail(&node->n_list, &root->node_list);
                list_add_tail(&node->p_list, &root->prepare_list);
                refcount_inc(&node->refs);      /* inserted into list */
                root->nodes++;
                set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
        }
        spin_unlock(&root->lock);
}

/* Call this while holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
                                       struct btrfs_delayed_node *node)
{
        spin_lock(&root->lock);
        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                root->nodes--;
                refcount_dec(&node->refs);      /* not in the list */
                list_del_init(&node->n_list);
                if (!list_empty(&node->p_list))
                        list_del_init(&node->p_list);
                clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
        }
        spin_unlock(&root->lock);
}

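/*
 * Grab the first delayed node on the delayed root's node list, taking a
 * reference on it. Returns NULL if the list is empty.
 */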
static struct btrfs_delayed_node *btrfs_first_delayed_node(
                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->node_list))
                goto out;

        p = delayed_root->node_list.next;
        node = list_entry(p, struct btrfs_delayed_node, n_list);
        refcount_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

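/*
 * Return the node after @node on the node list, with a reference held, or
 * NULL if @node is the last one. If @node has already been removed from
 * the list, restart from the head of the list.
 */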
static struct btrfs_delayed_node *btrfs_next_delayed_node(
                                                struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_root *delayed_root;
        struct list_head *p;
        struct btrfs_delayed_node *next = NULL;

        delayed_root = node->root->fs_info->delayed_root;
        spin_lock(&delayed_root->lock);
        if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                /* not in the list */
                if (list_empty(&delayed_root->node_list))
                        goto out;
                p = delayed_root->node_list.next;
        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
                goto out;
        else
                p = node->n_list.next;

        next = list_entry(p, struct btrfs_delayed_node, n_list);
        refcount_inc(&next->refs);
out:
        spin_unlock(&delayed_root->lock);

        return next;
}

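/*
 * Drop a reference on a delayed node. While the node still has pending
 * items it is (re)queued on the delayed root, otherwise it is dequeued.
 * When the last reference goes away, remove the node from the radix tree
 * and free it.
 */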
static void __btrfs_release_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                int mod)
{
        struct btrfs_delayed_root *delayed_root;

        if (!delayed_node)
                return;

        delayed_root = delayed_node->root->fs_info->delayed_root;

        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)
                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
        else
                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
        mutex_unlock(&delayed_node->mutex);

        if (refcount_dec_and_test(&delayed_node->refs)) {
                struct btrfs_root *root = delayed_node->root;

                spin_lock(&root->inode_lock);
                /*
                 * Once our refcount goes to zero, nobody is allowed to bump it
                 * back up.  We can delete it now.
                 */
                ASSERT(refcount_read(&delayed_node->refs) == 0);
                radix_tree_delete(&root->delayed_nodes_tree,
                                  delayed_node->inode_id);
                spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, delayed_node);
        }
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 0);
}

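/*
 * Pop the first node off the delayed root's prepared list, taking a
 * reference on it. Returns NULL if the prepared list is empty.
 */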
static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
                                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->prepare_list))
                goto out;

        p = delayed_root->prepare_list.next;
        list_del_init(p);
        node = list_entry(p, struct btrfs_delayed_node, p_list);
        refcount_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

static inline void btrfs_release_prepared_delayed_node(
                                        struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 1);
}

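/*
 * Allocate a delayed item with @data_len bytes of payload appended to the
 * structure. The caller owns the single initial reference.
 */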
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
        struct btrfs_delayed_item *item;
        item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
        if (item) {
                item->data_len = data_len;
                item->ins_or_del = 0;
                item->bytes_reserved = 0;
                item->delayed_node = NULL;
                refcount_set(&item->refs, 1);
        }
        return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @root:  the rb-tree root holding the delayed items
 * @key:   the key to look up
 * @prev:  used to store the prev item if the right item isn't found
 * @next:  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we return NULL and fill in the
 * prev and next items instead.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
                                struct rb_root *root,
                                struct btrfs_key *key,
                                struct btrfs_delayed_item **prev,
                                struct btrfs_delayed_item **next)
{
        struct rb_node *node, *prev_node = NULL;
        struct btrfs_delayed_item *delayed_item = NULL;
        int ret = 0;

        node = root->rb_node;

        while (node) {
                delayed_item = rb_entry(node, struct btrfs_delayed_item,
                                        rb_node);
                prev_node = node;
                ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
                if (ret < 0)
                        node = node->rb_right;
                else if (ret > 0)
                        node = node->rb_left;
                else
                        return delayed_item;
        }

        if (prev) {
                if (!prev_node)
                        *prev = NULL;
                else if (ret < 0)
                        *prev = delayed_item;
                else if ((node = rb_prev(prev_node)) != NULL) {
                        *prev = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *prev = NULL;
        }

        if (next) {
                if (!prev_node)
                        *next = NULL;
                else if (ret > 0)
                        *next = delayed_item;
                else if ((node = rb_next(prev_node)) != NULL) {
                        *next = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *next = NULL;
        }
        return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
                                           NULL, NULL);
}

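/*
 * Insert a delayed item into the node's insertion or deletion rb-tree,
 * keyed by the item's btrfs key. Returns -EEXIST if an item with the same
 * key is already queued. For dir index insertions, also bump the node's
 * index_cnt past the new index.
 */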
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
                                    struct btrfs_delayed_item *ins,
                                    int action)
{
        struct rb_node **p, *node;
        struct rb_node *parent_node = NULL;
        struct rb_root_cached *root;
        struct btrfs_delayed_item *item;
        int cmp;
        bool leftmost = true;

        if (action == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_node->ins_root;
        else if (action == BTRFS_DELAYED_DELETION_ITEM)
                root = &delayed_node->del_root;
        else
                BUG();
        p = &root->rb_root.rb_node;
        node = &ins->rb_node;

        while (*p) {
                parent_node = *p;
                item = rb_entry(parent_node, struct btrfs_delayed_item,
                                 rb_node);

                cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
                if (cmp < 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else if (cmp > 0) {
                        p = &(*p)->rb_left;
                } else {
                        return -EEXIST;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        ins->delayed_node = delayed_node;
        ins->ins_or_del = action;

        if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
            action == BTRFS_DELAYED_INSERTION_ITEM &&
            ins->key.offset >= delayed_node->index_cnt)
                        delayed_node->index_cnt = ins->key.offset + 1;

        delayed_node->count++;
        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
        return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
                                              struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
                                             struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_DELETION_ITEM);
}

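/*
 * Account one completed delayed item, and wake up any waiters once the
 * backlog drops below BTRFS_DELAYED_BACKGROUND or another batch of
 * BTRFS_DELAYED_BATCH items has completed.
 */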
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
        int seq = atomic_inc_return(&delayed_root->items_seq);

        /* atomic_dec_return implies a barrier */
        if ((atomic_dec_return(&delayed_root->items) <
            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
                cond_wake_up_nomb(&delayed_root->wait);
}

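/*
 * Unlink a delayed item from its node's rb-tree and account it as
 * finished. Items not attached to a delayed node are ignored.
 */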
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
        struct rb_root_cached *root;
        struct btrfs_delayed_root *delayed_root;

        /* Not associated with any delayed_node */
        if (!delayed_item->delayed_node)
                return;
        delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

        BUG_ON(!delayed_root);
        BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
               delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

        if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_item->delayed_node->ins_root;
        else
                root = &delayed_item->delayed_node->del_root;

        rb_erase_cached(&delayed_item->rb_node, root);
        delayed_item->delayed_node->count--;

        finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
        if (item) {
                __btrfs_remove_delayed_item(item);
                if (refcount_dec_and_test(&item->refs))
                        kfree(item);
        }
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first_cached(&delayed_node->ins_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first_cached(&delayed_node->del_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
                                                struct btrfs_delayed_item *item)
{
        struct rb_node *p;
        struct btrfs_delayed_item *next = NULL;

        p = rb_next(&item->rb_node);
        if (p)
                next = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return next;
}

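/*
 * Reserve metadata space for one delayed item by migrating it from the
 * transaction's block reservation into the global delayed_block_rsv.
 * A no-op when the transaction has no bytes reserved.
 */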
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
                                               struct btrfs_root *root,
                                               struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 num_bytes;
        int ret;

        if (!trans->bytes_reserved)
                return 0;

        src_rsv = trans->block_rsv;
        dst_rsv = &fs_info->delayed_block_rsv;

        num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

        /*
         * Here we migrate space rsv from the transaction rsv, since we have
         * already reserved space when starting the transaction.  So there is
         * no need to reserve qgroup space here.
         */
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
        if (!ret) {
                trace_btrfs_space_reservation(fs_info, "delayed_item",
                                              item->key.objectid,
                                              num_bytes, 1);
                item->bytes_reserved = num_bytes;
        }

        return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
                                                struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *rsv;
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!item->bytes_reserved)
                return;

        rsv = &fs_info->delayed_block_rsv;
        /*
         * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
         * to release/reserve qgroup space.
         */
        trace_btrfs_space_reservation(fs_info, "delayed_item",
                                      item->key.objectid, item->bytes_reserved,
                                      0);
        btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

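/*
 * Reserve metadata space for a delayed inode update. Normally the space is
 * migrated from the transaction's reservation; when the transaction has
 * nothing reserved and the source rsv is not the delalloc rsv, fall back
 * to an explicit non-flushing reservation plus qgroup prealloc.
 */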
static int btrfs_delayed_inode_reserve_metadata(
                                        struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_inode *inode,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;

        src_rsv = trans->block_rsv;
        dst_rsv = &fs_info->delayed_block_rsv;

        num_bytes = btrfs_calc_metadata_size(fs_info, 1);

        /*
         * btrfs_dirty_inode will update the inode under btrfs_join_transaction
         * which, for speed, doesn't reserve space.  This is a problem since we
         * still need to reserve space for this update, so try to reserve the
         * space here.
         *
         * If src_rsv is the delalloc block rsv we'll just let it steal from
         * there, since we always reserve enough in it to update the inode
         * item.
         */
        if (!src_rsv || (!trans->bytes_reserved &&
                         src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
                ret = btrfs_qgroup_reserve_meta_prealloc(root,
                                fs_info->nodesize, true);
                if (ret < 0)
                        return ret;
                ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
                                          BTRFS_RESERVE_NO_FLUSH);
                /*
                 * Since we're under a transaction reserve_metadata_bytes could
                 * try to commit the transaction which will make it return
                 * EAGAIN to make us stop the transaction we have, so return
                 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
                 */
                if (ret == -EAGAIN) {
                        ret = -ENOSPC;
                        btrfs_qgroup_free_meta_prealloc(root, num_bytes);
                }
                if (!ret) {
                        node->bytes_reserved = num_bytes;
                        trace_btrfs_space_reservation(fs_info,
                                                      "delayed_inode",
                                                      btrfs_ino(inode),
                                                      num_bytes, 1);
                } else {
                        btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
                }
                return ret;
        }

        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
        if (!ret) {
                trace_btrfs_space_reservation(fs_info, "delayed_inode",
                                              btrfs_ino(inode), num_bytes, 1);
                node->bytes_reserved = num_bytes;
        }

        return ret;
}

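/*
 * Release the metadata space reserved for a delayed inode update, and
 * either free or convert the corresponding qgroup prealloc, depending on
 * @qgroup_free.
 */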
static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
                                                struct btrfs_delayed_node *node,
                                                bool qgroup_free)
{
        struct btrfs_block_rsv *rsv;

        if (!node->bytes_reserved)
                return;

        rsv = &fs_info->delayed_block_rsv;
        trace_btrfs_space_reservation(fs_info, "delayed_inode",
                                      node->inode_id, node->bytes_reserved, 0);
        btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
        if (qgroup_free)
                btrfs_qgroup_free_meta_prealloc(node->root,
                                node->bytes_reserved);
        else
                btrfs_qgroup_convert_reserved_meta(node->root,
                                node->bytes_reserved);
        node->bytes_reserved = 0;
}

/*
 * This helper inserts a batch of contiguous items into the same leaf,
 * limited by the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        struct btrfs_delayed_item *curr, *next;
        int free_space;
        int total_data_size = 0, total_size = 0;
        struct extent_buffer *leaf;
        char *data_ptr;
        struct btrfs_key *keys;
        u32 *data_size;
        struct list_head head;
        int slot;
        int nitems;
        int i;
        int ret = 0;

        BUG_ON(!path->nodes[0]);

        leaf = path->nodes[0];
        free_space = btrfs_leaf_free_space(leaf);
        INIT_LIST_HEAD(&head);

        next = item;
        nitems = 0;

        /*
         * Count the number of contiguous items that we can insert in a batch.
         */
        while (total_size + next->data_len + sizeof(struct btrfs_item) <=
               free_space) {
                total_data_size += next->data_len;
                total_size += next->data_len + sizeof(struct btrfs_item);
                list_add_tail(&next->tree_list, &head);
                nitems++;

                curr = next;
                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                if (!btrfs_is_continuous_delayed_item(curr, next))
                        break;
        }

        if (!nitems) {
                ret = 0;
                goto out;
        }

        /*
         * We need to allocate some memory, which may sleep, so set all the
         * locked nodes in the path to blocking locks first.
         */
        btrfs_set_path_blocking(path);

        keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
        if (!keys) {
                ret = -ENOMEM;
                goto out;
        }

        data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
        if (!data_size) {
                ret = -ENOMEM;
                goto error;
        }

        /* get keys of all the delayed items */
        i = 0;
        list_for_each_entry(next, &head, tree_list) {
                keys[i] = next->key;
                data_size[i] = next->data_len;
                i++;
        }

        /* insert the keys of the items */
        setup_items_for_insert(root, path, keys, data_size,
                               total_data_size, total_size, nitems);

        /* insert the dir index items */
        slot = path->slots[0];
        list_for_each_entry_safe(curr, next, &head, tree_list) {
                data_ptr = btrfs_item_ptr(leaf, slot, char);
                write_extent_buffer(leaf, &curr->data,
                                    (unsigned long)data_ptr,
                                    curr->data_len);
                slot++;

                btrfs_delayed_item_release_metadata(root, curr);

                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

error:
        kfree(data_size);
        kfree(keys);
out:
        return ret;
}

/*
 * This helper handles plain insertions that don't need to extend an
 * existing item for new data, such as directory index items and inode
 * items.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path,
                                     struct btrfs_delayed_item *delayed_item)
{
        struct extent_buffer *leaf;
        unsigned int nofs_flag;
        char *ptr;
        int ret;

        nofs_flag = memalloc_nofs_save();
        ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
                                      delayed_item->data_len);
        memalloc_nofs_restore(nofs_flag);
        if (ret < 0 && ret != -EEXIST)
                return ret;

        leaf = path->nodes[0];

        ptr = btrfs_item_ptr(leaf, path->slots[0], char);

        write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
                            delayed_item->data_len);
        btrfs_mark_buffer_dirty(leaf);

        btrfs_delayed_item_release_metadata(root, delayed_item);
        return 0;
}

/*
 * Insert one item first; then, if contiguous items follow it, try to
 * insert them into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_insertion_item(node);
        if (!curr)
                goto insert_end;

        ret = btrfs_insert_delayed_item(trans, root, path, curr);
        if (ret < 0) {
                btrfs_release_path(path);
                goto insert_end;
        }

        prev = curr;
        curr = __btrfs_next_delayed_item(prev);
        if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
                /* insert the continuous items into the same leaf */
                path->slots[0]++;
                btrfs_batch_insert_items(root, path, curr);
        }
        btrfs_release_delayed_item(prev);
        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

insert_end:
        mutex_unlock(&node->mutex);
        return ret;
}

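/*
 * Delete a run of contiguous dir index items from the leaf the path points
 * at, matching them against consecutive delayed deletion items starting at
 * @item, then release the processed delayed items.
 */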
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        struct btrfs_delayed_item *curr, *next;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct list_head head;
        int nitems, i, last_item;
        int ret = 0;

        BUG_ON(!path->nodes[0]);

        leaf = path->nodes[0];

        i = path->slots[0];
        last_item = btrfs_header_nritems(leaf) - 1;
        if (i > last_item)
                return -ENOENT; /* FIXME: Is errno suitable? */

        next = item;
        INIT_LIST_HEAD(&head);
        btrfs_item_key_to_cpu(leaf, &key, i);
        nitems = 0;
        /*
         * Count the number of dir index items that we can delete in a batch.
         */
        while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
                list_add_tail(&next->tree_list, &head);
                nitems++;

                curr = next;
                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                if (!btrfs_is_continuous_delayed_item(curr, next))
                        break;

                i++;
                if (i > last_item)
                        break;
                btrfs_item_key_to_cpu(leaf, &key, i);
        }

        if (!nitems)
                return 0;

        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
        if (ret)
                goto out;

        list_for_each_entry_safe(curr, next, &head, tree_list) {
                btrfs_delayed_item_release_metadata(root, curr);
                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

out:
        return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        unsigned int nofs_flag;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_deletion_item(node);
        if (!curr)
                goto delete_fail;

        nofs_flag = memalloc_nofs_save();
        ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
        memalloc_nofs_restore(nofs_flag);
        if (ret < 0)
                goto delete_fail;
        else if (ret > 0) {
                /*
                 * Can't find the item this delayed item points to, so it is
                 * stale; just drop it.
                 */
                prev = curr;
                curr = __btrfs_next_delayed_item(prev);
                btrfs_release_delayed_item(prev);
                ret = 0;
                btrfs_release_path(path);
                if (curr) {
                        mutex_unlock(&node->mutex);
                        goto do_again;
                } else
                        goto delete_fail;
        }

        btrfs_batch_delete_items(trans, root, path, curr);
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

delete_fail:
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_delayed_root *delayed_root;

        if (delayed_node &&
            test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                BUG_ON(!delayed_node->root);
                clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
                delayed_node->count--;

                delayed_root = delayed_node->root->fs_info->delayed_root;
                finish_one_item(delayed_root);
        }
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_delayed_root *delayed_root;

        ASSERT(delayed_node->root);
        clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
        delayed_node->count--;

        delayed_root = delayed_node->root->fs_info->delayed_root;
        finish_one_item(delayed_root);
}

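/*
 * Copy the delayed node's in-memory inode item into the inode item on
 * disk and, if the DEL_IREF flag is set, also delete the inode's single
 * INODE_REF/INODE_EXTREF item. Releases the node's reserved metadata on
 * the way out.
 */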
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key key;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        unsigned int nofs_flag;
        int mod;
        int ret;

        key.objectid = node->inode_id;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;

        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
                mod = -1;
        else
                mod = 1;

        nofs_flag = memalloc_nofs_save();
        ret = btrfs_lookup_inode(trans, root, path, &key, mod);
        memalloc_nofs_restore(nofs_flag);
        if (ret > 0) {
                btrfs_release_path(path);
                return -ENOENT;
        } else if (ret < 0) {
                return ret;
        }

        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
                            sizeof(struct btrfs_inode_item));
        btrfs_mark_buffer_dirty(leaf);

        if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
                goto no_iref;

        path->slots[0]++;
        if (path->slots[0] >= btrfs_header_nritems(leaf))
                goto search;
again:
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (key.objectid != node->inode_id)
                goto out;

        if (key.type != BTRFS_INODE_REF_KEY &&
            key.type != BTRFS_INODE_EXTREF_KEY)
                goto out;

        /*
         * Delayed iref deletion is for an inode that has only one link, so
         * there is only one iref. The case where several irefs share the
         * same item doesn't exist.
         */
        btrfs_del_item(trans, root, path);
out:
        btrfs_release_delayed_iref(node);
no_iref:
        btrfs_release_path(path);
err_out:
        btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
        btrfs_release_delayed_inode(node);

        return ret;

search:
        btrfs_release_path(path);

        key.type = BTRFS_INODE_EXTREF_KEY;
        key.offset = -1;

        nofs_flag = memalloc_nofs_save();
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        memalloc_nofs_restore(nofs_flag);
        if (ret < 0)
                goto err_out;
        ASSERT(ret);

        ret = 0;
        leaf = path->nodes[0];
        path->slots[0]--;
        goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
                                             struct btrfs_path *path,
                                             struct btrfs_delayed_node *node)
{
        int ret;

        mutex_lock(&node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
                mutex_unlock(&node->mutex);
                return 0;
        }

        ret = __btrfs_update_delayed_inode(trans, root, path, node);
        mutex_unlock(&node->mutex);
        return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                   struct btrfs_path *path,
                                   struct btrfs_delayed_node *node)
{
        int ret;

        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
        if (ret)
                return ret;

        ret = btrfs_delete_delayed_items(trans, path, node->root, node);
        if (ret)
                return ret;

        ret = btrfs_update_delayed_inode(trans, node->root, path, node);
        return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_delayed_node *curr_node, *prev_node;
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret = 0;
        bool count = (nr > 0);

        if (TRANS_ABORTED(trans))
                return -EIO;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->leave_spinning = 1;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &fs_info->delayed_block_rsv;

        delayed_root = fs_info->delayed_root;

        curr_node = btrfs_first_delayed_node(delayed_root);
        while (curr_node && (!count || (count && nr--))) {
                ret = __btrfs_commit_inode_delayed_items(trans, path,
                                                         curr_node);
                if (ret) {
                        btrfs_release_delayed_node(curr_node);
                        curr_node = NULL;
                        btrfs_abort_transaction(trans, ret);
                        break;
                }

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                btrfs_release_delayed_node(prev_node);
        }

        if (curr_node)
                btrfs_release_delayed_node(curr_node);
        btrfs_free_path(path);
        trans->block_rsv = block_rsv;

        return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
        return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
        return __btrfs_run_delayed_items(trans, nr);
}

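/*
 * Flush all delayed items of a single inode within the given transaction.
 * Returns 0 immediately if the inode has no delayed node or the node has
 * no pending items.
 */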
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                     struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!delayed_node->count) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        path = btrfs_alloc_path();
        if (!path) {
                btrfs_release_delayed_node(delayed_node);
                return -ENOMEM;
        }
        path->leave_spinning = 1;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

        ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

        btrfs_release_delayed_node(delayed_node);
        btrfs_free_path(path);
        trans->block_rsv = block_rsv;

        return ret;
}

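/*
 * Write back only the delayed inode item of an inode (not its delayed dir
 * index items), joining a transaction to do so if the inode item is dirty.
 */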
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_trans_handle *trans;
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        trans = btrfs_join_transaction(delayed_node->root);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto trans_out;
        }
        path->leave_spinning = 1;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &fs_info->delayed_block_rsv;

        mutex_lock(&delayed_node->mutex);
        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
                ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
                                                   path, delayed_node);
        else
                ret = 0;
        mutex_unlock(&delayed_node->mutex);

        btrfs_free_path(path);
        trans->block_rsv = block_rsv;
trans_out:
        btrfs_end_transaction(trans);
        btrfs_btree_balance_dirty(fs_info);
out:
        btrfs_release_delayed_node(delayed_node);

        return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = READ_ONCE(inode->delayed_node);
        if (!delayed_node)
                return;

        inode->delayed_node = NULL;
        btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
        struct btrfs_delayed_root *delayed_root;
        int nr;
        struct btrfs_work work;
};

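/*
 * Background worker: flush prepared delayed nodes until the backlog drops
 * below BTRFS_DELAYED_BACKGROUND / 2, the prepared list is drained, or the
 * requested number of nodes (async_work->nr, 0 meaning up to
 * BTRFS_DELAYED_WRITEBACK) has been processed.
 */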
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
        struct btrfs_async_delayed_work *async_work;
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
        struct btrfs_delayed_node *delayed_node = NULL;
        struct btrfs_root *root;
        struct btrfs_block_rsv *block_rsv;
        int total_done = 0;

        async_work = container_of(work, struct btrfs_async_delayed_work, work);
        delayed_root = async_work->delayed_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        do {
                if (atomic_read(&delayed_root->items) <
                    BTRFS_DELAYED_BACKGROUND / 2)
                        break;

                delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
                if (!delayed_node)
                        break;

                path->leave_spinning = 1;
                root = delayed_node->root;

                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        btrfs_release_path(path);
                        btrfs_release_prepared_delayed_node(delayed_node);
                        total_done++;
                        continue;
                }

                block_rsv = trans->block_rsv;
                trans->block_rsv = &root->fs_info->delayed_block_rsv;

                __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

                trans->block_rsv = block_rsv;
                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty_nodelay(root->fs_info);

                btrfs_release_path(path);
                btrfs_release_prepared_delayed_node(delayed_node);
                total_done++;

        } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
                 || total_done < async_work->nr);

        btrfs_free_path(path);
out:
        wake_up(&delayed_root->wait);
        kfree(async_work);
}


static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
                                     struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_async_delayed_work *async_work;

        async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
        if (!async_work)
                return -ENOMEM;

        async_work->delayed_root = delayed_root;
        btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
                        NULL);
        async_work->nr = nr;

        btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
        return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
        WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
        int val = atomic_read(&delayed_root->items_seq);

        if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
                return 1;

        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
                return 1;

        return 0;
}

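/*
 * Throttle the creation of delayed items: once the backlog reaches
 * BTRFS_DELAYED_BACKGROUND, kick off background writeback, and once it
 * reaches BTRFS_DELAYED_WRITEBACK, also wait for the backlog to shrink.
 */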
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
        struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

        if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
                btrfs_workqueue_normal_congested(fs_info->delayed_workers))
                return;

        if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
                int seq;
                int ret;

                seq = atomic_read(&delayed_root->items_seq);

                ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
                if (ret)
                        return;

                wait_event_interruptible(delayed_root->wait,
                                         could_end_wait(delayed_root, seq));
                return;
        }

        btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   const char *name, int name_len,
                                   struct btrfs_inode *dir,
                                   struct btrfs_disk_key *disk_key, u8 type,
                                   u64 index)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *delayed_item;
        struct btrfs_dir_item *dir_item;
        int ret;

        delayed_node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
        if (!delayed_item) {
                ret = -ENOMEM;
                goto release_node;
        }

        delayed_item->key.objectid = btrfs_ino(dir);
        delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
        delayed_item->key.offset = index;

        dir_item = (struct btrfs_dir_item *)delayed_item->data;
        dir_item->location = *disk_key;
        btrfs_set_stack_dir_transid(dir_item, trans->transid);
        btrfs_set_stack_dir_data_len(dir_item, 0);
        btrfs_set_stack_dir_name_len(dir_item, name_len);
        btrfs_set_stack_dir_type(dir_item, type);
        memcpy((char *)(dir_item + 1), name, name_len);

        ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
        /*
         * We reserved enough space when we started the transaction, so
         * reserving metadata here cannot fail.
         */
        BUG_ON(ret);

        mutex_lock(&delayed_node->mutex);
        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
        if (unlikely(ret)) {
                btrfs_err(trans->fs_info,
                          "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
                          name_len, name, delayed_node->root->root_key.objectid,
                          delayed_node->inode_id, ret);
                BUG();
        }
        mutex_unlock(&delayed_node->mutex);

release_node:
        btrfs_release_delayed_node(delayed_node);
        return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
                                               struct btrfs_delayed_node *node,
                                               struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        mutex_lock(&node->mutex);
        item = __btrfs_lookup_delayed_insertion_item(node, key);
        if (!item) {
                mutex_unlock(&node->mutex);
                return 1;
        }

        btrfs_delayed_item_release_metadata(node->root, item);
        btrfs_release_delayed_item(item);
        mutex_unlock(&node->mutex);
        return 0;
}

1510 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1511                                    struct btrfs_inode *dir, u64 index)
1512 {
1513         struct btrfs_delayed_node *node;
1514         struct btrfs_delayed_item *item;
1515         struct btrfs_key item_key;
1516         int ret;
1517
1518         node = btrfs_get_or_create_delayed_node(dir);
1519         if (IS_ERR(node))
1520                 return PTR_ERR(node);
1521
1522         item_key.objectid = btrfs_ino(dir);
1523         item_key.type = BTRFS_DIR_INDEX_KEY;
1524         item_key.offset = index;
1525
1526         ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1527                                                   &item_key);
1528         if (!ret)
1529                 goto end;
1530
1531         item = btrfs_alloc_delayed_item(0);
1532         if (!item) {
1533                 ret = -ENOMEM;
1534                 goto end;
1535         }
1536
1537         item->key = item_key;
1538
1539         ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1540         /*
1541          * We reserved enough space when we started the transaction, so
1542          * reserving metadata here should never fail.
1543          */
1544         if (ret < 0) {
1545                 btrfs_err(trans->fs_info,
1546 "metadata reservation failed for delayed dir item deltiona, should have been reserved");
1547                 btrfs_release_delayed_item(item);
1548                 goto end;
1549         }
1550
1551         mutex_lock(&node->mutex);
1552         ret = __btrfs_add_delayed_deletion_item(node, item);
1553         if (unlikely(ret)) {
1554                 btrfs_err(trans->fs_info,
1555                           "failed to add delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1556                           index, node->root->root_key.objectid,
1557                           node->inode_id, ret);
1558                 btrfs_delayed_item_release_metadata(dir->root, item);
1559                 btrfs_release_delayed_item(item);
1560         }
1561         mutex_unlock(&node->mutex);
1562 end:
1563         btrfs_release_delayed_node(node);
1564         return ret;
1565 }
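/*
 * Example (illustrative sketch): an unlink path that has resolved the
 * victim's dir index can queue the deletion as below.  If the matching
 * insertion item is still pending, the two cancel each other out and
 * nothing ever reaches the dir index tree on disk:
 *
 *	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
 *	if (ret)
 *		btrfs_abort_transaction(trans, ret);
 */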
1566
1567 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1568 {
1569         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1570
1571         if (!delayed_node)
1572                 return -ENOENT;
1573
1574         /*
1575          * Since we hold i_mutex of this directory, no new directory index
1576          * can be added to the delayed node and index_cnt cannot be updated
1577          * concurrently, so there is no need to lock the delayed node.
1578          */
1579         if (!delayed_node->index_cnt) {
1580                 btrfs_release_delayed_node(delayed_node);
1581                 return -EINVAL;
1582         }
1583
1584         inode->index_cnt = delayed_node->index_cnt;
1585         btrfs_release_delayed_node(delayed_node);
1586         return 0;
1587 }
1588
1589 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1590                                      struct list_head *ins_list,
1591                                      struct list_head *del_list)
1592 {
1593         struct btrfs_delayed_node *delayed_node;
1594         struct btrfs_delayed_item *item;
1595
1596         delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1597         if (!delayed_node)
1598                 return false;
1599
1600         /*
1601          * We can only do one readdir with delayed items at a time because of
1602          * item->readdir_list.
1603          */
1604         inode_unlock_shared(inode);
1605         inode_lock(inode);
1606
1607         mutex_lock(&delayed_node->mutex);
1608         item = __btrfs_first_delayed_insertion_item(delayed_node);
1609         while (item) {
1610                 refcount_inc(&item->refs);
1611                 list_add_tail(&item->readdir_list, ins_list);
1612                 item = __btrfs_next_delayed_item(item);
1613         }
1614
1615         item = __btrfs_first_delayed_deletion_item(delayed_node);
1616         while (item) {
1617                 refcount_inc(&item->refs);
1618                 list_add_tail(&item->readdir_list, del_list);
1619                 item = __btrfs_next_delayed_item(item);
1620         }
1621         mutex_unlock(&delayed_node->mutex);
1622         /*
1623          * This delayed node is still cached in the btrfs inode, so refs
1624          * must be > 1 now, and there is no need to check whether it is
1625          * about to be freed.
1626          *
1627          * Besides that, this function is only used for readdir, and no
1628          * delayed items are inserted or deleted during that time, so
1629          * there is no need to requeue or dequeue this delayed node.
1630          */
1631         refcount_dec(&delayed_node->refs);
1632
1633         return true;
1634 }
1635
1636 void btrfs_readdir_put_delayed_items(struct inode *inode,
1637                                      struct list_head *ins_list,
1638                                      struct list_head *del_list)
1639 {
1640         struct btrfs_delayed_item *curr, *next;
1641
1642         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1643                 list_del(&curr->readdir_list);
1644                 if (refcount_dec_and_test(&curr->refs))
1645                         kfree(curr);
1646         }
1647
1648         list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1649                 list_del(&curr->readdir_list);
1650                 if (refcount_dec_and_test(&curr->refs))
1651                         kfree(curr);
1652         }
1653
1654         /*
1655          * The VFS is going to do up_read(), so we need to downgrade back to a
1656          * read lock.
1657          */
1658         downgrade_write(&inode->i_rwsem);
1659 }
1660
1661 int btrfs_should_delete_dir_index(struct list_head *del_list,
1662                                   u64 index)
1663 {
1664         struct btrfs_delayed_item *curr;
1665         int ret = 0;
1666
1667         list_for_each_entry(curr, del_list, readdir_list) {
1668                 if (curr->key.offset > index)
1669                         break;
1670                 if (curr->key.offset == index) {
1671                         ret = 1;
1672                         break;
1673                 }
1674         }
1675         return ret;
1676 }
1677
1678 /*
1679  * btrfs_readdir_delayed_dir_index - emit the dir entries stored in the
1680  * delayed insertion list to the readdir context.
1681  */
1682 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1683                                     struct list_head *ins_list)
1684 {
1685         struct btrfs_dir_item *di;
1686         struct btrfs_delayed_item *curr, *next;
1687         struct btrfs_key location;
1688         char *name;
1689         int name_len;
1690         int over = 0;
1691         unsigned char d_type;
1692
1693         if (list_empty(ins_list))
1694                 return 0;
1695
1696         /*
1697          * The data of a delayed item never changes once it is created, so
1698          * there is no need to lock it.  And since we hold i_mutex of the
1699          * directory, nobody can delete any directory index now.
1700          */
1701         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1702                 list_del(&curr->readdir_list);
1703
1704                 if (curr->key.offset < ctx->pos) {
1705                         if (refcount_dec_and_test(&curr->refs))
1706                                 kfree(curr);
1707                         continue;
1708                 }
1709
1710                 ctx->pos = curr->key.offset;
1711
1712                 di = (struct btrfs_dir_item *)curr->data;
1713                 name = (char *)(di + 1);
1714                 name_len = btrfs_stack_dir_name_len(di);
1715
1716                 d_type = fs_ftype_to_dtype(di->type);
1717                 btrfs_disk_key_to_cpu(&location, &di->location);
1718
1719                 over = !dir_emit(ctx, name, name_len,
1720                                location.objectid, d_type);
1721
1722                 if (refcount_dec_and_test(&curr->refs))
1723                         kfree(curr);
1724
1725                 if (over)
1726                         return 1;
1727                 ctx->pos++;
1728         }
1729         return 0;
1730 }
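/*
 * Readdir flow sketch (hypothetical caller, for illustration): the three
 * readdir helpers above are meant to be used together.  Snapshot the
 * pending items, consult the deletion list while walking the on-disk dir
 * index, emit the pending insertions, then drop the snapshot:
 *
 *	bool put;
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *
 *	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
 *	// for each on-disk dir index entry found:
 *	//	if (btrfs_should_delete_dir_index(&del_list, index))
 *	//		continue;	// deletion still pending, skip it
 *	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
 *	if (put)
 *		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
 */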
1731
1732 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1733                                   struct btrfs_inode_item *inode_item,
1734                                   struct inode *inode)
1735 {
1736         btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1737         btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1738         btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1739         btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1740         btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1741         btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1742         btrfs_set_stack_inode_generation(inode_item,
1743                                          BTRFS_I(inode)->generation);
1744         btrfs_set_stack_inode_sequence(inode_item,
1745                                        inode_peek_iversion(inode));
1746         btrfs_set_stack_inode_transid(inode_item, trans->transid);
1747         btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1748         btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1749         btrfs_set_stack_inode_block_group(inode_item, 0);
1750
1751         btrfs_set_stack_timespec_sec(&inode_item->atime,
1752                                      inode->i_atime.tv_sec);
1753         btrfs_set_stack_timespec_nsec(&inode_item->atime,
1754                                       inode->i_atime.tv_nsec);
1755
1756         btrfs_set_stack_timespec_sec(&inode_item->mtime,
1757                                      inode->i_mtime.tv_sec);
1758         btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1759                                       inode->i_mtime.tv_nsec);
1760
1761         btrfs_set_stack_timespec_sec(&inode_item->ctime,
1762                                      inode->i_ctime.tv_sec);
1763         btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1764                                       inode->i_ctime.tv_nsec);
1765
1766         btrfs_set_stack_timespec_sec(&inode_item->otime,
1767                                      BTRFS_I(inode)->i_otime.tv_sec);
1768         btrfs_set_stack_timespec_nsec(&inode_item->otime,
1769                                      BTRFS_I(inode)->i_otime.tv_nsec);
1770 }
1771
1772 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1773 {
1774         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1775         struct btrfs_delayed_node *delayed_node;
1776         struct btrfs_inode_item *inode_item;
1777
1778         delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1779         if (!delayed_node)
1780                 return -ENOENT;
1781
1782         mutex_lock(&delayed_node->mutex);
1783         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1784                 mutex_unlock(&delayed_node->mutex);
1785                 btrfs_release_delayed_node(delayed_node);
1786                 return -ENOENT;
1787         }
1788
1789         inode_item = &delayed_node->inode_item;
1790
1791         i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1792         i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1793         btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1794         btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1795                         round_up(i_size_read(inode), fs_info->sectorsize));
1796         inode->i_mode = btrfs_stack_inode_mode(inode_item);
1797         set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1798         inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1799         BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1800         BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1801
1802         inode_set_iversion_queried(inode,
1803                                    btrfs_stack_inode_sequence(inode_item));
1804         inode->i_rdev = 0;
1805         *rdev = btrfs_stack_inode_rdev(inode_item);
1806         BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1807
1808         inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1809         inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1810
1811         inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1812         inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1813
1814         inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1815         inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1816
1817         BTRFS_I(inode)->i_otime.tv_sec =
1818                 btrfs_stack_timespec_sec(&inode_item->otime);
1819         BTRFS_I(inode)->i_otime.tv_nsec =
1820                 btrfs_stack_timespec_nsec(&inode_item->otime);
1821
1822         inode->i_generation = BTRFS_I(inode)->generation;
1823         BTRFS_I(inode)->index_cnt = (u64)-1;
1824
1825         mutex_unlock(&delayed_node->mutex);
1826         btrfs_release_delayed_node(delayed_node);
1827         return 0;
1828 }
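/*
 * Example (illustrative sketch): an inode-read path can try the delayed
 * copy first and only fall back to reading the inode item from the fs
 * tree when no dirty delayed copy exists:
 *
 *	ret = btrfs_fill_inode(inode, &rdev);
 *	if (!ret)
 *		filled = true;	// skip copying the on-disk inode item
 */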
1829
1830 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1831                                struct btrfs_root *root, struct inode *inode)
1832 {
1833         struct btrfs_delayed_node *delayed_node;
1834         int ret = 0;
1835
1836         delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
1837         if (IS_ERR(delayed_node))
1838                 return PTR_ERR(delayed_node);
1839
1840         mutex_lock(&delayed_node->mutex);
1841         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1842                 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1843                 goto release_node;
1844         }
1845
1846         ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
1847                                                    delayed_node);
1848         if (ret)
1849                 goto release_node;
1850
1851         fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1852         set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1853         delayed_node->count++;
1854         atomic_inc(&root->fs_info->delayed_root->items);
1855 release_node:
1856         mutex_unlock(&delayed_node->mutex);
1857         btrfs_release_delayed_node(delayed_node);
1858         return ret;
1859 }
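/*
 * Example (illustrative sketch): callers can prefer the delayed update
 * and fall back to a direct btree update when the delayed path cannot
 * reserve metadata, e.g.:
 *
 *	ret = btrfs_delayed_update_inode(trans, root, inode);
 *	if (ret == -ENOMEM || ret == -ENOSPC)
 *		ret = some_direct_inode_update(trans, root, inode);
 *
 * where some_direct_inode_update() stands in for whatever synchronous
 * update helper the caller has available.
 */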
1860
1861 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1862 {
1863         struct btrfs_fs_info *fs_info = inode->root->fs_info;
1864         struct btrfs_delayed_node *delayed_node;
1865
1866         /*
1867          * We don't do delayed inode updates during log recovery because it
1868          * leads to ENOSPC problems.  This means we also can't do delayed
1869          * inode ref deletions.
1870          */
1871         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1872                 return -EAGAIN;
1873
1874         delayed_node = btrfs_get_or_create_delayed_node(inode);
1875         if (IS_ERR(delayed_node))
1876                 return PTR_ERR(delayed_node);
1877
1878         /*
1879          * We don't reserve space for inode ref deletion because:
1880          * - We only do async inode ref deletion for an inode that has a
1881          *   single link (i_nlink == 1), which means there is only one
1882          *   inode ref.  In most cases the inode ref and the inode item
1883          *   are in the same leaf and we will deal with them at the same
1884          *   time; since we are sure we will reserve space for the inode
1885          *   item, it is unnecessary to also reserve space for the ref.
1886          * - If the inode ref and the inode item are not in the same leaf,
1887          *   we still needn't worry about ENOSPC, because we reserve much
1888          *   more space for the inode update than it needs.
1889          * - At worst, we can steal some space from the global reservation,
1890          *   which happens very rarely.
1891          */
1892         mutex_lock(&delayed_node->mutex);
1893         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1894                 goto release_node;
1895
1896         set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1897         delayed_node->count++;
1898         atomic_inc(&fs_info->delayed_root->items);
1899 release_node:
1900         mutex_unlock(&delayed_node->mutex);
1901         btrfs_release_delayed_node(delayed_node);
1902         return 0;
1903 }
1904
1905 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1906 {
1907         struct btrfs_root *root = delayed_node->root;
1908         struct btrfs_fs_info *fs_info = root->fs_info;
1909         struct btrfs_delayed_item *curr_item, *prev_item;
1910
1911         mutex_lock(&delayed_node->mutex);
1912         curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1913         while (curr_item) {
1914                 btrfs_delayed_item_release_metadata(root, curr_item);
1915                 prev_item = curr_item;
1916                 curr_item = __btrfs_next_delayed_item(prev_item);
1917                 btrfs_release_delayed_item(prev_item);
1918         }
1919
1920         curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1921         while (curr_item) {
1922                 btrfs_delayed_item_release_metadata(root, curr_item);
1923                 prev_item = curr_item;
1924                 curr_item = __btrfs_next_delayed_item(prev_item);
1925                 btrfs_release_delayed_item(prev_item);
1926         }
1927
1928         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1929                 btrfs_release_delayed_iref(delayed_node);
1930
1931         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1932                 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1933                 btrfs_release_delayed_inode(delayed_node);
1934         }
1935         mutex_unlock(&delayed_node->mutex);
1936 }
1937
1938 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1939 {
1940         struct btrfs_delayed_node *delayed_node;
1941
1942         delayed_node = btrfs_get_delayed_node(inode);
1943         if (!delayed_node)
1944                 return;
1945
1946         __btrfs_kill_delayed_node(delayed_node);
1947         btrfs_release_delayed_node(delayed_node);
1948 }
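/*
 * Example (illustrative sketch): an eviction-style path for an inode that
 * will never be written back can simply drop every pending delayed item
 * instead of flushing it:
 *
 *	// once the inode is known to be dead:
 *	btrfs_kill_delayed_inode_items(BTRFS_I(inode));
 */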
1949
1950 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1951 {
1952         u64 inode_id = 0;
1953         struct btrfs_delayed_node *delayed_nodes[8];
1954         int i, n;
1955
1956         while (1) {
1957                 spin_lock(&root->inode_lock);
1958                 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1959                                            (void **)delayed_nodes, inode_id,
1960                                            ARRAY_SIZE(delayed_nodes));
1961                 if (!n) {
1962                         spin_unlock(&root->inode_lock);
1963                         break;
1964                 }
1965
1966                 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1967                 for (i = 0; i < n; i++) {
1968                         /*
1969                          * Don't increase refs in case the node is dead and
1970                          * about to be removed from the tree in the loop below
1971                          */
1972                         if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
1973                                 delayed_nodes[i] = NULL;
1974                 }
1975                 spin_unlock(&root->inode_lock);
1976
1977                 for (i = 0; i < n; i++) {
1978                         if (!delayed_nodes[i])
1979                                 continue;
1980                         __btrfs_kill_delayed_node(delayed_nodes[i]);
1981                         btrfs_release_delayed_node(delayed_nodes[i]);
1982                 }
1983         }
1984 }
1985
1986 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1987 {
1988         struct btrfs_delayed_node *curr_node, *prev_node;
1989
1990         curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1991         while (curr_node) {
1992                 __btrfs_kill_delayed_node(curr_node);
1993
1994                 prev_node = curr_node;
1995                 curr_node = btrfs_next_delayed_node(curr_node);
1996                 btrfs_release_delayed_node(prev_node);
1997         }
1998 }
1999