// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->num_bytes < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or the node already present in the
 * tree that overlaps the given offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}
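
/*
 * Hypothetical caller sketch (illustration only, not part of this file):
 * insert an entry keyed by its start offset and treat a non-NULL return as
 * a range collision.  The 'tree', 'entry' and handle_overlap() names are
 * assumptions.
 *
 *	node = tree_insert(&tree->tree, entry->file_offset, &entry->rb_node);
 *	if (node)
 *		handle_overlap();	(range [file_offset, entry_end) taken)
 */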

/*
 * Look for a given offset in the tree.  If it can't be found, return NULL
 * and set *prev_ret to the node for the first lesser offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

/*
 * Helper to check if a given offset is inside a given entry.
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
        if (file_offset < entry->file_offset ||
            entry->file_offset + entry->num_bytes <= file_offset)
                return 0;
        return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->num_bytes <= file_offset)
                return 0;
        return 1;
}
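
/*
 * Both helpers above treat an ordered extent as the half-open byte range
 * [file_offset, file_offset + num_bytes).  Worked example (assumed values):
 * for an entry with file_offset == 4096 and num_bytes == 4096,
 * offset_in_entry() is true for offsets 4096..8191 and false at 8192, and
 * range_overlaps() is false for a range that merely touches a boundary,
 * e.g. one starting at file_offset == 8192.
 */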

/*
 * Look for the first ordered struct that contains this offset; otherwise
 * return the first one less than this offset.
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (offset_in_entry(entry, file_offset))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
                                      u64 disk_bytenr, u64 num_bytes,
                                      u64 disk_num_bytes, int type, int dio,
                                      int compress_type)
{
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;
        int ret;

        if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
                /* For nocow write, we can release the qgroup rsv right now */
                ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
                if (ret < 0)
                        return ret;
                ret = 0;
        } else {
                /*
                 * The ordered extent has reserved qgroup space, release now
                 * and pass the reserved number for qgroup_record to free.
                 */
                ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
                if (ret < 0)
                        return ret;
        }
        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        entry->file_offset = file_offset;
        entry->disk_bytenr = disk_bytenr;
        entry->num_bytes = num_bytes;
        entry->disk_num_bytes = disk_num_bytes;
        entry->bytes_left = num_bytes;
        entry->inode = igrab(&inode->vfs_inode);
        entry->compress_type = compress_type;
        entry->truncated_len = (u64)-1;
        entry->qgroup_rsv = ret;
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);

        if (dio) {
                percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
                                         fs_info->delalloc_batch);
                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
        }

        /* one ref for the tree */
        refcount_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->log_list);
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        init_completion(&entry->completion);

        trace_btrfs_ordered_extent_add(inode, entry);

        spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                btrfs_panic(fs_info, -EEXIST,
                                "inconsistency in ordered tree at offset %llu",
                                file_offset);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &root->ordered_extents);
        root->nr_ordered_extents++;
        if (root->nr_ordered_extents == 1) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(!list_empty(&root->ordered_root));
                list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);

        /*
         * We don't need the count_max_extents here, we can assume that all of
         * that work has been done at higher layers, so this is truly the
         * smallest the extent is going to get.
         */
        spin_lock(&inode->lock);
        btrfs_mod_outstanding_extents(inode, 1);
        spin_unlock(&inode->lock);

        return 0;
}

int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
                             u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
                             int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
                                          num_bytes, disk_num_bytes, type, 0,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
                                 u64 disk_bytenr, u64 num_bytes,
                                 u64 disk_num_bytes, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
                                          num_bytes, disk_num_bytes, type, 1,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
                                      u64 disk_bytenr, u64 num_bytes,
                                      u64 disk_num_bytes, int type,
                                      int compress_type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
                                          num_bytes, disk_num_bytes, type, 0,
                                          compress_type);
}
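
/*
 * Hypothetical caller sketch (illustration only): a nocow write path would
 * typically create the ordered extent after deciding on the disk range and
 * before submitting the bio.  'cur_offset', 'disk_bytenr' and 'num_bytes'
 * are assumed names, not taken from this file.
 *
 *	ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
 *				       num_bytes, num_bytes,
 *				       BTRFS_ORDERED_NOCOW);
 *	if (ret)
 *		goto error;
 */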

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(entry->inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
        spin_unlock_irq(&tree->lock);
}

/*
 * This is used to account for finished IO across a given range of the file.
 * The IO may span ordered extents.  If a given ordered_extent is completely
 * done, 1 is returned, otherwise 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 *file_offset, u64 io_size, int uptodate)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        int ret;
        unsigned long flags;
        u64 dec_end;
        u64 dec_start;
        u64 to_dec;

        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, *file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, *file_offset)) {
                ret = 1;
                goto out;
        }

        dec_start = max(*file_offset, entry->file_offset);
        dec_end = min(*file_offset + io_size,
                      entry->file_offset + entry->num_bytes);
        *file_offset = dec_end;
        if (dec_start > dec_end) {
                btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
                           dec_start, dec_end);
        }
        to_dec = dec_end - dec_start;
        if (to_dec > entry->bytes_left) {
                btrfs_crit(fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, to_dec);
        }
        entry->bytes_left -= to_dec;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /* test_and_set_bit implies a barrier */
                cond_wake_up_nomb(&entry->wait);
        } else {
                ret = 1;
        }
out:
        if (!ret && cached && entry) {
                *cached = entry;
                refcount_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}
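
/*
 * Hypothetical completion-path sketch (illustration only, modelled on how
 * an endio handler could walk a range that spans several ordered extents
 * by letting btrfs_dec_test_first_ordered_pending() advance the offset).
 * 'start', 'len', 'uptodate' and finish_ordered() are assumed names.
 *
 *	u64 cur = start;
 *
 *	while (cur < start + len) {
 *		u64 last = cur;
 *		struct btrfs_ordered_extent *ordered = NULL;
 *
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *							 &cur,
 *							 start + len - cur,
 *							 uptodate))
 *			finish_ordered(ordered);
 *		if (cur == last)
 *			break;		(no ordered extent found, stop)
 *	}
 */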

/*
 * This is used to account for finished IO across a given range of the file.
 * The IO should not span ordered extents.  If a given ordered_extent is
 * completely done, 1 is returned, otherwise 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&tree->lock, flags);
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
        }

        if (io_size > entry->bytes_left) {
                btrfs_crit(inode->root->fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, io_size);
        }
        entry->bytes_left -= io_size;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /* test_and_set_bit implies a barrier */
                cond_wake_up_nomb(&entry->wait);
        } else {
                ret = 1;
        }
out:
        if (!ret && cached && entry) {
                *cached = entry;
                refcount_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}

/*
 * Used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

        if (refcount_dec_and_test(&entry->refs)) {
                ASSERT(list_empty(&entry->root_extent_list));
                ASSERT(list_empty(&entry->log_list));
                ASSERT(RB_EMPTY_NODE(&entry->rb_node));
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kvfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
}

/*
 * Remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = btrfs_inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct rb_node *node;
        bool pending;

        /* This is paired with btrfs_add_ordered_extent. */
        spin_lock(&btrfs_inode->lock);
        btrfs_mod_outstanding_extents(btrfs_inode, -1);
        spin_unlock(&btrfs_inode->lock);
        if (root != fs_info->tree_root)
                btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
                                                false);

        if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
                                         fs_info->delalloc_batch);

        tree = &btrfs_inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
        spin_unlock_irq(&tree->lock);

        /*
         * The current running transaction is waiting on us, we need to let it
         * know that we're complete and wake it up.
         */
        if (pending) {
                struct btrfs_transaction *trans;

                /*
                 * The checks for trans are just a formality, it should be set,
                 * but if it isn't we don't want to deref/assert under the spin
                 * lock, so be nice and check if trans is set, but ASSERT() so
                 * if it isn't set a developer will notice.
                 */
                spin_lock(&fs_info->trans_lock);
                trans = fs_info->running_transaction;
                if (trans)
                        refcount_inc(&trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                ASSERT(trans);
                if (trans) {
                        if (atomic_dec_and_test(&trans->pending_ordered))
                                wake_up(&trans->pending_wait);
                        btrfs_put_transaction(trans);
                }
        }

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(list_empty(&root->ordered_root));
                list_del_init(&root->ordered_root);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered;

        ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
        btrfs_start_ordered_extent(ordered, 1);
        complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                               const u64 range_start, const u64 range_len)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        LIST_HEAD(splice);
        LIST_HEAD(skipped);
        LIST_HEAD(works);
        struct btrfs_ordered_extent *ordered, *next;
        u64 count = 0;
        const u64 range_end = range_start + range_len;

        mutex_lock(&root->ordered_extent_mutex);
        spin_lock(&root->ordered_extent_lock);
        list_splice_init(&root->ordered_extents, &splice);
        while (!list_empty(&splice) && nr) {
                ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
                                           root_extent_list);

                if (range_end <= ordered->disk_bytenr ||
                    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
                        list_move_tail(&ordered->root_extent_list, &skipped);
                        cond_resched_lock(&root->ordered_extent_lock);
                        continue;
                }

                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
                refcount_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);

                btrfs_init_work(&ordered->flush_work,
                                btrfs_run_ordered_extent_work, NULL, NULL);
                list_add_tail(&ordered->work_list, &works);
                btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

                cond_resched();
                spin_lock(&root->ordered_extent_lock);
                if (nr != U64_MAX)
                        nr--;
                count++;
        }
        list_splice_tail(&skipped, &root->ordered_extents);
        list_splice_tail(&splice, &root->ordered_extents);
        spin_unlock(&root->ordered_extent_lock);

        list_for_each_entry_safe(ordered, next, &works, work_list) {
                list_del_init(&ordered->work_list);
                wait_for_completion(&ordered->completion);
                btrfs_put_ordered_extent(ordered);
                cond_resched();
        }
        mutex_unlock(&root->ordered_extent_mutex);

        return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
                             const u64 range_start, const u64 range_len)
{
        struct btrfs_root *root;
        struct list_head splice;
        u64 done;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&fs_info->ordered_operations_mutex);
        spin_lock(&fs_info->ordered_root_lock);
        list_splice_init(&fs_info->ordered_roots, &splice);
        while (!list_empty(&splice) && nr) {
                root = list_first_entry(&splice, struct btrfs_root,
                                        ordered_root);
                root = btrfs_grab_root(root);
                BUG_ON(!root);
                list_move_tail(&root->ordered_root,
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);

                done = btrfs_wait_ordered_extents(root, nr,
                                                  range_start, range_len);
                btrfs_put_root(root);

                spin_lock(&fs_info->ordered_root_lock);
                if (nr != U64_MAX)
                        nr -= done;
        }
        list_splice_tail(&splice, &fs_info->ordered_roots);
        spin_unlock(&fs_info->ordered_root_lock);
        mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->num_bytes - 1;
        struct btrfs_inode *inode = BTRFS_I(entry->inode);

        trace_btrfs_ordered_extent_start(inode, entry);

        /*
         * Pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for the flusher thread to find them.
         */
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        int ret = 0;
        int ret_wb = 0;
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }

        /*
         * Start IO across the range first to instantiate any delalloc
         * extents.
         */
        ret = btrfs_fdatawrite_range(inode, start, orig_end);
        if (ret)
                return ret;

        /*
         * If we have a writeback error don't return immediately. Wait first
         * for any ordered extents that haven't completed yet. This is to make
         * sure no one can dirty the same page ranges and call writepages()
         * before the ordered extents complete - to avoid failures (-EEXIST)
         * when adding the new ordered extents to the ordered tree.
         */
        ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

        end = orig_end;
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->num_bytes <= start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                btrfs_start_ordered_extent(ordered, 1);
                end = ordered->file_offset;
                /*
                 * If the ordered extent had an error save the error but don't
                 * exit without waiting first for all other ordered extents in
                 * the range to complete.
                 */
                if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
                        ret = -EIO;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
        return ret_wb ? ret_wb : ret;
}
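
/*
 * Hypothetical caller sketch (illustration only): an fsync-like path that
 * needs every ordered extent in a range finished before reading metadata
 * back.  'inode', 'start' and 'end' are assumed to be in scope.
 *
 *	ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
 *	if (ret)
 *		return ret;	(writeback error or ordered extent IO error)
 */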

/*
 * Find an ordered extent corresponding to file_offset.  Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset))
                entry = NULL;
        if (entry)
                refcount_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any
 * ordered extents that exist in the range, rather than just the start of
 * the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
                struct btrfs_inode *inode, u64 file_offset, u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
                if (!node)
                        goto out;
        }

        while (1) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        break;

                if (entry->file_offset >= file_offset + len) {
                        entry = NULL;
                        break;
                }
                entry = NULL;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        if (entry)
                refcount_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
}
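
/*
 * Hypothetical caller sketch (illustration only): check a byte range for
 * any in-flight ordered extent and drop the reference the lookup takes.
 * 'inode', 'pos' and 'count' are assumed names.
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	ordered = btrfs_lookup_ordered_range(inode, pos, count);
 *	if (ordered) {
 *		btrfs_put_ordered_extent(ordered);
 *		return -EAGAIN;		(range is busy, caller retries)
 *	}
 */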

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
                                           struct list_head *list)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *n;

        ASSERT(inode_is_locked(&inode->vfs_inode));

        spin_lock_irq(&tree->lock);
        for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
                struct btrfs_ordered_extent *ordered;

                ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

                if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
                        continue;

                ASSERT(list_empty(&ordered->log_list));
                list_add_tail(&ordered->log_list, list);
                refcount_inc(&ordered->refs);
        }
        spin_unlock_irq(&tree->lock);
}

/*
 * Lookup and return any ordered extent before 'file_offset'.  NULL is
 * returned if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        refcount_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 * locked range. It's the caller's responsibility to free the cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending in the range.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                        u64 end,
                                        struct extent_state **cached_state)
{
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cache = NULL;
        struct extent_state **cachedp = &cache;

        if (cached_state)
                cachedp = cached_state;

        while (1) {
                lock_extent_bits(&inode->io_tree, start, end, cachedp);
                ordered = btrfs_lookup_ordered_range(inode, start,
                                                     end - start + 1);
                if (!ordered) {
                        /*
                         * If no external cached_state has been passed then
                         * decrement the extra ref taken for cachedp since we
                         * aren't exposing it outside of this function.
                         */
                        if (!cached_state)
                                refcount_dec(&cache->refs);
                        break;
                }
                unlock_extent_cached(&inode->io_tree, start, end, cachedp);
                btrfs_start_ordered_extent(ordered, 1);
                btrfs_put_ordered_extent(ordered);
        }
}
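
/*
 * Hypothetical caller sketch (illustration only): take the extent lock with
 * all ordered IO in the range already finished, do some work, then unlock.
 * 'inode', 'start', 'end' and 'cached' are assumed names.
 *
 *	struct extent_state *cached = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *	(... operate on a range guaranteed free of pending ordered IO ...)
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */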

int __init ordered_data_init(void)
{
        btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
                                     sizeof(struct btrfs_ordered_extent), 0,
                                     SLAB_MEM_SPREAD,
                                     NULL);
        if (!btrfs_ordered_extent_cache)
                return -ENOMEM;

        return 0;
}

void __cold ordered_data_exit(void)
{
        kmem_cache_destroy(btrfs_ordered_extent_cache);
}