// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

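/*
 * Return the first byte past the end of @entry, saturating to (u64)-1 if
 * file_offset + num_bytes overflows.
 */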
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or the existing node that overlaps
 * the given offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree, and if it can't be found return the
 * first lesser offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * Helper to check if a given offset is inside a given entry.
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

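/*
 * Check whether the byte range [file_offset, file_offset + len) overlaps
 * the range covered by @entry.
 */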
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Look up the first ordered struct that covers this offset, otherwise
 * the first one less than this offset.
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type, int dio,
				      int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->disk_bytenr = disk_bytenr;
	entry->num_bytes = num_bytes;
	entry->disk_num_bytes = disk_num_bytes;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio) {
		percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
					 fs_info->delalloc_batch);
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
	}

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(&inode->vfs_inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}

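/*
 * Thin wrappers around __btrfs_add_ordered_extent() for regular writes,
 * direct IO writes and compressed writes.
 */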
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type,
				      int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * This is used to account for finished IO across a given range of the file.
 * The IO may span ordered extents. If a given ordered_extent is completely
 * done, 1 is returned, otherwise 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete. This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size,
		      entry->file_offset + entry->num_bytes);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * This is used to account for finished IO across a given range of the file.
 * The IO should not span ordered extents. If a given ordered_extent is
 * completely done, 1 is returned, otherwise 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * Used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree. No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
						false);

	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
					 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

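/*
 * Work function used by btrfs_wait_ordered_extents(): flush one ordered
 * extent to completion and signal any waiter through ->completion.
 */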
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

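/*
 * Wait for ordered extents across all roots, limited to @nr extents and to
 * those intersecting the given disk byte range.
 */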
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback. We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lookup and return any extent before 'file_offset'. NULL is returned
 * if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum. This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree.
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u8 *sum, int len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i * csum_size,
			       num_sectors * csum_size);

			index += (int)num_sectors * csum_size;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * btrfs_lock_and_flush_ordered_range - lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

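/*
 * Create the SLAB cache backing ordered extent allocations. Called once at
 * module init time; the matching destroy runs at module exit.
 */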
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}