// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->num_bytes < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->num_bytes;
}
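
/*
 * Note: entry_end() clamps on u64 overflow. For example, an entry with
 * file_offset = (u64)-4096 and num_bytes = 8192 would wrap around to 4096,
 * which is smaller than file_offset, so the helper returns (u64)-1 instead
 * and keeps the tree ordering sane.
 */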

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}
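
/*
 * The tree is keyed by file_offset, and because tree_insert() treats any
 * offset inside [file_offset, entry_end) as a match, ordered extents held
 * in the tree never overlap each other.
 */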

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->num_bytes <= file_offset)
                return 0;
        return 1;
}
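
/*
 * Two half-open ranges overlap iff each one starts before the other ends.
 * For example, an entry covering [4096, 8192) overlaps a query with
 * file_offset = 0 and len = 4097, but not one with len = 4096.
 */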

/*
 * Find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (in_range(file_offset, entry->file_offset, entry->num_bytes))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
                                      u64 disk_bytenr, u64 num_bytes,
                                      u64 disk_num_bytes, int type, int dio,
                                      int compress_type)
{
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;
        int ret;

        if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
                /* For nocow write, we can release the qgroup rsv right now */
                ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
                if (ret < 0)
                        return ret;
                ret = 0;
        } else {
                /*
                 * The ordered extent has reserved qgroup space, release now
                 * and pass the reserved number for qgroup_record to free.
                 */
                ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
                if (ret < 0)
                        return ret;
        }
        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        entry->file_offset = file_offset;
        entry->disk_bytenr = disk_bytenr;
        entry->num_bytes = num_bytes;
        entry->disk_num_bytes = disk_num_bytes;
        entry->bytes_left = num_bytes;
        entry->inode = igrab(&inode->vfs_inode);
        entry->compress_type = compress_type;
        entry->truncated_len = (u64)-1;
        entry->qgroup_rsv = ret;
        entry->physical = (u64)-1;
        entry->disk = NULL;
        entry->partno = (u8)-1;

        ASSERT(type == BTRFS_ORDERED_REGULAR ||
               type == BTRFS_ORDERED_NOCOW ||
               type == BTRFS_ORDERED_PREALLOC ||
               type == BTRFS_ORDERED_COMPRESSED);
        set_bit(type, &entry->flags);

        percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
                                 fs_info->delalloc_batch);

        if (dio)
                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

        /* one ref for the tree */
        refcount_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->log_list);
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        init_completion(&entry->completion);

        trace_btrfs_ordered_extent_add(inode, entry);

        spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                btrfs_panic(fs_info, -EEXIST,
                                "inconsistency in ordered tree at offset %llu",
                                file_offset);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &root->ordered_extents);
        root->nr_ordered_extents++;
        if (root->nr_ordered_extents == 1) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(!list_empty(&root->ordered_root));
                list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);

        /*
         * We don't need the count_max_extents here, we can assume that all of
         * that work has been done at higher layers, so this is truly the
         * smallest the extent is going to get.
         */
        spin_lock(&inode->lock);
        btrfs_mod_outstanding_extents(inode, 1);
        spin_unlock(&inode->lock);

        return 0;
}
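
/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * a buffered COW write path that has just allocated an extent for a
 * delalloc range would typically do something like
 *
 *	ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 *				       ram_size, ins.offset,
 *				       BTRFS_ORDERED_REGULAR);
 *
 * (ins, ram_size and start are assumed names here) and then let the
 * writeback endio path drive the extent to completion via
 * btrfs_mark_ordered_io_finished().
 */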

int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
                             u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
                             int type)
{
        ASSERT(type == BTRFS_ORDERED_REGULAR ||
               type == BTRFS_ORDERED_NOCOW ||
               type == BTRFS_ORDERED_PREALLOC);
        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
                                          num_bytes, disk_num_bytes, type, 0,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
                                 u64 disk_bytenr, u64 num_bytes,
                                 u64 disk_num_bytes, int type)
{
        ASSERT(type == BTRFS_ORDERED_REGULAR ||
               type == BTRFS_ORDERED_NOCOW ||
               type == BTRFS_ORDERED_PREALLOC);
        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
                                          num_bytes, disk_num_bytes, type, 1,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
                                      u64 disk_bytenr, u64 num_bytes,
                                      u64 disk_num_bytes, int compress_type)
{
        ASSERT(compress_type != BTRFS_COMPRESS_NONE);
        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
                                          num_bytes, disk_num_bytes,
                                          BTRFS_ORDERED_COMPRESSED, 0,
                                          compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(entry->inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
        spin_unlock_irq(&tree->lock);
}

/*
 * Mark all ordered extent IO inside the specified range finished.
 *
 * @page:        The involved page for the operation.
 *               For uncompressed buffered IO, the page status also needs to be
 *               updated to indicate whether the pending ordered io is finished.
 *               Can be NULL for direct IO and compressed write.
 *               For these cases, callers are ensured they won't execute the
 *               endio function twice.
 * @finish_func: The function to be executed when all the IO of an ordered
 *               extent are finished.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
                                    struct page *page, u64 file_offset,
                                    u64 num_bytes, btrfs_func_t finish_func,
                                    bool uptodate)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_workqueue *wq;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        u64 cur = file_offset;

        if (btrfs_is_free_space_inode(inode))
                wq = fs_info->endio_freespace_worker;
        else
                wq = fs_info->endio_write_workers;

        if (page)
                ASSERT(page->mapping && page_offset(page) <= file_offset &&
                       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

        spin_lock_irqsave(&tree->lock, flags);
        while (cur < file_offset + num_bytes) {
                u64 entry_end;
                u64 end;
                u32 len;

                node = tree_search(tree, cur);
                /* No ordered extents at all */
                if (!node)
                        break;

                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                entry_end = entry->file_offset + entry->num_bytes;
                /*
                 * |<-- OE --->|  |
                 *                cur
                 * Go to next OE.
                 */
                if (cur >= entry_end) {
                        node = rb_next(node);
                        /* No more ordered extents, exit */
                        if (!node)
                                break;
                        entry = rb_entry(node, struct btrfs_ordered_extent,
                                         rb_node);

                        /* Go to next ordered extent and continue */
                        cur = entry->file_offset;
                        continue;
                }
                /*
                 * |    |<--- OE --->|
                 * cur
                 * Go to the start of OE.
                 */
                if (cur < entry->file_offset) {
                        cur = entry->file_offset;
                        continue;
                }

                /*
                 * Now we are definitely inside one ordered extent.
                 *
                 * |<--- OE --->|
                 *      |
                 *      cur
                 */
                end = min(entry->file_offset + entry->num_bytes,
                          file_offset + num_bytes) - 1;
                ASSERT(end + 1 - cur < U32_MAX);
                len = end + 1 - cur;

                if (page) {
                        /*
                         * Ordered (Private2) bit indicates whether we still
                         * have pending io unfinished for the ordered extent.
                         *
                         * If there's no such bit, we need to skip to next range.
                         */
                        if (!PageOrdered(page)) {
                                cur += len;
                                continue;
                        }
                        ClearPageOrdered(page);
                }

                /* Now we're fine to update the accounting */
                if (unlikely(len > entry->bytes_left)) {
                        WARN_ON(1);
                        btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
                                   inode->root->root_key.objectid,
                                   btrfs_ino(inode),
                                   entry->file_offset,
                                   entry->num_bytes,
                                   len, entry->bytes_left);
                        entry->bytes_left = 0;
                } else {
                        entry->bytes_left -= len;
                }

                if (!uptodate)
                        set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

                /*
                 * All the IO of the ordered extent is finished, we need to queue
                 * the finish_func to be executed.
                 */
                if (entry->bytes_left == 0) {
                        set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                        cond_wake_up(&entry->wait);
                        refcount_inc(&entry->refs);
                        spin_unlock_irqrestore(&tree->lock, flags);
                        btrfs_init_work(&entry->work, finish_func, NULL, NULL);
                        btrfs_queue_work(wq, &entry->work);
                        spin_lock_irqsave(&tree->lock, flags);
                }
                cur += len;
        }
        spin_unlock_irqrestore(&tree->lock, flags);
}
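
/*
 * Example of the accounting above: an ordered extent covering [0, 128K)
 * whose pages complete in two endio calls of 64K each sees bytes_left go
 * 128K -> 64K -> 0, and only the second call queues finish_func on the
 * workqueue.
 */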

/*
 * Finish IO for one ordered extent across a given range. The range can only
 * contain one ordered extent.
 *
 * @cached:      The cached ordered extent. If not NULL, we can skip the tree
 *               search and use the ordered extent directly.
 *               Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:     Length of the finished IO range
 * @uptodate:    If the IO finished without problem
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
                                    struct btrfs_ordered_extent **cached,
                                    u64 file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        bool finished = false;

        spin_lock_irqsave(&tree->lock, flags);
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
                goto out;

        if (io_size > entry->bytes_left)
                btrfs_crit(inode->root->fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, io_size);

        entry->bytes_left -= io_size;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                /*
                 * Ensure only one caller can set the flag and finished_ret
                 * accordingly
                 */
                finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /* test_and_set_bit implies a barrier */
                cond_wake_up_nomb(&entry->wait);
        }
out:
        if (finished && cached && entry) {
                *cached = entry;
                refcount_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return finished;
}
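
/*
 * Unlike btrfs_mark_ordered_io_finished() above, this helper never queues
 * completion work by itself; it only decrements the accounting and reports,
 * via the return value and @cached, whether the caller is the one that
 * finished the ordered extent and must therefore run the completion.
 */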

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

        if (refcount_dec_and_test(&entry->refs)) {
                ASSERT(list_empty(&entry->root_extent_list));
                ASSERT(list_empty(&entry->log_list));
                ASSERT(RB_EMPTY_NODE(&entry->rb_node));
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kvfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = btrfs_inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct rb_node *node;
        bool pending;

        /* This is paired with btrfs_add_ordered_extent. */
        spin_lock(&btrfs_inode->lock);
        btrfs_mod_outstanding_extents(btrfs_inode, -1);
        spin_unlock(&btrfs_inode->lock);
        if (root != fs_info->tree_root)
                btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
                                                false);

        percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
                                 fs_info->delalloc_batch);

        tree = &btrfs_inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
        spin_unlock_irq(&tree->lock);

        /*
         * The current running transaction is waiting on us, we need to let it
         * know that we're complete and wake it up.
         */
        if (pending) {
                struct btrfs_transaction *trans;

                /*
                 * The checks for trans are just a formality, it should be set,
                 * but if it isn't we don't want to deref/assert under the spin
                 * lock, so be nice and check if trans is set, but ASSERT() so
                 * if it isn't set a developer will notice.
                 */
                spin_lock(&fs_info->trans_lock);
                trans = fs_info->running_transaction;
                if (trans)
                        refcount_inc(&trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                ASSERT(trans);
                if (trans) {
                        if (atomic_dec_and_test(&trans->pending_ordered))
                                wake_up(&trans->pending_wait);
                        btrfs_put_transaction(trans);
                }
        }

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(list_empty(&root->ordered_root));
                list_del_init(&root->ordered_root);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered;

        ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
        btrfs_start_ordered_extent(ordered, 1);
        complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                               const u64 range_start, const u64 range_len)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        LIST_HEAD(splice);
        LIST_HEAD(skipped);
        LIST_HEAD(works);
        struct btrfs_ordered_extent *ordered, *next;
        u64 count = 0;
        const u64 range_end = range_start + range_len;

        mutex_lock(&root->ordered_extent_mutex);
        spin_lock(&root->ordered_extent_lock);
        list_splice_init(&root->ordered_extents, &splice);
        while (!list_empty(&splice) && nr) {
                ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
                                           root_extent_list);

                if (range_end <= ordered->disk_bytenr ||
                    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
                        list_move_tail(&ordered->root_extent_list, &skipped);
                        cond_resched_lock(&root->ordered_extent_lock);
                        continue;
                }

                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
                refcount_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);

                btrfs_init_work(&ordered->flush_work,
                                btrfs_run_ordered_extent_work, NULL, NULL);
                list_add_tail(&ordered->work_list, &works);
                btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

                cond_resched();
                spin_lock(&root->ordered_extent_lock);
                if (nr != U64_MAX)
                        nr--;
                count++;
        }
        list_splice_tail(&skipped, &root->ordered_extents);
        list_splice_tail(&splice, &root->ordered_extents);
        spin_unlock(&root->ordered_extent_lock);

        list_for_each_entry_safe(ordered, next, &works, work_list) {
                list_del_init(&ordered->work_list);
                wait_for_completion(&ordered->completion);
                btrfs_put_ordered_extent(ordered);
                cond_resched();
        }
        mutex_unlock(&root->ordered_extent_mutex);

        return count;
}
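
/*
 * Note: callers that want to flush everything pass nr == U64_MAX, in which
 * case the counter above is never decremented and the loop only stops once
 * the spliced list has been drained.
 */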

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
                              const u64 range_start, const u64 range_len)
{
        struct btrfs_root *root;
        struct list_head splice;
        u64 done;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&fs_info->ordered_operations_mutex);
        spin_lock(&fs_info->ordered_root_lock);
        list_splice_init(&fs_info->ordered_roots, &splice);
        while (!list_empty(&splice) && nr) {
                root = list_first_entry(&splice, struct btrfs_root,
                                        ordered_root);
                root = btrfs_grab_root(root);
                BUG_ON(!root);
                list_move_tail(&root->ordered_root,
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);

                done = btrfs_wait_ordered_extents(root, nr,
                                                  range_start, range_len);
                btrfs_put_root(root);

                spin_lock(&fs_info->ordered_root_lock);
                if (nr != U64_MAX)
                        nr -= done;
        }
        list_splice_tail(&splice, &fs_info->ordered_roots);
        spin_unlock(&fs_info->ordered_root_lock);
        mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is set, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->num_bytes - 1;
        struct btrfs_inode *inode = BTRFS_I(entry->inode);

        trace_btrfs_ordered_extent_start(inode, entry);

        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for the flusher thread to find them
         */
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        int ret = 0;
        int ret_wb = 0;
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }

        /* start IO across the range first to instantiate any delalloc
         * extents
         */
        ret = btrfs_fdatawrite_range(inode, start, orig_end);
        if (ret)
                return ret;

        /*
         * If we have a writeback error don't return immediately. Wait first
         * for any ordered extents that haven't completed yet. This is to make
         * sure no one can dirty the same page ranges and call writepages()
         * before the ordered extents complete - to avoid failures (-EEXIST)
         * when adding the new ordered extents to the ordered tree.
         */
        ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

        end = orig_end;
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->num_bytes <= start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                btrfs_start_ordered_extent(ordered, 1);
                end = ordered->file_offset;
                /*
                 * If the ordered extent had an error save the error but don't
                 * exit without waiting first for all other ordered extents in
                 * the range to complete.
                 */
                if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
                        ret = -EIO;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
        return ret_wb ? ret_wb : ret;
}
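
/*
 * A minimal usage sketch (hypothetical caller): fsync-like paths flush a
 * whole file with
 *
 *	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
 *
 * which writes out any delalloc in the range and then waits, walking
 * backwards from the end, until no ordered extent overlapping the range
 * remains in the tree.
 */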

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;

        tree = &inode->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
                entry = NULL;
        if (entry)
                refcount_inc(&entry->refs);
out:
        spin_unlock_irqrestore(&tree->lock, flags);
        return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
                struct btrfs_inode *inode, u64 file_offset, u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
                if (!node)
                        goto out;
        }

        while (1) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        break;

                if (entry->file_offset >= file_offset + len) {
                        entry = NULL;
                        break;
                }
                entry = NULL;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        if (entry)
                refcount_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
                                           struct list_head *list)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *n;

        ASSERT(inode_is_locked(&inode->vfs_inode));

        spin_lock_irq(&tree->lock);
        for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
                struct btrfs_ordered_extent *ordered;

                ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

                if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
                        continue;

                ASSERT(list_empty(&ordered->log_list));
                list_add_tail(&ordered->log_list, list);
                refcount_inc(&ordered->refs);
        }
        spin_unlock_irq(&tree->lock);
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        refcount_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
                struct btrfs_inode *inode, u64 file_offset, u64 len)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct rb_node *cur;
        struct rb_node *prev;
        struct rb_node *next;
        struct btrfs_ordered_extent *entry = NULL;

        spin_lock_irq(&tree->lock);
        node = tree->tree.rb_node;
        /*
         * Here we don't want to use tree_search() which will use tree->last
         * and screw up the search order.
         * And __tree_search() can't return the adjacent ordered extents
         * either, thus here we do our own search.
         */
        while (node) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset) {
                        node = node->rb_left;
                } else if (file_offset >= entry_end(entry)) {
                        node = node->rb_right;
                } else {
                        /*
                         * Direct hit, got an ordered extent that starts at
                         * @file_offset
                         */
                        goto out;
                }
        }
        if (!entry) {
                /* Empty tree */
                goto out;
        }

        cur = &entry->rb_node;
        /* We got an entry around @file_offset, check adjacent entries */
        if (entry->file_offset < file_offset) {
                prev = cur;
                next = rb_next(cur);
        } else {
                prev = rb_prev(cur);
                next = cur;
        }
        if (prev) {
                entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        goto out;
        }
        if (next) {
                entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        goto out;
        }
        /* No ordered extent in the range */
        entry = NULL;
out:
        if (entry)
                refcount_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
}
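
/*
 * Example: with ordered extents A = [0, 4K) and B = [8K, 12K) in the tree,
 * a query with file_offset = 4K and len = 8K must return B. A plain
 * btrfs_lookup_ordered_extent(inode, 4K) would return NULL instead, since
 * offset 4K is covered by neither entry.
 */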

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent in the range can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                        u64 end,
                                        struct extent_state **cached_state)
{
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cache = NULL;
        struct extent_state **cachedp = &cache;

        if (cached_state)
                cachedp = cached_state;

        while (1) {
                lock_extent_bits(&inode->io_tree, start, end, cachedp);
                ordered = btrfs_lookup_ordered_range(inode, start,
                                                     end - start + 1);
                if (!ordered) {
                        /*
                         * If no external cached_state has been passed then
                         * decrement the extra ref taken for cachedp since we
                         * aren't exposing it outside of this function
                         */
                        if (!cached_state)
                                refcount_dec(&cache->refs);
                        break;
                }
                unlock_extent_cached(&inode->io_tree, start, end, cachedp);
                btrfs_start_ordered_extent(ordered, 1);
                btrfs_put_ordered_extent(ordered);
        }
}
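
/*
 * A minimal usage sketch (hypothetical caller): read paths that must not
 * race with writeback typically do
 *
 *	struct extent_state *cached = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *	... read the now-stable range ...
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */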

static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
                                u64 len)
{
        struct inode *inode = ordered->inode;
        u64 file_offset = ordered->file_offset + pos;
        u64 disk_bytenr = ordered->disk_bytenr + pos;
        u64 num_bytes = len;
        u64 disk_num_bytes = len;
        int type;
        unsigned long flags_masked = ordered->flags & ~(1 << BTRFS_ORDERED_DIRECT);
        int compress_type = ordered->compress_type;
        unsigned long weight;
        int ret;

        weight = hweight_long(flags_masked);
        WARN_ON_ONCE(weight > 1);
        if (!weight)
                type = 0;
        else
                type = __ffs(flags_masked);

        if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
                WARN_ON_ONCE(1);
                ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
                                file_offset, disk_bytenr, num_bytes,
                                disk_num_bytes, compress_type);
        } else if (test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
                ret = btrfs_add_ordered_extent_dio(BTRFS_I(inode), file_offset,
                                disk_bytenr, num_bytes, disk_num_bytes, type);
        } else {
                ret = btrfs_add_ordered_extent(BTRFS_I(inode), file_offset,
                                disk_bytenr, num_bytes, disk_num_bytes, type);
        }

        return ret;
}

int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
                               u64 post)
{
        struct inode *inode = ordered->inode;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        struct rb_node *node;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int ret = 0;

        spin_lock_irq(&tree->lock);
        /* Remove from tree once */
        node = &ordered->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;

        ordered->file_offset += pre;
        ordered->disk_bytenr += pre;
        ordered->num_bytes -= (pre + post);
        ordered->disk_num_bytes -= (pre + post);
        ordered->bytes_left -= (pre + post);

        /* Re-insert the node */
        node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
        if (node)
                btrfs_panic(fs_info, -EEXIST,
                        "zoned: inconsistency in ordered tree at offset %llu",
                        ordered->file_offset);

        spin_unlock_irq(&tree->lock);

        if (pre)
                ret = clone_ordered_extent(ordered, 0, pre);
        if (ret == 0 && post)
                ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
                                           post);

        return ret;
}
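
/*
 * The remove/re-insert dance above is needed because file_offset is the
 * rb-tree key: trimming pre off the front changes the key, so the node
 * cannot simply be adjusted in place. The trimmed head (pre) and tail
 * (post) are then re-added as standalone ordered extents via
 * clone_ordered_extent().
 */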

int __init ordered_data_init(void)
{
        btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
                                     sizeof(struct btrfs_ordered_extent), 0,
                                     SLAB_MEM_SPREAD,
                                     NULL);
        if (!btrfs_ordered_extent_cache)
                return -ENOMEM;

        return 0;
}

void __cold ordered_data_exit(void)
{
        kmem_cache_destroy(btrfs_ordered_extent_cache);
}