// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"

static struct kmem_cache *btrfs_ordered_extent_cache;
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}
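/*
 * Illustrative note (not part of the original file): entry_end() returns the
 * exclusive end of the range, so an extent at file_offset 4096 with len 8192
 * covers [4096, 12288).  The overflow check above clamps an extent whose end
 * would wrap past U64_MAX to (u64)-1 instead of wrapping around to a small
 * offset, which would corrupt the ordering of the rb-tree below.
 */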
/*
 * returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
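/*
 * Usage sketch (illustrative only): __btrfs_add_ordered_extent() below is
 * the sole caller; under tree->lock it does
 *
 *	node = tree_insert(&tree->tree, file_offset, &entry->rb_node);
 *	if (node)
 *		// an existing extent already covers file_offset
 *
 * so a non-NULL return is always treated as a hard error (tree corruption).
 */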
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	btrfs_panic(fs_info, errno,
		    "Inconsistency in ordered tree at offset %llu", offset);
}
/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
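/*
 * Usage sketch (illustrative only): the lookup helpers below all funnel
 * through here while holding tree->lock, e.g.
 *
 *	spin_lock_irq(&tree->lock);
 *	node = tree_search(tree, file_offset);
 *	if (node)
 *		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 *	spin_unlock_irq(&tree->lock);
 *
 * tree->last caches the result, so repeated hits on the same ordered extent
 * (the common case during sequential IO completion) skip the rb-tree walk.
 */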
/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio) {
		percpu_counter_add_batch(&fs_info->dio_bytes, len,
					 fs_info->delalloc_batch);
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
	}

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);

	return 0;
}
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}
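/*
 * Example (hypothetical values, illustrative only): a write path that has
 * reserved a 1MiB extent at disk bytenr 'start' would register it before
 * submitting the IO:
 *
 *	ret = btrfs_add_ordered_extent(inode, pos, start, SZ_1M, SZ_1M,
 *				       BTRFS_ORDERED_REGULAR);
 *	if (ret)
 *		return ret;
 *
 * For compressed writes disk_len is the on-disk (compressed) size, so it may
 * be smaller than len, and the _compress variant carries the algorithm.
 */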
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
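/*
 * Caller sketch (hypothetical, illustrative only): because *file_offset is
 * advanced to the end of each accounted chunk, finished IO that spans
 * several ordered extents can be handled in a loop:
 *
 *	u64 cur = start, last;
 *
 *	while (cur < start + bytes) {
 *		struct btrfs_ordered_extent *ordered = NULL;
 *
 *		last = cur;
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *				&cur, start + bytes - cur, uptodate)) {
 *			// 'ordered' is fully written; finish it, then
 *			btrfs_put_ordered_extent(ordered);
 *		}
 *		if (cur == last)	// no ordered extent found in range
 *			break;
 *	}
 */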
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
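/*
 * Caller sketch (illustrative only): writeback completion typically passes a
 * 'cached' slot so that, when this returns 1, it receives the ordered extent
 * with an extra reference already taken under tree->lock:
 *
 *	struct btrfs_ordered_extent *ordered = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &ordered, pos, len, 1)) {
 *		// all bytes accounted: complete 'ordered', then
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */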
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false);

	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->len,
					 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}
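/*
 * Ordering sketch (illustrative, loosely mirrors the completion path in
 * inode.c): the on-disk i_size is updated first, then the extent is removed
 * from the tree, and only then are the references dropped:
 *
 *	btrfs_ordered_update_i_size(inode, 0, ordered);
 *	btrfs_remove_ordered_extent(inode, ordered);
 *	// once for the lookup that found it, once for the tree's ref
 *	btrfs_put_ordered_extent(ordered);
 *	btrfs_put_ordered_extent(ordered);
 */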
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}
/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->start ||
		    ordered->start + ordered->disk_len <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}
u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 total_done = 0;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);
		total_done += done;

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);

	return total_done;
}
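/*
 * Example (illustrative only): a caller that needs every ordered extent in
 * the filesystem flushed, with no count or range limit, passes the sentinel
 * values that disable both limits:
 *
 *	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 */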
/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
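/*
 * Example (illustrative only): an fsync-style caller flushes and waits on
 * just the dirty region instead of the whole file:
 *
 *	ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
 *	if (ret)
 *		return ret;
 */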
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
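/*
 * Example (illustrative only): every successful lookup takes a reference, so
 * callers must pair it with btrfs_put_ordered_extent():
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
 *	if (ordered) {
 *		// the page is covered by a pending ordered extent
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */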
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
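/*
 * Example (illustrative only): a DIO-style caller that has locked a whole
 * range waits out any overlapping ordered extent before proceeding:
 *
 *	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
 *					     lockend - lockstart + 1);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */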
/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;
	u64 orig_offset = offset;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * If ordered is not NULL, then this is called from endio and
	 * disk_i_size will be updated by either truncate itself or any
	 * in-flight IOs which are inside the disk_i_size.
	 *
	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
	 * fails somehow, we need to make sure we have a precise disk_i_size by
	 * updating it as usual.
	 */
	if (!ordered && disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = orig_offset;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;

		if (entry_end(test) <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;

		/*
		 * We don't update disk_i_size now, so record this undealt
		 * i_size.  Otherwise we will not know the real i_size.
		 */
		if (test->outstanding_isize < offset)
			test->outstanding_isize = offset;
		if (ordered &&
		    ordered->outstanding_isize > test->outstanding_isize)
			test->outstanding_isize = ordered->outstanding_isize;
		goto out;
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the disk_i_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}
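/*
 * Example (illustrative only): this is called with an ordered extent from IO
 * completion, or with ordered == NULL and a plain offset from the truncate
 * path, in which case 'offset' is sector-aligned before being considered:
 *
 *	// endio: extend disk_i_size over the finished extent
 *	btrfs_ordered_update_i_size(inode, 0, ordered);
 *
 *	// truncate: no ordered extent, use the new size itself
 *	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
 */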
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors * sizeof(u32));

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}