// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "fs.h"
#include "messages.h"
#include "compression.h"
#include "delalloc-space.h"
#include "disk-io.h"
#include "reflink.h"
#include "transaction.h"
#include "subpage.h"
#include "accessors.h"
#include "file-item.h"
#include "file.h"
#include "super.h"

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M
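
/*
 * Finish off an inode update after (part of) a clone operation: bump the
 * inode's iversion, optionally update mtime/ctime, extend i_size if the
 * cloned range goes beyond EOF, and write the updated inode item back,
 * ending the transaction.
 */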
static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	}
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size) {
		i_size_write(inode, endoff);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
	}

	ret = btrfs_update_inode(trans, BTRFS_I(inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}
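
/*
 * Copy the data of an inline extent into the destination block (or page,
 * when the sector size matches the page size) at @file_offset, decompressing
 * the data first if the inline extent is compressed.
 */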
static int copy_inline_to_page(struct btrfs_inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 block_size = fs_info->sectorsize;
	const u64 range_end = file_offset + block_size - 1;
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, we also have locked the inodes, so we are safe to do a
	 * reservation here. Also we must not do the reservation while holding
	 * a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	clear_extent_bit(&inode->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * After dirtying the page our caller will need to start a transaction,
	 * and if we are low on metadata free space, that can cause flushing of
	 * delalloc for all inodes in order to get metadata space released.
	 * However we are holding the range locked for the whole duration of
	 * the clone/dedupe operation, so we may deadlock if that happens and no
	 * other task releases enough space. So mark this inode as not being
	 * possible to flush to avoid such deadlock. We will clear that flag
	 * when we finish cloning all extents, since a transaction is started
	 * after finding each extent to clone.
	 */
	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
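
	/*
	 * Copy the inline data into the page: directly for uncompressed
	 * extents, through btrfs_decompress() otherwise.
	 */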
	if (comp_type == BTRFS_COMPRESS_NONE) {
		memcpy_to_page(page, offset_in_page(file_offset), data_start,
			       datal);
	} else {
		ret = btrfs_decompress(comp_type, data_start, page,
				       offset_in_page(file_offset),
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller than the block/page size, then the
	 * remainder of the block/page is equivalent to zeroes. We had something
	 * like the following done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync  # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K"
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size)
		memzero_page(page, datal, block_size - datal);
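
	/*
	 * Use the subpage-aware helpers so this also does the right thing
	 * when the sector size is smaller than the page size.
	 */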
	btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
	btrfs_page_clear_checked(fs_info, page, file_offset, block_size);
	btrfs_page_set_dirty(fs_info, page, file_offset, block_size);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(inode, block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}

/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to destination inode when possible. When not possible we
 * copy the inline extent's data into the respective page of the inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0) {
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			goto copy_to_page;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		goto copy_to_page;
	}

copy_inline_extent:
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt the same way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger than the one we want to
		 * clone. Deal with all these cases by copying the inline extent
		 * data into the respective page at the destination inode.
		 */
		goto copy_to_page;
	}

	/*
	 * Release path before starting a new transaction so we don't hold locks
	 * that would confuse lockdep.
	 */
	btrfs_release_path(path);
	/*
	 * If we end up here it means we are copying the inline extent into a
	 * leaf of the destination inode. We know we will drop or adjust at
	 * most one extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	drop_args.path = path;
	drop_args.start = drop_start;
	drop_args.end = aligned_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
	if (ret)
		goto out;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
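	/*
	 * The inline extent was added without going through the ordered
	 * extent machinery that fsync logging normally relies on, so make
	 * the next fsync of the destination inode fall back to a full sync.
	 */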
	btrfs_set_inode_full_sync(BTRFS_I(dst));
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update inode item
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	if (!ret)
		*trans_out = trans;

	return ret;

copy_to_page:
	/*
	 * Release our path because we don't need it anymore and also because
	 * copy_inline_to_page() needs to reserve data and metadata, which may
	 * need to flush delalloc when we are low on available space and
	 * therefore cause a deadlock if writeback of an inline extent needs to
	 * write to the same leaf or an ordered extent completion needs to write
	 * to the same leaf.
	 */
	btrfs_release_path(path);

	ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
				  inline_data, size, datal, comp_type);
	goto out;
}

/*
 * Clone a range from inode file to another.
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;
	u64 prev_extent_end = off;
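
	/*
	 * The temporary buffer must be able to hold a full file extent item,
	 * including inline data, whose size is bounded by the tree block size.
	 */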
	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* Clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

	while (1) {
		struct btrfs_file_extent_item *extent;
		u64 extent_gen;
		int type;
		u32 size;
		struct btrfs_key new_key;
		u64 disko = 0, diskl = 0;
		u64 datao = 0, datal = 0;
		u8 comp;
		u64 drop_start;

		/* Note the key will change type as we walk through the tree */
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
					0, 0);
		if (ret < 0)
			goto out;
		/*
		 * First search, if no extent item that starts at offset off was
		 * found but the previous item is an extent item, it's possible
		 * it might overlap our target range, therefore process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}

		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		extent_gen = btrfs_file_extent_generation(leaf, extent);
		comp = btrfs_file_extent_compression(leaf, extent);
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
			datao = btrfs_file_extent_offset(leaf, extent);
			datal = btrfs_file_extent_num_bytes(leaf, extent);
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/* Take upper bound, may be compressed */
			datal = btrfs_file_extent_ram_bytes(leaf, extent);
		}

		/*
		 * The first search might have left us at an extent item that
		 * ends before our target range's start, which can happen if we
		 * have holes and the NO_HOLES feature enabled.
		 *
		 * Subsequent searches may leave us on a file range we have
		 * processed before - this happens due to a race with ordered
		 * extent completion for a file range that is outside our source
		 * range, but that range was part of a file extent item that
		 * also covered a leading part of our source range.
		 */
		if (key.offset + datal <= prev_extent_end) {
			path->slots[0]++;
			goto process_slot;
		} else if (key.offset >= off + len) {
			break;
		}

		prev_extent_end = key.offset + datal;
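
		/*
		 * Save a raw copy of the file extent item, as the path is
		 * released below (and the leaf with it) before the item is
		 * replayed into the destination root.
		 */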
		size = btrfs_item_size(leaf, slot);
		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
				   size);

		btrfs_release_path(path);

		memcpy(&new_key, &key, sizeof(new_key));
		new_key.objectid = btrfs_ino(BTRFS_I(inode));
		if (off <= key.offset)
			new_key.offset = key.offset + destoff - off;
		else
			new_key.offset = destoff;

		/*
		 * Deal with a hole that doesn't have an extent item that
		 * represents it (NO_HOLES feature enabled).
		 * This hole is either in the middle of the cloning range or at
		 * the beginning (fully overlaps it or partially overlaps it).
		 */
		if (new_key.offset != last_dest_end)
			drop_start = last_dest_end;
		else
			drop_start = new_key.offset;
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_replace_extent_info clone_info;

			/*
			 *    a  | --- range to clone ---|  b
			 * | ------------- extent ------------- |
			 */

			/* Subtract range b */
			if (key.offset + datal > off + len)
				datal = off + len - key.offset;

			/* Subtract range a */
			if (off > key.offset) {
				datao += off - key.offset;
				datal -= off - key.offset;
			}

			clone_info.disk_offset = disko;
			clone_info.disk_len = diskl;
			clone_info.data_offset = datao;
			clone_info.data_len = datal;
			clone_info.file_offset = new_key.offset;
			clone_info.extent_buf = buf;
			clone_info.is_new_extent = false;
			clone_info.update_times = !no_time_update;
			ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
					drop_start, new_key.offset + datal - 1,
					&clone_info, &trans);
			if (ret)
				goto out;
		} else {
			ASSERT(type == BTRFS_FILE_EXTENT_INLINE);
			/*
			 * Inline extents always have to start at file offset 0
			 * and can never be bigger than the sector size. We can
			 * never clone only parts of an inline extent, since all
			 * reflink operations must start at a sector size aligned
			 * offset, and the length must be aligned too or end at
			 * the i_size (which implies the whole inlined data).
			 */
			ASSERT(key.offset == 0);
			ASSERT(datal <= fs_info->sectorsize);
			if (WARN_ON(type != BTRFS_FILE_EXTENT_INLINE) ||
			    WARN_ON(key.offset != 0) ||
			    WARN_ON(datal > fs_info->sectorsize)) {
				ret = -EUCLEAN;
				goto out;
			}

			ret = clone_copy_inline_extent(inode, path, &new_key,
						       drop_start, datal, size,
						       comp, buf, &trans);
			if (ret)
				goto out;
		}

		btrfs_release_path(path);
		/*
		 * Whenever we share an extent we update the last_reflink_trans
		 * of each inode to the current transaction. This is needed to
		 * make sure fsync does not log multiple checksum items with
		 * overlapping ranges (because some extent items might refer
		 * only to sections of the original extent). For the destination
		 * inode we do this regardless of the generation of the extents
		 * or even if they are inline extents or explicit holes, to make
		 * sure a full fsync does not skip them. For the source inode,
		 * we only need to update last_reflink_trans in case it's a new
		 * extent that is not a hole or an inline extent, to deal with
		 * the checksums problem on fsync.
		 */
		if (extent_gen == trans->transid && disko > 0)
			BTRFS_I(src)->last_reflink_trans = trans->transid;

		BTRFS_I(inode)->last_reflink_trans = trans->transid;

		last_dest_end = ALIGN(new_key.offset + datal,
				      fs_info->sectorsize);
		ret = clone_finish_inode_update(trans, inode, last_dest_end,
						destoff, olen, no_time_update);
		if (ret)
			goto out;
		if (new_key.offset + datal >= destoff + len)
			break;

		btrfs_release_path(path);
		key.offset = prev_extent_end;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}
	ret = 0;
	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole that fully or partially overlaps our
		 * cloning range at its end. This means that we either have the
		 * NO_HOLES feature enabled or the implicit hole happened due to
		 * mixing buffered and direct IO writes against this file.
		 */
		btrfs_release_path(path);

		/*
		 * When using NO_HOLES and we are cloning a range that covers
		 * only a hole (no extents) into a range beyond the current
		 * i_size, punching a hole in the target range will not create
		 * an extent map defining a hole, because the range starts at or
		 * beyond current i_size. If the file previously had an i_size
		 * greater than the new i_size set by this clone operation, we
		 * need to make sure the next fsync is a full fsync, so that it
		 * detects and logs a hole covering a range from the current
		 * i_size to the new i_size. If the clone range covers extents,
		 * besides a hole, then we know the full sync flag was already
		 * set by previous calls to btrfs_replace_file_extents() that
		 * replaced file extent items.
		 */
		if (last_dest_end >= i_size_read(inode))
			btrfs_set_inode_full_sync(BTRFS_I(inode));

		ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
				last_dest_end, destoff + len - 1, NULL, &trans);
		if (ret)
			goto out;

		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);

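	/*
	 * Clear the flag set by copy_inline_to_page(): no extent range is
	 * locked anymore, so it is safe to flush delalloc for this inode
	 * again.
	 */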
	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

	return ret;
}

static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				       struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1, NULL);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1, NULL);
}

static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				     struct inode *inode2, u64 loff2, u64 len)
{
	u64 range1_end = loff1 + len - 1;
	u64 range2_end = loff2 + len - 1;

	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
		swap(range1_end, range2_end);
	} else if (inode1 == inode2 && loff2 < loff1) {
		swap(loff1, loff2);
		swap(range1_end, range2_end);
	}

	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, range1_end, NULL);
	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, range2_end, NULL);

	btrfs_assert_inode_range_clean(BTRFS_I(inode1), loff1, range1_end);
	btrfs_assert_inode_range_clean(BTRFS_I(inode2), loff2, range2_end);
}
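
/*
 * Take the i_mmap locks of both inodes in a fixed order (the inode with the
 * higher address first, the same order btrfs_double_extent_lock() uses) so
 * that concurrent reflink operations cannot deadlock against each other.
 */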
static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2)
{
	if (inode1 < inode2)
		swap(inode1, inode2);
	down_write(&BTRFS_I(inode1)->i_mmap_lock);
	down_write_nested(&BTRFS_I(inode2)->i_mmap_lock, SINGLE_DEPTH_NESTING);
}

static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
{
	up_write(&BTRFS_I(inode1)->i_mmap_lock);
	up_write(&BTRFS_I(inode2)->i_mmap_lock);
}

static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
				   struct inode *dst, u64 dst_loff)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info;
	const u64 bs = fs_info->sb->s_blocksize;
	int ret;

	/*
	 * Lock destination range to serialize with concurrent readahead() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	btrfs_btree_balance_dirty(fs_info);

	return ret;
}

static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret = 0;
	u64 i, tail_len, chunk_count;
	struct btrfs_root *root_dst = BTRFS_I(dst)->root;

	spin_lock(&root_dst->root_item_lock);
	if (root_dst->send_in_progress) {
		btrfs_warn_rl(root_dst->fs_info,
			      "cannot deduplicate to root %llu while send operations are using it (%d in progress)",
			      root_dst->root_key.objectid,
			      root_dst->send_in_progress);
		spin_unlock(&root_dst->root_item_lock);
		return -EAGAIN;
	}
	root_dst->dedupe_in_progress++;
	spin_unlock(&root_dst->root_item_lock);

	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
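
	/*
	 * Process the range in chunks of at most BTRFS_MAX_DEDUPE_LEN (16M)
	 * each, plus a tail shorter than one chunk.
	 */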
	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff);
		if (ret)
			goto out;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
	spin_lock(&root_dst->root_item_lock);
	root_dst->dedupe_in_progress--;
	spin_unlock(&root_dst->root_item_lock);

	return ret;
}

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
				      u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int wb_ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;

	/*
	 * VFS's generic_remap_file_range_prep() protects us from cloning the
	 * eof block into the middle of a file, which would result in corruption
	 * if the file size is not blocksize aligned. So we don't need to check
	 * for that case here.
	 */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

	if (destoff > inode->i_size) {
		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

		ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
		if (ret)
			return ret;
		/*
		 * We may have truncated the last block if the inode's size is
		 * not sector size aligned, so we need to wait for writeback to
		 * complete before proceeding further, otherwise we can race
		 * with cloning and attempt to increment a reference to an
		 * extent that no longer exists (writeback completed right after
		 * we found the previous extent covering eof and before we
		 * attempted to increment its reference count).
		 */
		ret = btrfs_wait_ordered_range(inode, wb_start,
					       destoff - wb_start);
		if (ret)
			return ret;
	}

	/*
	 * Lock destination range to serialize with concurrent readahead() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, off, inode, destoff, len);
	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
	btrfs_double_extent_unlock(src, off, inode, destoff, len);

	/*
	 * We may have copied an inline extent into a page of the destination
	 * range, so wait for writeback to complete before truncating pages
	 * from the page cache. This is a rare case.
	 */
	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
	ret = ret ? ret : wb_ret;
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				   round_down(destoff, PAGE_SIZE),
				   round_up(destoff + len, PAGE_SIZE) - 1);

	btrfs_btree_balance_dirty(fs_info);

	return ret;
}

static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
	u64 wb_len;
	int ret;

	if (!(remap_flags & REMAP_FILE_DEDUP)) {
		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

		if (btrfs_root_readonly(root_out))
			return -EROFS;

		ASSERT(inode_in->i_sb == inode_out->i_sb);
	}

	/* Don't make the dst file partly checksummed */
	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
		return -EINVAL;
	}

	/*
	 * Now that the inodes are locked, we need to start writeback ourselves
	 * and can not rely on the writeback from the VFS's generic helper
	 * generic_remap_file_range_prep() because:
	 *
	 * 1) For compression we must call filemap_fdatawrite_range() twice
	 *    (btrfs_fdatawrite_range() does it for us), and the generic
	 *    helper only calls it once;
	 *
	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
	 *    waits for the writeback to complete, i.e. for IO to be done, and
	 *    not for the ordered extents to complete. We need to wait for them
	 *    to complete so that new file extent items are in the fs tree.
	 */
	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
	else
		wb_len = ALIGN(*len, bs);

	/*
	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
	 *
	 * Btrfs' back references do not have a block level granularity, they
	 * work at the whole extent level.
	 * NOCOW buffered write without data space reserved may not be able
	 * to fall back to CoW due to lack of data space, thus could cause
	 * data loss.
	 *
	 * Here we take a shortcut by flushing the whole inode, so that all
	 * nocow writes should reach disk as nocow before we increase the
	 * reference of the extent. We could do better by only flushing NOCOW
	 * data, but that needs extra accounting.
	 *
	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
	 * CoWed anyway, not affecting the nocow part.
	 */
	ret = filemap_flush(inode_in->i_mapping);
	if (ret < 0)
		return ret;

	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
				       wb_len);
	if (ret < 0)
		return ret;
	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
				       wb_len);
	if (ret < 0)
		return ret;

	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					    len, remap_flags);
}
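
/*
 * A file is considered subject to synchronous writes if it was opened with
 * O_SYNC or O_DSYNC, or if it has the S_SYNC inode attribute set.
 */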
static bool file_sync_write(const struct file *file)
{
	if (file->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(file)))
		return true;

	return false;
}

loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	bool same_inode = dst_inode == src_inode;
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (same_inode) {
		btrfs_inode_lock(BTRFS_I(src_inode), BTRFS_ILOCK_MMAP);
	} else {
		lock_two_nondirectories(src_inode, dst_inode);
		btrfs_double_mmap_lock(src_inode, dst_inode);
	}

	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
					  &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	if (remap_flags & REMAP_FILE_DEDUP)
		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
	else
		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
	if (same_inode) {
		btrfs_inode_unlock(BTRFS_I(src_inode), BTRFS_ILOCK_MMAP);
	} else {
		btrfs_double_mmap_unlock(src_inode, dst_inode);
		unlock_two_nondirectories(src_inode, dst_inode);
	}

	/*
	 * If either the source or the destination file was opened with O_SYNC,
	 * O_DSYNC or has the S_SYNC attribute, fsync both the destination and
	 * source files/ranges, so that a successful return (0) followed by a
	 * power failure still results in the reflinked data being readable
	 * from both files.
	 */
	if (ret == 0 && len > 0 &&
	    (file_sync_write(src_file) || file_sync_write(dst_file))) {
		ret = btrfs_sync_file(src_file, off, off + len - 1, 0);
		if (ret == 0)
			ret = btrfs_sync_file(dst_file, destoff,
					      destoff + len - 1, 0);
	}

	return ret < 0 ? ret : len;
}