1 // SPDX-License-Identifier: GPL-2.0-only
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched/xacct.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include "internal.h"
23 * Performs necessary checks before doing a clone.
25 * Can adjust amount of bytes to clone via @req_count argument.
26 * Returns appropriate error code that caller should return or
27 * zero in case the clone should be allowed.
29 static int generic_remap_checks(struct file *file_in, loff_t pos_in,
30 struct file *file_out, loff_t pos_out,
31 loff_t *req_count, unsigned int remap_flags)
33 struct inode *inode_in = file_in->f_mapping->host;
34 struct inode *inode_out = file_out->f_mapping->host;
35 uint64_t count = *req_count;
37 loff_t size_in, size_out;
38 loff_t bs = inode_out->i_sb->s_blocksize;
41 /* The start of both ranges must be aligned to an fs block. */
42 if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
45 /* Ensure offsets don't wrap. */
46 if (pos_in + count < pos_in || pos_out + count < pos_out)
49 size_in = i_size_read(inode_in);
50 size_out = i_size_read(inode_out);
52 /* Dedupe requires both ranges to be within EOF. */
53 if ((remap_flags & REMAP_FILE_DEDUP) &&
54 (pos_in >= size_in || pos_in + count > size_in ||
55 pos_out >= size_out || pos_out + count > size_out))
58 /* Ensure the infile range is within the infile. */
59 if (pos_in >= size_in)
61 count = min(count, size_in - (uint64_t)pos_in);
63 ret = generic_write_check_limits(file_out, pos_out, &count);
68 * If the user wanted us to link to the infile's EOF, round up to the
69 * next block boundary for this check.
71 * Otherwise, make sure the count is also block-aligned, having
72 * already confirmed the starting offsets' block alignment.
74 if (pos_in + count == size_in) {
75 bcount = ALIGN(size_in, bs) - pos_in;
77 if (!IS_ALIGNED(count, bs))
78 count = ALIGN_DOWN(count, bs);
82 /* Don't allow overlapped cloning within the same file. */
83 if (inode_in == inode_out &&
84 pos_out + bcount > pos_in &&
85 pos_out < pos_in + bcount)
89 * We shortened the request but the caller can't deal with that, so
90 * bounce the request back to userspace.
92 if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
99 static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
102 if (unlikely(pos < 0 || len < 0))
105 if (unlikely((loff_t) (pos + len) < 0))
108 return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
112 * Ensure that we don't remap a partial EOF block in the middle of something
113 * else. Assume that the offsets have already been checked for block
116 * For clone we only link a partial EOF block above or at the destination file's
117 * EOF. For deduplication we accept a partial EOF block only if it ends at the
118 * destination file's EOF (can not link it into the middle of a file).
120 * Shorten the request if possible.
122 static int generic_remap_check_len(struct inode *inode_in,
123 struct inode *inode_out,
126 unsigned int remap_flags)
128 u64 blkmask = i_blocksize(inode_in) - 1;
129 loff_t new_len = *len;
131 if ((*len & blkmask) == 0)
134 if (pos_out + *len < i_size_read(inode_out))
140 if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
145 return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
148 /* Read a page's worth of file data into the page cache. */
149 static struct folio *vfs_dedupe_get_folio(struct file *file, loff_t pos)
151 return read_mapping_folio(file->f_mapping, pos >> PAGE_SHIFT, file);
155 * Lock two folios, ensuring that we lock in offset order if the folios
156 * are from the same file.
158 static void vfs_lock_two_folios(struct folio *folio1, struct folio *folio2)
160 /* Always lock in order of increasing index. */
161 if (folio1->index > folio2->index)
162 swap(folio1, folio2);
165 if (folio1 != folio2)
/* Unlock two folios, being careful not to unlock the same folio twice. */
static void vfs_unlock_two_folios(struct folio *folio1, struct folio *folio2)
{
	folio_unlock(folio1);
	if (folio1 != folio2)
		folio_unlock(folio2);
}
178 * Compare extents of two files to see if they are the same.
179 * Caller must have locked both inodes to prevent write races.
181 static int vfs_dedupe_file_range_compare(struct file *src, loff_t srcoff,
182 struct file *dest, loff_t dstoff,
183 loff_t len, bool *is_same)
189 struct folio *src_folio, *dst_folio;
190 void *src_addr, *dst_addr;
191 loff_t cmp_len = min(PAGE_SIZE - offset_in_page(srcoff),
192 PAGE_SIZE - offset_in_page(dstoff));
194 cmp_len = min(cmp_len, len);
198 src_folio = vfs_dedupe_get_folio(src, srcoff);
199 if (IS_ERR(src_folio)) {
200 error = PTR_ERR(src_folio);
203 dst_folio = vfs_dedupe_get_folio(dest, dstoff);
204 if (IS_ERR(dst_folio)) {
205 error = PTR_ERR(dst_folio);
206 folio_put(src_folio);
210 vfs_lock_two_folios(src_folio, dst_folio);
213 * Now that we've locked both folios, make sure they're still
214 * mapped to the file data we're interested in. If not,
215 * someone is invalidating pages on us and we lose.
217 if (!folio_test_uptodate(src_folio) || !folio_test_uptodate(dst_folio) ||
218 src_folio->mapping != src->f_mapping ||
219 dst_folio->mapping != dest->f_mapping) {
224 src_addr = kmap_local_folio(src_folio,
225 offset_in_folio(src_folio, srcoff));
226 dst_addr = kmap_local_folio(dst_folio,
227 offset_in_folio(dst_folio, dstoff));
229 flush_dcache_folio(src_folio);
230 flush_dcache_folio(dst_folio);
232 if (memcmp(src_addr, dst_addr, cmp_len))
235 kunmap_local(dst_addr);
236 kunmap_local(src_addr);
238 vfs_unlock_two_folios(src_folio, dst_folio);
239 folio_put(dst_folio);
240 folio_put(src_folio);
258 * Check that the two inodes are eligible for cloning, the ranges make
259 * sense, and then flush all dirty data. Caller must ensure that the
260 * inodes have been locked against any other modifications.
262 * If there's an error, then the usual negative error code is returned.
263 * Otherwise returns 0 with *len set to the request length.
265 int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
266 struct file *file_out, loff_t pos_out,
267 loff_t *len, unsigned int remap_flags)
269 struct inode *inode_in = file_inode(file_in);
270 struct inode *inode_out = file_inode(file_out);
271 bool same_inode = (inode_in == inode_out);
274 /* Don't touch certain kinds of inodes */
275 if (IS_IMMUTABLE(inode_out))
278 if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
281 /* Don't reflink dirs, pipes, sockets... */
282 if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
284 if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
287 /* Zero length dedupe exits immediately; reflink goes to EOF. */
289 loff_t isize = i_size_read(inode_in);
291 if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
295 *len = isize - pos_in;
300 /* Check that we don't violate system file offset limits. */
301 ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
306 /* Wait for the completion of any pending IOs on both files */
307 inode_dio_wait(inode_in);
309 inode_dio_wait(inode_out);
311 ret = filemap_write_and_wait_range(inode_in->i_mapping,
312 pos_in, pos_in + *len - 1);
316 ret = filemap_write_and_wait_range(inode_out->i_mapping,
317 pos_out, pos_out + *len - 1);
322 * Check that the extents are the same.
324 if (remap_flags & REMAP_FILE_DEDUP) {
325 bool is_same = false;
327 ret = vfs_dedupe_file_range_compare(file_in, pos_in,
328 file_out, pos_out, *len, &is_same);
335 ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
340 /* If can't alter the file contents, we're done. */
341 if (!(remap_flags & REMAP_FILE_DEDUP))
342 ret = file_modified(file_out);
346 EXPORT_SYMBOL(generic_remap_file_range_prep);
348 loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
349 struct file *file_out, loff_t pos_out,
350 loff_t len, unsigned int remap_flags)
354 WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);
356 if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
359 ret = generic_file_rw_checks(file_in, file_out);
363 if (!file_in->f_op->remap_file_range)
366 ret = remap_verify_area(file_in, pos_in, len, false);
370 ret = remap_verify_area(file_out, pos_out, len, true);
374 ret = file_in->f_op->remap_file_range(file_in, pos_in,
375 file_out, pos_out, len, remap_flags);
379 fsnotify_access(file_in);
380 fsnotify_modify(file_out);
383 EXPORT_SYMBOL(do_clone_file_range);
385 loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
386 struct file *file_out, loff_t pos_out,
387 loff_t len, unsigned int remap_flags)
391 file_start_write(file_out);
392 ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
394 file_end_write(file_out);
398 EXPORT_SYMBOL(vfs_clone_file_range);
400 /* Check whether we are allowed to dedupe the destination file */
401 static bool allow_file_dedupe(struct file *file)
403 struct user_namespace *mnt_userns = file_mnt_user_ns(file);
404 struct inode *inode = file_inode(file);
406 if (capable(CAP_SYS_ADMIN))
408 if (file->f_mode & FMODE_WRITE)
410 if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)))
412 if (!inode_permission(mnt_userns, inode, MAY_WRITE))
417 loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
418 struct file *dst_file, loff_t dst_pos,
419 loff_t len, unsigned int remap_flags)
423 WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
424 REMAP_FILE_CAN_SHORTEN));
426 ret = mnt_want_write_file(dst_file);
431 * This is redundant if called from vfs_dedupe_file_range(), but other
432 * callers need it and it's not performance sesitive...
434 ret = remap_verify_area(src_file, src_pos, len, false);
438 ret = remap_verify_area(dst_file, dst_pos, len, true);
443 if (!allow_file_dedupe(dst_file))
447 if (file_inode(src_file)->i_sb != file_inode(dst_file)->i_sb)
451 if (S_ISDIR(file_inode(dst_file)->i_mode))
455 if (!dst_file->f_op->remap_file_range)
463 ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file,
464 dst_pos, len, remap_flags | REMAP_FILE_DEDUP);
466 mnt_drop_write_file(dst_file);
470 EXPORT_SYMBOL(vfs_dedupe_file_range_one);
472 int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
474 struct file_dedupe_range_info *info;
475 struct inode *src = file_inode(file);
480 u16 count = same->dest_count;
483 if (!(file->f_mode & FMODE_READ))
486 if (same->reserved1 || same->reserved2)
489 off = same->src_offset;
490 len = same->src_length;
492 if (S_ISDIR(src->i_mode))
495 if (!S_ISREG(src->i_mode))
498 if (!file->f_op->remap_file_range)
501 ret = remap_verify_area(file, off, len, false);
506 if (off + len > i_size_read(src))
509 /* Arbitrary 1G limit on a single dedupe request, can be raised. */
510 len = min_t(u64, len, 1 << 30);
512 /* pre-format output fields to sane values */
513 for (i = 0; i < count; i++) {
514 same->info[i].bytes_deduped = 0ULL;
515 same->info[i].status = FILE_DEDUPE_RANGE_SAME;
518 for (i = 0, info = same->info; i < count; i++, info++) {
519 struct fd dst_fd = fdget(info->dest_fd);
520 struct file *dst_file = dst_fd.file;
523 info->status = -EBADF;
527 if (info->reserved) {
528 info->status = -EINVAL;
532 deduped = vfs_dedupe_file_range_one(file, off, dst_file,
533 info->dest_offset, len,
534 REMAP_FILE_CAN_SHORTEN);
535 if (deduped == -EBADE)
536 info->status = FILE_DEDUPE_RANGE_DIFFERS;
537 else if (deduped < 0)
538 info->status = deduped;
540 info->bytes_deduped = len;
545 if (fatal_signal_pending(current))
550 EXPORT_SYMBOL(vfs_dedupe_file_range);