// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/btrfs.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
#include "inode-map.h"
#include "backref.h"
#include "rcu-string.h"
#include "send.h"
#include "dev-replace.h"
#include "props.h"
#include "sysfs.h"
#include "qgroup.h"
#include "tree-log.h"
#include "compression.h"

#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
        __u64 sec;
        __u32 nsec;
} __attribute__ ((__packed__));

struct btrfs_ioctl_received_subvol_args_32 {
        char    uuid[BTRFS_UUID_SIZE];  /* in */
        __u64   stransid;               /* in */
        __u64   rtransid;               /* out */
        struct btrfs_ioctl_timespec_32 stime; /* in */
        struct btrfs_ioctl_timespec_32 rtime; /* out */
        __u64   flags;                  /* in */
        __u64   reserved[16];           /* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
                                struct btrfs_ioctl_received_subvol_args_32)
#endif

#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
struct btrfs_ioctl_send_args_32 {
        __s64 send_fd;                  /* in */
        __u64 clone_sources_count;      /* in */
        compat_uptr_t clone_sources;    /* in */
        __u64 parent_root;              /* in */
        __u64 flags;                    /* in */
        __u64 reserved[4];              /* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
                               struct btrfs_ioctl_send_args_32)
#endif
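
/*
 * Illustrative note (an assumption about code elsewhere in this file, not
 * shown here): because _IOW()/_IOWR() encode sizeof() of the argument
 * struct, the 32-bit variants above yield different ioctl numbers than the
 * native ones, so the main ioctl switch can forward both to one handler,
 * roughly:
 *
 *      case BTRFS_IOC_SEND_32:
 *              return _btrfs_ioctl_send(file, argp, true);
 */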

static int btrfs_clone(struct inode *src, struct inode *inode,
                       u64 off, u64 olen, u64 olen_aligned, u64 destoff,
                       int no_time_update);

/* Mask out flags that are inappropriate for the given type of inode. */
static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
                unsigned int flags)
{
        if (S_ISDIR(inode->i_mode))
                return flags;
        else if (S_ISREG(inode->i_mode))
                return flags & ~FS_DIRSYNC_FL;
        else
                return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}

/*
 * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
 * ioctl.
 */
static unsigned int btrfs_inode_flags_to_fsflags(unsigned int flags)
{
        unsigned int iflags = 0;

        if (flags & BTRFS_INODE_SYNC)
                iflags |= FS_SYNC_FL;
        if (flags & BTRFS_INODE_IMMUTABLE)
                iflags |= FS_IMMUTABLE_FL;
        if (flags & BTRFS_INODE_APPEND)
                iflags |= FS_APPEND_FL;
        if (flags & BTRFS_INODE_NODUMP)
                iflags |= FS_NODUMP_FL;
        if (flags & BTRFS_INODE_NOATIME)
                iflags |= FS_NOATIME_FL;
        if (flags & BTRFS_INODE_DIRSYNC)
                iflags |= FS_DIRSYNC_FL;
        if (flags & BTRFS_INODE_NODATACOW)
                iflags |= FS_NOCOW_FL;

        if (flags & BTRFS_INODE_NOCOMPRESS)
                iflags |= FS_NOCOMP_FL;
        else if (flags & BTRFS_INODE_COMPRESS)
                iflags |= FS_COMPR_FL;

        return iflags;
}

/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
{
        struct btrfs_inode *binode = BTRFS_I(inode);
        unsigned int new_fl = 0;

        if (binode->flags & BTRFS_INODE_SYNC)
                new_fl |= S_SYNC;
        if (binode->flags & BTRFS_INODE_IMMUTABLE)
                new_fl |= S_IMMUTABLE;
        if (binode->flags & BTRFS_INODE_APPEND)
                new_fl |= S_APPEND;
        if (binode->flags & BTRFS_INODE_NOATIME)
                new_fl |= S_NOATIME;
        if (binode->flags & BTRFS_INODE_DIRSYNC)
                new_fl |= S_DIRSYNC;

        set_mask_bits(&inode->i_flags,
                      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
                      new_fl);
}

static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
        struct btrfs_inode *binode = BTRFS_I(file_inode(file));
        unsigned int flags = btrfs_inode_flags_to_fsflags(binode->flags);

        if (copy_to_user(arg, &flags, sizeof(flags)))
                return -EFAULT;
        return 0;
}
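
/*
 * Illustrative userspace sketch (not part of this file): the handler above
 * backs the generic FS_IOC_GETFLAGS ioctl, so a program reads the
 * btrfs-translated flags roughly like this ("fd" is any open file on btrfs):
 *
 *      unsigned int flags;
 *      if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0 &&
 *          (flags & FS_NOCOW_FL))
 *              printf("NOCOW is set\n");
 */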

/* Check if @flags are a supported and valid set of FS_*_FL flags */
static int check_fsflags(unsigned int flags)
{
        if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL |
                      FS_NOATIME_FL | FS_NODUMP_FL |
                      FS_SYNC_FL | FS_DIRSYNC_FL |
                      FS_NOCOMP_FL | FS_COMPR_FL |
                      FS_NOCOW_FL))
                return -EOPNOTSUPP;

        if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
                return -EINVAL;

        return 0;
}

static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
        struct inode *inode = file_inode(file);
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_inode *binode = BTRFS_I(inode);
        struct btrfs_root *root = binode->root;
        struct btrfs_trans_handle *trans;
        unsigned int fsflags, old_fsflags;
        int ret;
        u64 old_flags;
        unsigned int old_i_flags;
        umode_t mode;

        if (!inode_owner_or_capable(inode))
                return -EPERM;

        if (btrfs_root_readonly(root))
                return -EROFS;

        if (copy_from_user(&fsflags, arg, sizeof(fsflags)))
                return -EFAULT;

        ret = check_fsflags(fsflags);
        if (ret)
                return ret;

        ret = mnt_want_write_file(file);
        if (ret)
                return ret;

        inode_lock(inode);

        old_flags = binode->flags;
        old_i_flags = inode->i_flags;
        mode = inode->i_mode;

        fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
        old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
        if ((fsflags ^ old_fsflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
                if (!capable(CAP_LINUX_IMMUTABLE)) {
                        ret = -EPERM;
                        goto out_unlock;
                }
        }

        if (fsflags & FS_SYNC_FL)
                binode->flags |= BTRFS_INODE_SYNC;
        else
                binode->flags &= ~BTRFS_INODE_SYNC;
        if (fsflags & FS_IMMUTABLE_FL)
                binode->flags |= BTRFS_INODE_IMMUTABLE;
        else
                binode->flags &= ~BTRFS_INODE_IMMUTABLE;
        if (fsflags & FS_APPEND_FL)
                binode->flags |= BTRFS_INODE_APPEND;
        else
                binode->flags &= ~BTRFS_INODE_APPEND;
        if (fsflags & FS_NODUMP_FL)
                binode->flags |= BTRFS_INODE_NODUMP;
        else
                binode->flags &= ~BTRFS_INODE_NODUMP;
        if (fsflags & FS_NOATIME_FL)
                binode->flags |= BTRFS_INODE_NOATIME;
        else
                binode->flags &= ~BTRFS_INODE_NOATIME;
        if (fsflags & FS_DIRSYNC_FL)
                binode->flags |= BTRFS_INODE_DIRSYNC;
        else
                binode->flags &= ~BTRFS_INODE_DIRSYNC;
        if (fsflags & FS_NOCOW_FL) {
                if (S_ISREG(mode)) {
                        /*
                         * It's safe to turn csums off here, no extents exist.
                         * Otherwise we want the flag to reflect the real COW
                         * status of the file and will not set it.
                         */
                        if (inode->i_size == 0)
                                binode->flags |= BTRFS_INODE_NODATACOW
                                              | BTRFS_INODE_NODATASUM;
                } else {
                        binode->flags |= BTRFS_INODE_NODATACOW;
                }
        } else {
                /*
                 * Revert back under same assumptions as above
                 */
                if (S_ISREG(mode)) {
                        if (inode->i_size == 0)
                                binode->flags &= ~(BTRFS_INODE_NODATACOW
                                                 | BTRFS_INODE_NODATASUM);
                } else {
                        binode->flags &= ~BTRFS_INODE_NODATACOW;
                }
        }

        /*
         * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
         * flag may be changed automatically if compression code won't make
         * things smaller.
         */
        if (fsflags & FS_NOCOMP_FL) {
                binode->flags &= ~BTRFS_INODE_COMPRESS;
                binode->flags |= BTRFS_INODE_NOCOMPRESS;

                ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
                if (ret && ret != -ENODATA)
                        goto out_drop;
        } else if (fsflags & FS_COMPR_FL) {
                const char *comp;

                binode->flags |= BTRFS_INODE_COMPRESS;
                binode->flags &= ~BTRFS_INODE_NOCOMPRESS;

                comp = btrfs_compress_type2str(fs_info->compress_type);
                if (!comp || comp[0] == 0)
                        comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);

                ret = btrfs_set_prop(inode, "btrfs.compression",
                                     comp, strlen(comp), 0);
                if (ret)
                        goto out_drop;
        } else {
                ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
                if (ret && ret != -ENODATA)
                        goto out_drop;
                binode->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
        }

        trans = btrfs_start_transaction(root, 1);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out_drop;
        }

        btrfs_sync_inode_flags_to_i_flags(inode);
        inode_inc_iversion(inode);
        inode->i_ctime = current_time(inode);
        ret = btrfs_update_inode(trans, root, inode);

        btrfs_end_transaction(trans);
out_drop:
        if (ret) {
                binode->flags = old_flags;
                inode->i_flags = old_i_flags;
        }

out_unlock:
        inode_unlock(inode);
        mnt_drop_write_file(file);
        return ret;
}
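
/*
 * Illustrative userspace sketch (not part of this file): as enforced above,
 * NODATACOW/NODATASUM are only fully applied to an empty file, so the usual
 * pattern is to set the flag right after creating the file, before any
 * write:
 *
 *      int fd = open("file", O_CREAT | O_WRONLY | O_EXCL, 0644);
 *      unsigned int flags = 0;
 *      ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *      flags |= FS_NOCOW_FL;
 *      ioctl(fd, FS_IOC_SETFLAGS, &flags);
 */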

/*
 * Translate btrfs internal inode flags to xflags as expected by the
 * FS_IOC_FSGETXATTR ioctl. Filter only the supported ones, unknown flags are
 * silently dropped.
 */
static unsigned int btrfs_inode_flags_to_xflags(unsigned int flags)
{
        unsigned int xflags = 0;

        if (flags & BTRFS_INODE_APPEND)
                xflags |= FS_XFLAG_APPEND;
        if (flags & BTRFS_INODE_IMMUTABLE)
                xflags |= FS_XFLAG_IMMUTABLE;
        if (flags & BTRFS_INODE_NOATIME)
                xflags |= FS_XFLAG_NOATIME;
        if (flags & BTRFS_INODE_NODUMP)
                xflags |= FS_XFLAG_NODUMP;
        if (flags & BTRFS_INODE_SYNC)
                xflags |= FS_XFLAG_SYNC;

        return xflags;
}

/* Check if @flags are a supported and valid set of FS_XFLAGS_* flags */
static int check_xflags(unsigned int flags)
{
        if (flags & ~(FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE | FS_XFLAG_NOATIME |
                      FS_XFLAG_NODUMP | FS_XFLAG_SYNC))
                return -EOPNOTSUPP;
        return 0;
}

/*
 * Set the xflags from the internal inode flags. The remaining items of
 * fsxattr are zeroed.
 */
static int btrfs_ioctl_fsgetxattr(struct file *file, void __user *arg)
{
        struct btrfs_inode *binode = BTRFS_I(file_inode(file));
        struct fsxattr fa;

        memset(&fa, 0, sizeof(fa));
        fa.fsx_xflags = btrfs_inode_flags_to_xflags(binode->flags);

        if (copy_to_user(arg, &fa, sizeof(fa)))
                return -EFAULT;

        return 0;
}

static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
{
        struct inode *inode = file_inode(file);
        struct btrfs_inode *binode = BTRFS_I(inode);
        struct btrfs_root *root = binode->root;
        struct btrfs_trans_handle *trans;
        struct fsxattr fa;
        unsigned old_flags;
        unsigned old_i_flags;
        int ret = 0;

        if (!inode_owner_or_capable(inode))
                return -EPERM;

        if (btrfs_root_readonly(root))
                return -EROFS;

        memset(&fa, 0, sizeof(fa));
        if (copy_from_user(&fa, arg, sizeof(fa)))
                return -EFAULT;

        ret = check_xflags(fa.fsx_xflags);
        if (ret)
                return ret;

        if (fa.fsx_extsize != 0 || fa.fsx_projid != 0 || fa.fsx_cowextsize != 0)
                return -EOPNOTSUPP;

        ret = mnt_want_write_file(file);
        if (ret)
                return ret;

        inode_lock(inode);

        old_flags = binode->flags;
        old_i_flags = inode->i_flags;

        /* We need the capabilities to change append-only or immutable inode */
        if (((old_flags & (BTRFS_INODE_APPEND | BTRFS_INODE_IMMUTABLE)) ||
             (fa.fsx_xflags & (FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE))) &&
            !capable(CAP_LINUX_IMMUTABLE)) {
                ret = -EPERM;
                goto out_unlock;
        }

        if (fa.fsx_xflags & FS_XFLAG_SYNC)
                binode->flags |= BTRFS_INODE_SYNC;
        else
                binode->flags &= ~BTRFS_INODE_SYNC;
        if (fa.fsx_xflags & FS_XFLAG_IMMUTABLE)
                binode->flags |= BTRFS_INODE_IMMUTABLE;
        else
                binode->flags &= ~BTRFS_INODE_IMMUTABLE;
        if (fa.fsx_xflags & FS_XFLAG_APPEND)
                binode->flags |= BTRFS_INODE_APPEND;
        else
                binode->flags &= ~BTRFS_INODE_APPEND;
        if (fa.fsx_xflags & FS_XFLAG_NODUMP)
                binode->flags |= BTRFS_INODE_NODUMP;
        else
                binode->flags &= ~BTRFS_INODE_NODUMP;
        if (fa.fsx_xflags & FS_XFLAG_NOATIME)
                binode->flags |= BTRFS_INODE_NOATIME;
        else
                binode->flags &= ~BTRFS_INODE_NOATIME;

        /* 1 item for the inode */
        trans = btrfs_start_transaction(root, 1);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out_unlock;
        }

        btrfs_sync_inode_flags_to_i_flags(inode);
        inode_inc_iversion(inode);
        inode->i_ctime = current_time(inode);
        ret = btrfs_update_inode(trans, root, inode);

        btrfs_end_transaction(trans);

out_unlock:
        if (ret) {
                binode->flags = old_flags;
                inode->i_flags = old_i_flags;
        }

        inode_unlock(inode);
        mnt_drop_write_file(file);

        return ret;
}

static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
        struct inode *inode = file_inode(file);

        return put_user(inode->i_generation, arg);
}
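
/*
 * Illustrative note: this backs the generic FS_IOC_GETVERSION ioctl, i.e.
 * userspace reads the inode generation roughly as:
 *
 *      int gen;
 *      ioctl(fd, FS_IOC_GETVERSION, &gen);
 */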

static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
        struct inode *inode = file_inode(file);
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_device *device;
        struct request_queue *q;
        struct fstrim_range range;
        u64 minlen = ULLONG_MAX;
        u64 num_devices = 0;
        u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
                                dev_list) {
                if (!device->bdev)
                        continue;
                q = bdev_get_queue(device->bdev);
                if (blk_queue_discard(q)) {
                        num_devices++;
                        minlen = min_t(u64, q->limits.discard_granularity,
                                       minlen);
                }
        }
        rcu_read_unlock();

        if (!num_devices)
                return -EOPNOTSUPP;
        if (copy_from_user(&range, arg, sizeof(range)))
                return -EFAULT;
        if (range.start > total_bytes ||
            range.len < fs_info->sb->s_blocksize)
                return -EINVAL;

        range.len = min(range.len, total_bytes - range.start);
        range.minlen = max(range.minlen, minlen);
        ret = btrfs_trim_fs(fs_info, &range);
        if (ret < 0)
                return ret;

        if (copy_to_user(arg, &range, sizeof(range)))
                return -EFAULT;

        return 0;
}
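
/*
 * Illustrative userspace sketch (not part of this file): FITRIM is the
 * generic VFS ioctl, so trimming the whole filesystem looks roughly like:
 *
 *      struct fstrim_range range = {
 *              .start = 0,
 *              .len = ULLONG_MAX,
 *              .minlen = 0,    (clamped up to the device granularity above)
 *      };
 *      ioctl(fd, FITRIM, &range);
 *
 * On success range.len is updated to the number of bytes actually trimmed.
 */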

int btrfs_is_empty_uuid(u8 *uuid)
{
        int i;

        for (i = 0; i < BTRFS_UUID_SIZE; i++) {
                if (uuid[i])
                        return 0;
        }
        return 1;
}

static noinline int create_subvol(struct inode *dir,
                                  struct dentry *dentry,
                                  const char *name, int namelen,
                                  u64 *async_transid,
                                  struct btrfs_qgroup_inherit *inherit)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
        struct btrfs_trans_handle *trans;
        struct btrfs_key key;
        struct btrfs_root_item *root_item;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_root *new_root;
        struct btrfs_block_rsv block_rsv;
        struct timespec64 cur_time = current_time(dir);
        struct inode *inode;
        int ret;
        int err;
        u64 objectid;
        u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
        u64 index = 0;
        uuid_le new_uuid;

        root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
        if (!root_item)
                return -ENOMEM;

        ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
        if (ret)
                goto fail_free;

        /*
         * Don't create subvolume whose level is not zero. Or qgroup will be
         * screwed up since it assumes subvolume qgroup's level to be 0.
         */
        if (btrfs_qgroup_level(objectid)) {
                ret = -ENOSPC;
                goto fail_free;
        }

        btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
        /*
         * The same as the snapshot creation, please see the comment
         * of create_snapshot().
         */
        ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false);
        if (ret)
                goto fail_free;

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                btrfs_subvolume_release_metadata(fs_info, &block_rsv);
                goto fail_free;
        }
        trans->block_rsv = &block_rsv;
        trans->bytes_reserved = block_rsv.size;

        ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
        if (ret)
                goto fail;

        leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                goto fail;
        }

        btrfs_mark_buffer_dirty(leaf);

        inode_item = &root_item->inode;
        btrfs_set_stack_inode_generation(inode_item, 1);
        btrfs_set_stack_inode_size(inode_item, 3);
        btrfs_set_stack_inode_nlink(inode_item, 1);
        btrfs_set_stack_inode_nbytes(inode_item,
                                     fs_info->nodesize);
        btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

        btrfs_set_root_flags(root_item, 0);
        btrfs_set_root_limit(root_item, 0);
        btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);

        btrfs_set_root_bytenr(root_item, leaf->start);
        btrfs_set_root_generation(root_item, trans->transid);
        btrfs_set_root_level(root_item, 0);
        btrfs_set_root_refs(root_item, 1);
        btrfs_set_root_used(root_item, leaf->len);
        btrfs_set_root_last_snapshot(root_item, 0);

        btrfs_set_root_generation_v2(root_item,
                        btrfs_root_generation(root_item));
        uuid_le_gen(&new_uuid);
        memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
        btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
        btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
        root_item->ctime = root_item->otime;
        btrfs_set_root_ctransid(root_item, trans->transid);
        btrfs_set_root_otransid(root_item, trans->transid);

        btrfs_tree_unlock(leaf);
        free_extent_buffer(leaf);
        leaf = NULL;

        btrfs_set_root_dirid(root_item, new_dirid);

        key.objectid = objectid;
        key.offset = 0;
        key.type = BTRFS_ROOT_ITEM_KEY;
        ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
                                root_item);
        if (ret)
                goto fail;

        key.offset = (u64)-1;
        new_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(new_root)) {
                ret = PTR_ERR(new_root);
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        btrfs_record_root_in_trans(trans, new_root);

        ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
        if (ret) {
                /* We potentially lose an unused inode item here */
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        mutex_lock(&new_root->objectid_mutex);
        new_root->highest_objectid = new_dirid;
        mutex_unlock(&new_root->objectid_mutex);

        /*
         * insert the directory item
         */
        ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        ret = btrfs_insert_dir_item(trans, root,
                                    name, namelen, BTRFS_I(dir), &key,
                                    BTRFS_FT_DIR, index);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto fail;
        }

        btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
        ret = btrfs_update_inode(trans, root, dir);
        BUG_ON(ret);

        ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
                                 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
        BUG_ON(ret);

        ret = btrfs_uuid_tree_add(trans, root_item->uuid,
                                  BTRFS_UUID_KEY_SUBVOL, objectid);
        if (ret)
                btrfs_abort_transaction(trans, ret);

fail:
        kfree(root_item);
        trans->block_rsv = NULL;
        trans->bytes_reserved = 0;
        btrfs_subvolume_release_metadata(fs_info, &block_rsv);

        if (async_transid) {
                *async_transid = trans->transid;
                err = btrfs_commit_transaction_async(trans, 1);
                if (err)
                        err = btrfs_commit_transaction(trans);
        } else {
                err = btrfs_commit_transaction(trans);
        }
        if (err && !ret)
                ret = err;

        if (!ret) {
                inode = btrfs_lookup_dentry(dir, dentry);
                if (IS_ERR(inode))
                        return PTR_ERR(inode);
                d_instantiate(dentry, inode);
        }
        return ret;

fail_free:
        kfree(root_item);
        return ret;
}

static int create_snapshot(struct btrfs_root *root, struct inode *dir,
                           struct dentry *dentry,
                           u64 *async_transid, bool readonly,
                           struct btrfs_qgroup_inherit *inherit)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
        struct inode *inode;
        struct btrfs_pending_snapshot *pending_snapshot;
        struct btrfs_trans_handle *trans;
        int ret;
        bool snapshot_force_cow = false;

        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return -EINVAL;

        pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
        if (!pending_snapshot)
                return -ENOMEM;

        pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
                        GFP_KERNEL);
        pending_snapshot->path = btrfs_alloc_path();
        if (!pending_snapshot->root_item || !pending_snapshot->path) {
                ret = -ENOMEM;
                goto free_pending;
        }

        /*
         * Force new buffered writes to reserve space even when NOCOW is
         * possible. This is to avoid later writeback (running dealloc) to
         * fallback to COW mode and unexpectedly fail with ENOSPC.
         */
        atomic_inc(&root->will_be_snapshotted);
        smp_mb__after_atomic();
        /* wait for no snapshot writes */
        wait_event(root->subv_writers->wait,
                   percpu_counter_sum(&root->subv_writers->counter) == 0);

        ret = btrfs_start_delalloc_inodes(root);
        if (ret)
                goto dec_and_free;

        /*
         * All previous writes have started writeback in NOCOW mode, so now
         * we force future writes to fallback to COW mode during snapshot
         * creation.
         */
        atomic_inc(&root->snapshot_force_cow);
        snapshot_force_cow = true;

        btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

        btrfs_init_block_rsv(&pending_snapshot->block_rsv,
                             BTRFS_BLOCK_RSV_TEMP);
        /*
         * 1 - parent dir inode
         * 2 - dir entries
         * 1 - root item
         * 2 - root ref/backref
         * 1 - root of snapshot
         * 1 - UUID item
         */
        ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
                                        &pending_snapshot->block_rsv, 8,
                                        false);
        if (ret)
                goto dec_and_free;

        pending_snapshot->dentry = dentry;
        pending_snapshot->root = root;
        pending_snapshot->readonly = readonly;
        pending_snapshot->dir = dir;
        pending_snapshot->inherit = inherit;

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto fail;
        }

        spin_lock(&fs_info->trans_lock);
        list_add(&pending_snapshot->list,
                 &trans->transaction->pending_snapshots);
        spin_unlock(&fs_info->trans_lock);
        if (async_transid) {
                *async_transid = trans->transid;
                ret = btrfs_commit_transaction_async(trans, 1);
                if (ret)
                        ret = btrfs_commit_transaction(trans);
        } else {
                ret = btrfs_commit_transaction(trans);
        }
        if (ret)
                goto fail;

        ret = pending_snapshot->error;
        if (ret)
                goto fail;

        ret = btrfs_orphan_cleanup(pending_snapshot->snap);
        if (ret)
                goto fail;

        inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                goto fail;
        }

        d_instantiate(dentry, inode);
        ret = 0;
fail:
        btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
dec_and_free:
        if (snapshot_force_cow)
                atomic_dec(&root->snapshot_force_cow);
        if (atomic_dec_and_test(&root->will_be_snapshotted))
                wake_up_var(&root->will_be_snapshotted);
free_pending:
        kfree(pending_snapshot->root_item);
        btrfs_free_path(pending_snapshot->path);
        kfree(pending_snapshot);

        return ret;
}

/* copy of may_delete in fs/namei.c
 * Check whether we can remove a link victim from directory dir, check
 * whether the type of victim is right.
 * 1. We can't do it if dir is read-only (done in permission())
 * 2. We should have write and exec permissions on dir
 * 3. We can't remove anything from append-only dir
 * 4. We can't do anything with immutable dir (done in permission())
 * 5. If the sticky bit on dir is set we should either
 *      a. be owner of dir, or
 *      b. be owner of victim, or
 *      c. have CAP_FOWNER capability
 * 6. If the victim is append-only or immutable we can't do anything with
 *    links pointing to it.
 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 * 9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
{
        int error;

        if (d_really_is_negative(victim))
                return -ENOENT;

        BUG_ON(d_inode(victim->d_parent) != dir);
        audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

        error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
        if (error)
                return error;
        if (IS_APPEND(dir))
                return -EPERM;
        if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
            IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
                return -EPERM;
        if (isdir) {
                if (!d_is_dir(victim))
                        return -ENOTDIR;
                if (IS_ROOT(victim))
                        return -EBUSY;
        } else if (d_is_dir(victim))
                return -EISDIR;
        if (IS_DEADDIR(dir))
                return -ENOENT;
        if (victim->d_flags & DCACHE_NFSFS_RENAMED)
                return -EBUSY;
        return 0;
}

/* copy of may_create in fs/namei.c */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
        if (d_really_is_positive(child))
                return -EEXIST;
        if (IS_DEADDIR(dir))
                return -ENOENT;
        return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * Create a new subvolume below @parent.  This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(const struct path *parent,
                                   const char *name, int namelen,
                                   struct btrfs_root *snap_src,
                                   u64 *async_transid, bool readonly,
                                   struct btrfs_qgroup_inherit *inherit)
{
        struct inode *dir = d_inode(parent->dentry);
        struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
        struct dentry *dentry;
        int error;

        error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
        if (error == -EINTR)
                return error;

        dentry = lookup_one_len(name, parent->dentry, namelen);
        error = PTR_ERR(dentry);
        if (IS_ERR(dentry))
                goto out_unlock;

        error = btrfs_may_create(dir, dentry);
        if (error)
                goto out_dput;

        /*
         * even if this name doesn't exist, we may get hash collisions.
         * check for them now when we can safely fail
         */
        error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
                                               dir->i_ino, name,
                                               namelen);
        if (error)
                goto out_dput;

        down_read(&fs_info->subvol_sem);

        if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
                goto out_up_read;

        if (snap_src) {
                error = create_snapshot(snap_src, dir, dentry,
                                        async_transid, readonly, inherit);
        } else {
                error = create_subvol(dir, dentry, name, namelen,
                                      async_transid, inherit);
        }
        if (!error)
                fsnotify_mkdir(dir, dentry);
out_up_read:
        up_read(&fs_info->subvol_sem);
out_dput:
        dput(dentry);
out_unlock:
        inode_unlock(dir);
        return error;
}
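
/*
 * Illustrative userspace sketch (not part of this file): plain subvolume
 * creation enters here through BTRFS_IOC_SUBVOL_CREATE issued on the parent
 * directory:
 *
 *      struct btrfs_ioctl_vol_args args = { .fd = 0 };
 *      strcpy(args.name, "new-subvol");
 *      ioctl(parent_dir_fd, BTRFS_IOC_SUBVOL_CREATE, &args);
 */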

/*
 * When we're defragging a range, we don't want to kick it off again
 * if it is really just waiting for delalloc to send it down.
 * If we find a nice big extent or delalloc range for the bytes in the
 * file you want to defrag, we return 0 to let you know to skip this
 * part of the file
 */
static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
{
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_map *em = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 end;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
        read_unlock(&em_tree->lock);

        if (em) {
                end = extent_map_end(em);
                free_extent_map(em);
                if (end - offset > thresh)
                        return 0;
        }
        /* if we already have a nice delalloc here, just stop */
        thresh /= 2;
        end = count_range_bits(io_tree, &offset, offset + thresh,
                               thresh, EXTENT_DELALLOC, 1);
        if (end >= thresh)
                return 0;
        return 1;
}

/*
 * helper function to walk through a file and find extents
 * newer than a specific transid, and smaller than thresh.
 *
 * This is used by the defragging code to find new and small
 * extents
 */
static int find_new_extents(struct btrfs_root *root,
                            struct inode *inode, u64 newer_than,
                            u64 *off, u32 thresh)
{
        struct btrfs_path *path;
        struct btrfs_key min_key;
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *extent;
        int type;
        int ret;
        u64 ino = btrfs_ino(BTRFS_I(inode));

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        min_key.objectid = ino;
        min_key.type = BTRFS_EXTENT_DATA_KEY;
        min_key.offset = *off;

        while (1) {
                ret = btrfs_search_forward(root, &min_key, path, newer_than);
                if (ret != 0)
                        goto none;
process_slot:
                if (min_key.objectid != ino)
                        goto none;
                if (min_key.type != BTRFS_EXTENT_DATA_KEY)
                        goto none;

                leaf = path->nodes[0];
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_file_extent_item);

                type = btrfs_file_extent_type(leaf, extent);
                if (type == BTRFS_FILE_EXTENT_REG &&
                    btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
                    check_defrag_in_cache(inode, min_key.offset, thresh)) {
                        *off = min_key.offset;
                        btrfs_free_path(path);
                        return 0;
                }

                path->slots[0]++;
                if (path->slots[0] < btrfs_header_nritems(leaf)) {
                        btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
                        goto process_slot;
                }

                if (min_key.offset == (u64)-1)
                        goto none;

                min_key.offset++;
                btrfs_release_path(path);
        }
none:
        btrfs_free_path(path);
        return -ENOENT;
}

static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_map *em;
        u64 len = PAGE_SIZE;

        /*
         * hopefully we have this extent in the tree already, try without
         * the full extent lock
         */
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        read_unlock(&em_tree->lock);

        if (!em) {
                struct extent_state *cached = NULL;
                u64 end = start + len - 1;

                /* get the big lock and read metadata off disk */
                lock_extent_bits(io_tree, start, end, &cached);
                em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
                unlock_extent_cached(io_tree, start, end, &cached);

                if (IS_ERR(em))
                        return NULL;
        }

        return em;
}

static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
{
        struct extent_map *next;
        bool ret = true;

        /* this is the last extent */
        if (em->start + em->len >= i_size_read(inode))
                return false;

        next = defrag_lookup_extent(inode, em->start + em->len);
        if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
                ret = false;
        else if ((em->block_start + em->block_len == next->block_start) &&
                 (em->block_len > SZ_128K && next->block_len > SZ_128K))
                ret = false;

        free_extent_map(next);
        return ret;
}

static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
                               u64 *last_len, u64 *skip, u64 *defrag_end,
                               int compress)
{
        struct extent_map *em;
        int ret = 1;
        bool next_mergeable = true;
        bool prev_mergeable = true;

        /*
         * make sure that once we start defragging an extent, we keep on
         * defragging it
         */
        if (start < *defrag_end)
                return 1;

        *skip = 0;

        em = defrag_lookup_extent(inode, start);
        if (!em)
                return 0;

        /* this will cover holes, and inline extents */
        if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                ret = 0;
                goto out;
        }

        if (!*defrag_end)
                prev_mergeable = false;

        next_mergeable = defrag_check_next_extent(inode, em);
        /*
         * we hit a real extent, if it is big or the next extent is not a
         * real extent, don't bother defragging it
         */
        if (!compress && (*last_len == 0 || *last_len >= thresh) &&
            (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
                ret = 0;
out:
        /*
         * last_len ends up being a counter of how many bytes we've defragged.
         * every time we choose not to defrag an extent, we reset *last_len
         * so that the next tiny extent will force a defrag.
         *
         * The end result of this is that tiny extents before a single big
         * extent will force at least part of that big extent to be defragged.
         */
        if (ret) {
                *defrag_end = extent_map_end(em);
        } else {
                *last_len = 0;
                *skip = extent_map_end(em);
                *defrag_end = 0;
        }

        free_extent_map(em);
        return ret;
}

/*
 * it doesn't do much good to defrag one or two pages
 * at a time.  This pulls in a nice chunk of pages
 * to COW and defrag.
 *
 * It also makes sure the delalloc code has enough
 * dirty data to avoid making new small extents as part
 * of the defrag
 *
 * It's a good idea to start RA on this range
 * before calling this.
 */
static int cluster_pages_for_defrag(struct inode *inode,
                                    struct page **pages,
                                    unsigned long start_index,
                                    unsigned long num_pages)
{
        unsigned long file_end;
        u64 isize = i_size_read(inode);
        u64 page_start;
        u64 page_end;
        u64 page_cnt;
        int ret;
        int i;
        int i_done;
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
        struct extent_io_tree *tree;
        struct extent_changeset *data_reserved = NULL;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);

        file_end = (isize - 1) >> PAGE_SHIFT;
        if (!isize || start_index > file_end)
                return 0;

        page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);

        ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
                        start_index << PAGE_SHIFT,
                        page_cnt << PAGE_SHIFT);
        if (ret)
                return ret;
        i_done = 0;
        tree = &BTRFS_I(inode)->io_tree;

        /* step one, lock all the pages */
        for (i = 0; i < page_cnt; i++) {
                struct page *page;
again:
                page = find_or_create_page(inode->i_mapping,
                                           start_index + i, mask);
                if (!page)
                        break;

                page_start = page_offset(page);
                page_end = page_start + PAGE_SIZE - 1;
                while (1) {
                        lock_extent_bits(tree, page_start, page_end,
                                         &cached_state);
                        ordered = btrfs_lookup_ordered_extent(inode,
                                                              page_start);
                        unlock_extent_cached(tree, page_start, page_end,
                                             &cached_state);
                        if (!ordered)
                                break;

                        unlock_page(page);
                        btrfs_start_ordered_extent(inode, ordered, 1);
                        btrfs_put_ordered_extent(ordered);
                        lock_page(page);
                        /*
                         * we unlocked the page above, so we need check if
                         * it was released or not.
                         */
                        if (page->mapping != inode->i_mapping) {
                                unlock_page(page);
                                put_page(page);
                                goto again;
                        }
                }

                if (!PageUptodate(page)) {
                        btrfs_readpage(NULL, page);
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                unlock_page(page);
                                put_page(page);
                                ret = -EIO;
                                break;
                        }
                }

                if (page->mapping != inode->i_mapping) {
                        unlock_page(page);
                        put_page(page);
                        goto again;
                }

                pages[i] = page;
                i_done++;
        }
        if (!i_done || ret)
                goto out;

        if (!(inode->i_sb->s_flags & SB_ACTIVE))
                goto out;

        /*
         * so now we have a nice long stream of locked
         * and up to date pages, lets wait on them
         */
        for (i = 0; i < i_done; i++)
                wait_on_page_writeback(pages[i]);

        page_start = page_offset(pages[0]);
        page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;

        lock_extent_bits(&BTRFS_I(inode)->io_tree,
                         page_start, page_end - 1, &cached_state);
        clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
                         page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
                         EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
                         &cached_state);

        if (i_done != page_cnt) {
                spin_lock(&BTRFS_I(inode)->lock);
                BTRFS_I(inode)->outstanding_extents++;
                spin_unlock(&BTRFS_I(inode)->lock);
                btrfs_delalloc_release_space(inode, data_reserved,
                                start_index << PAGE_SHIFT,
                                (page_cnt - i_done) << PAGE_SHIFT, true);
        }

        set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
                          &cached_state);

        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                             page_start, page_end - 1, &cached_state);

        for (i = 0; i < i_done; i++) {
                clear_page_dirty_for_io(pages[i]);
                ClearPageChecked(pages[i]);
                set_page_extent_mapped(pages[i]);
                set_page_dirty(pages[i]);
                unlock_page(pages[i]);
                put_page(pages[i]);
        }
        btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
                                       false);
        extent_changeset_free(data_reserved);
        return i_done;
out:
        for (i = 0; i < i_done; i++) {
                unlock_page(pages[i]);
                put_page(pages[i]);
        }
        btrfs_delalloc_release_space(inode, data_reserved,
                        start_index << PAGE_SHIFT,
                        page_cnt << PAGE_SHIFT, true);
        btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
                                       true);
        extent_changeset_free(data_reserved);
        return ret;
}

int btrfs_defrag_file(struct inode *inode, struct file *file,
                      struct btrfs_ioctl_defrag_range_args *range,
                      u64 newer_than, unsigned long max_to_defrag)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct file_ra_state *ra = NULL;
        unsigned long last_index;
        u64 isize = i_size_read(inode);
        u64 last_len = 0;
        u64 skip = 0;
        u64 defrag_end = 0;
        u64 newer_off = range->start;
        unsigned long i;
        unsigned long ra_index = 0;
        int ret;
        int defrag_count = 0;
        int compress_type = BTRFS_COMPRESS_ZLIB;
        u32 extent_thresh = range->extent_thresh;
        unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
        unsigned long cluster = max_cluster;
        u64 new_align = ~((u64)SZ_128K - 1);
        struct page **pages = NULL;
        bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;

        if (isize == 0)
                return 0;

        if (range->start >= isize)
                return -EINVAL;

        if (do_compress) {
                if (range->compress_type > BTRFS_COMPRESS_TYPES)
                        return -EINVAL;
                if (range->compress_type)
                        compress_type = range->compress_type;
        }

        if (extent_thresh == 0)
                extent_thresh = SZ_256K;

        /*
         * If we were not given a file, allocate a readahead context. As
         * readahead is just an optimization, defrag will work without it so
         * we don't error out.
         */
        if (!file) {
                ra = kzalloc(sizeof(*ra), GFP_KERNEL);
                if (ra)
                        file_ra_state_init(ra, inode->i_mapping);
        } else {
                ra = &file->f_ra;
        }

        pages = kmalloc_array(max_cluster, sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                goto out_ra;
        }

        /* find the last page to defrag */
        if (range->start + range->len > range->start) {
                last_index = min_t(u64, isize - 1,
                         range->start + range->len - 1) >> PAGE_SHIFT;
        } else {
                last_index = (isize - 1) >> PAGE_SHIFT;
        }

        if (newer_than) {
                ret = find_new_extents(root, inode, newer_than,
                                       &newer_off, SZ_64K);
                if (!ret) {
                        range->start = newer_off;
                        /*
                         * we always align our defrag to help keep
                         * the extents in the file evenly spaced
                         */
                        i = (newer_off & new_align) >> PAGE_SHIFT;
                } else
                        goto out_ra;
        } else {
                i = range->start >> PAGE_SHIFT;
        }
        if (!max_to_defrag)
                max_to_defrag = last_index - i + 1;

        /*
         * make writeback starts from i, so the defrag range can be
         * written sequentially.
         */
        if (i < inode->i_mapping->writeback_index)
                inode->i_mapping->writeback_index = i;

        while (i <= last_index && defrag_count < max_to_defrag &&
               (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
                /*
                 * make sure we stop running if someone unmounts
                 * the FS
                 */
                if (!(inode->i_sb->s_flags & SB_ACTIVE))
                        break;

                if (btrfs_defrag_cancelled(fs_info)) {
                        btrfs_debug(fs_info, "defrag_file cancelled");
                        ret = -EAGAIN;
                        break;
                }

                if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
                                         extent_thresh, &last_len, &skip,
                                         &defrag_end, do_compress)){
                        unsigned long next;
                        /*
                         * the should_defrag function tells us how much to skip
                         * bump our counter by the suggested amount
                         */
                        next = DIV_ROUND_UP(skip, PAGE_SIZE);
                        i = max(i + 1, next);
                        continue;
                }

                if (!newer_than) {
                        cluster = (PAGE_ALIGN(defrag_end) >>
                                   PAGE_SHIFT) - i;
                        cluster = min(cluster, max_cluster);
                } else {
                        cluster = max_cluster;
                }

                if (i + cluster > ra_index) {
                        ra_index = max(i, ra_index);
                        if (ra)
                                page_cache_sync_readahead(inode->i_mapping, ra,
                                                file, ra_index, cluster);
                        ra_index += cluster;
                }

                inode_lock(inode);
                if (do_compress)
                        BTRFS_I(inode)->defrag_compress = compress_type;
                ret = cluster_pages_for_defrag(inode, pages, i, cluster);
                if (ret < 0) {
                        inode_unlock(inode);
                        goto out_ra;
                }

                defrag_count += ret;
                balance_dirty_pages_ratelimited(inode->i_mapping);
                inode_unlock(inode);

                if (newer_than) {
                        if (newer_off == (u64)-1)
                                break;

                        if (ret > 0)
                                i += ret;

                        newer_off = max(newer_off + 1,
                                        (u64)i << PAGE_SHIFT);

                        ret = find_new_extents(root, inode, newer_than,
                                               &newer_off, SZ_64K);
                        if (!ret) {
                                range->start = newer_off;
                                i = (newer_off & new_align) >> PAGE_SHIFT;
                        } else {
                                break;
                        }
                } else {
                        if (ret > 0) {
                                i += ret;
                                last_len += ret << PAGE_SHIFT;
                        } else {
                                i++;
                                last_len = 0;
                        }
                }
        }

        if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
                filemap_flush(inode->i_mapping);
                if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                             &BTRFS_I(inode)->runtime_flags))
                        filemap_flush(inode->i_mapping);
        }

        if (range->compress_type == BTRFS_COMPRESS_LZO) {
                btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
        } else if (range->compress_type == BTRFS_COMPRESS_ZSTD) {
                btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
        }

        ret = defrag_count;

out_ra:
        if (do_compress) {
                inode_lock(inode);
                BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
                inode_unlock(inode);
        }
        if (!file)
                kfree(ra);
        kfree(pages);
        return ret;
}
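
/*
 * Illustrative userspace sketch (not part of this file): the range interface
 * above is reached through BTRFS_IOC_DEFRAG_RANGE, roughly:
 *
 *      struct btrfs_ioctl_defrag_range_args range = {
 *              .start = 0,
 *              .len = (__u64)-1,
 *              .extent_thresh = 256 * 1024,
 *              .flags = BTRFS_DEFRAG_RANGE_START_IO,
 *      };
 *      ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
 */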

static noinline int btrfs_ioctl_resize(struct file *file,
                                        void __user *arg)
{
        struct inode *inode = file_inode(file);
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        u64 new_size;
        u64 old_size;
        u64 devid = 1;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_ioctl_vol_args *vol_args;
        struct btrfs_trans_handle *trans;
        struct btrfs_device *device = NULL;
        char *sizestr;
        char *retptr;
        char *devstr = NULL;
        int ret = 0;
        int mod = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        ret = mnt_want_write_file(file);
        if (ret)
                return ret;

        if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
                mnt_drop_write_file(file);
                return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
        }

        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args)) {
                ret = PTR_ERR(vol_args);
                goto out;
        }

        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

        sizestr = vol_args->name;
        devstr = strchr(sizestr, ':');
        if (devstr) {
                sizestr = devstr + 1;
                *devstr = '\0';
                devstr = vol_args->name;
                ret = kstrtoull(devstr, 10, &devid);
                if (ret)
                        goto out_free;
                if (!devid) {
                        ret = -EINVAL;
                        goto out_free;
                }
                btrfs_info(fs_info, "resizing devid %llu", devid);
        }

        device = btrfs_find_device(fs_info, devid, NULL, NULL);
        if (!device) {
                btrfs_info(fs_info, "resizer unable to find device %llu",
                           devid);
                ret = -ENODEV;
                goto out_free;
        }

        if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                btrfs_info(fs_info,
                           "resizer unable to apply on readonly device %llu",
                           devid);
                ret = -EPERM;
                goto out_free;
        }

        if (!strcmp(sizestr, "max"))
                new_size = device->bdev->bd_inode->i_size;
        else {
                if (sizestr[0] == '-') {
                        mod = -1;
                        sizestr++;
                } else if (sizestr[0] == '+') {
                        mod = 1;
                        sizestr++;
                }
                new_size = memparse(sizestr, &retptr);
                if (*retptr != '\0' || new_size == 0) {
                        ret = -EINVAL;
                        goto out_free;
                }
        }

        if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
                ret = -EPERM;
                goto out_free;
        }

        old_size = btrfs_device_get_total_bytes(device);

        if (mod < 0) {
                if (new_size > old_size) {
                        ret = -EINVAL;
                        goto out_free;
                }
                new_size = old_size - new_size;
        } else if (mod > 0) {
                if (new_size > ULLONG_MAX - old_size) {
                        ret = -ERANGE;
                        goto out_free;
                }
                new_size = old_size + new_size;
        }

        if (new_size < SZ_256M) {
                ret = -EINVAL;
                goto out_free;
        }
        if (new_size > device->bdev->bd_inode->i_size) {
                ret = -EFBIG;
                goto out_free;
        }

        new_size = round_down(new_size, fs_info->sectorsize);

        btrfs_info_in_rcu(fs_info, "new size for %s is %llu",
                          rcu_str_deref(device->name), new_size);

        if (new_size > old_size) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        goto out_free;
                }
                ret = btrfs_grow_device(trans, device, new_size);
                btrfs_commit_transaction(trans);
        } else if (new_size < old_size) {
                ret = btrfs_shrink_device(device, new_size);
        } /* equal, nothing need to do */

out_free:
        kfree(vol_args);
out:
        clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
        mnt_drop_write_file(file);
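        return ret;
}

/*
 * Illustrative note derived from the parsing above: vol_args->name carries
 * "[<devid>:]<size>", where <size> is "max" or a byte count with optional
 * +/- prefix and K/M/G suffix (memparse). E.g. from userspace:
 *
 *      struct btrfs_ioctl_vol_args args = { .fd = 0 };
 *      strcpy(args.name, "1:+2g");     grow devid 1 by 2 GiB
 *      ioctl(fd, BTRFS_IOC_RESIZE, &args);
 */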

static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
                                const char *name, unsigned long fd, int subvol,
                                u64 *transid, bool readonly,
                                struct btrfs_qgroup_inherit *inherit)
{
        int namelen;
        int ret = 0;

        if (!S_ISDIR(file_inode(file)->i_mode))
                return -ENOTDIR;

        ret = mnt_want_write_file(file);
        if (ret)
                goto out;

        namelen = strlen(name);
        if (strchr(name, '/')) {
                ret = -EINVAL;
                goto out_drop_write;
        }

        if (name[0] == '.' &&
           (namelen == 1 || (name[1] == '.' && namelen == 2))) {
                ret = -EEXIST;
                goto out_drop_write;
        }

        if (subvol) {
                ret = btrfs_mksubvol(&file->f_path, name, namelen,
                                     NULL, transid, readonly, inherit);
        } else {
                struct fd src = fdget(fd);
                struct inode *src_inode;
                if (!src.file) {
                        ret = -EINVAL;
                        goto out_drop_write;
                }

                src_inode = file_inode(src.file);
                if (src_inode->i_sb != file_inode(file)->i_sb) {
                        btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
                                   "Snapshot src from another FS");
                        ret = -EXDEV;
                } else if (!inode_owner_or_capable(src_inode)) {
                        /*
                         * Subvolume creation is not restricted, but snapshots
                         * are limited to own subvolumes only
                         */
                        ret = -EPERM;
                } else {
                        ret = btrfs_mksubvol(&file->f_path, name, namelen,
                                             BTRFS_I(src_inode)->root,
                                             transid, readonly, inherit);
                }
                fdput(src);
        }
out_drop_write:
        mnt_drop_write_file(file);
out:
        return ret;
}

static noinline int btrfs_ioctl_snap_create(struct file *file,
                                            void __user *arg, int subvol)
{
        struct btrfs_ioctl_vol_args *vol_args;
        int ret;

        if (!S_ISDIR(file_inode(file)->i_mode))
                return -ENOTDIR;

        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args))
                return PTR_ERR(vol_args);
        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

        ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
                                              vol_args->fd, subvol,
                                              NULL, false, NULL);

        kfree(vol_args);
        return ret;
}

static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
                                               void __user *arg, int subvol)
{
        struct btrfs_ioctl_vol_args_v2 *vol_args;
        int ret;
        u64 transid = 0;
        u64 *ptr = NULL;
        bool readonly = false;
        struct btrfs_qgroup_inherit *inherit = NULL;

        if (!S_ISDIR(file_inode(file)->i_mode))
                return -ENOTDIR;

        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args))
                return PTR_ERR(vol_args);
        vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';

        if (vol_args->flags &
            ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
              BTRFS_SUBVOL_QGROUP_INHERIT)) {
                ret = -EOPNOTSUPP;
                goto free_args;
        }

        if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
                ptr = &transid;
        if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
                readonly = true;
        if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
                if (vol_args->size > PAGE_SIZE) {
                        ret = -EINVAL;
                        goto free_args;
                }
                inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
                if (IS_ERR(inherit)) {
                        ret = PTR_ERR(inherit);
                        goto free_args;
                }
        }

        ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
                                              vol_args->fd, subvol, ptr,
                                              readonly, inherit);
        if (ret)
                goto free_inherit;

        if (ptr && copy_to_user(arg +
                                offsetof(struct btrfs_ioctl_vol_args_v2,
                                        transid),
                                ptr, sizeof(*ptr)))
                ret = -EFAULT;

free_inherit:
        kfree(inherit);
free_args:
        kfree(vol_args);
        return ret;
}
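
/*
 * Illustrative userspace sketch (not part of this file): creating a
 * read-only snapshot of the subvolume open as "src_fd" under the directory
 * open as "dst_dir_fd":
 *
 *      struct btrfs_ioctl_vol_args_v2 args = { .fd = src_fd };
 *      args.flags = BTRFS_SUBVOL_RDONLY;
 *      strcpy(args.name, "snap-1");
 *      ioctl(dst_dir_fd, BTRFS_IOC_SNAP_CREATE_V2, &args);
 */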

static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
                                                void __user *arg)
{
        struct inode *inode = file_inode(file);
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
        u64 flags = 0;

        if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
                return -EINVAL;

        down_read(&fs_info->subvol_sem);
        if (btrfs_root_readonly(root))
                flags |= BTRFS_SUBVOL_RDONLY;
        up_read(&fs_info->subvol_sem);

        if (copy_to_user(arg, &flags, sizeof(flags)))
                ret = -EFAULT;

        return ret;
}

static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
                                                void __user *arg)
{
        struct inode *inode = file_inode(file);
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        u64 root_flags;
        u64 flags;
        int ret = 0;

        if (!inode_owner_or_capable(inode))
                return -EPERM;

        ret = mnt_want_write_file(file);
        if (ret)
                goto out;

        if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
                ret = -EINVAL;
                goto out_drop_write;
        }

        if (copy_from_user(&flags, arg, sizeof(flags))) {
                ret = -EFAULT;
                goto out_drop_write;
        }

        if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
                ret = -EINVAL;
                goto out_drop_write;
        }

        if (flags & ~BTRFS_SUBVOL_RDONLY) {
                ret = -EOPNOTSUPP;
                goto out_drop_write;
        }

        down_write(&fs_info->subvol_sem);

        /* nothing to do */
        if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
                goto out_drop_sem;

        root_flags = btrfs_root_flags(&root->root_item);
        if (flags & BTRFS_SUBVOL_RDONLY) {
                btrfs_set_root_flags(&root->root_item,
                                     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
        } else {
                /*
                 * Block RO -> RW transition if this subvolume is involved in
                 * send
                 */
                spin_lock(&root->root_item_lock);
                if (root->send_in_progress == 0) {
                        btrfs_set_root_flags(&root->root_item,
                                     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
                        spin_unlock(&root->root_item_lock);
                } else {
                        spin_unlock(&root->root_item_lock);
                        btrfs_warn(fs_info,
                                   "Attempt to set subvolume %llu read-write during send",
                                   root->root_key.objectid);
                        ret = -EPERM;
                        goto out_drop_sem;
                }
        }

        trans = btrfs_start_transaction(root, 1);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out_reset;
        }

        ret = btrfs_update_root(trans, fs_info->tree_root,
                                &root->root_key, &root->root_item);
        if (ret < 0) {
                btrfs_end_transaction(trans);
                goto out_reset;
        }

        ret = btrfs_commit_transaction(trans);

out_reset:
        if (ret)
                btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
        up_write(&fs_info->subvol_sem);
out_drop_write:
        mnt_drop_write_file(file);
out:
        return ret;
}

static noinline int key_in_sk(struct btrfs_key *key,
                              struct btrfs_ioctl_search_key *sk)
{
        struct btrfs_key test;
        int ret;

        test.objectid = sk->min_objectid;
        test.type = sk->min_type;
        test.offset = sk->min_offset;

        ret = btrfs_comp_cpu_keys(key, &test);
        if (ret < 0)
                return 0;

        test.objectid = sk->max_objectid;
        test.type = sk->max_type;
        test.offset = sk->max_offset;

        ret = btrfs_comp_cpu_keys(key, &test);
        if (ret > 0)
                return 0;
        return 1;
}

static noinline int copy_to_sk(struct btrfs_path *path,
                               struct btrfs_key *key,
                               struct btrfs_ioctl_search_key *sk,
                               size_t *buf_size,
                               char __user *ubuf,
                               unsigned long *sk_offset,
                               int *num_found)
{
        u64 found_transid;
        struct extent_buffer *leaf;
        struct btrfs_ioctl_search_header sh;
        struct btrfs_key test;
        unsigned long item_off;
        unsigned long item_len;
        int nritems;
        int i;
        int slot;
        int ret = 0;

        leaf = path->nodes[0];
        slot = path->slots[0];
        nritems = btrfs_header_nritems(leaf);

        if (btrfs_header_generation(leaf) > sk->max_transid) {
                i = nritems;
                goto advance_key;
        }
        found_transid = btrfs_header_generation(leaf);

        for (i = slot; i < nritems; i++) {
                item_off = btrfs_item_ptr_offset(leaf, i);
                item_len = btrfs_item_size_nr(leaf, i);

                btrfs_item_key_to_cpu(leaf, key, i);
                if (!key_in_sk(key, sk))
                        continue;

                if (sizeof(sh) + item_len > *buf_size) {
                        if (*num_found) {
                                ret = 1;
                                goto out;
                        }

                        /*
                         * return one empty item back for v1, which does not
                         * handle -EOVERFLOW
                         */

                        *buf_size = sizeof(sh) + item_len;
                        item_len = 0;
                        ret = -EOVERFLOW;
                }

                if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
                        ret = 1;
                        goto out;
                }

                sh.objectid = key->objectid;
                sh.offset = key->offset;
                sh.type = key->type;
                sh.len = item_len;
                sh.transid = found_transid;

                /* copy search result header */
                if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
                        ret = -EFAULT;
                        goto out;
                }

                *sk_offset += sizeof(sh);

                if (item_len) {
                        char __user *up = ubuf + *sk_offset;
                        /* copy the item */
                        if (read_extent_buffer_to_user(leaf, up,
                                                       item_off, item_len)) {
                                ret = -EFAULT;
                                goto out;
                        }

                        *sk_offset += item_len;
                }
                (*num_found)++;

                if (ret) /* -EOVERFLOW from above */
                        goto out;

                if (*num_found >= sk->nr_items) {
                        ret = 1;
                        goto out;
                }
        }
advance_key:
        ret = 0;
        test.objectid = sk->max_objectid;
        test.type = sk->max_type;
        test.offset = sk->max_offset;
        if (btrfs_comp_cpu_keys(key, &test) >= 0)
                ret = 1;
        else if (key->offset < (u64)-1)
                key->offset++;
        else if (key->type < (u8)-1) {
                key->offset = 0;
                key->type++;
        } else if (key->objectid < (u64)-1) {
                key->offset = 0;
                key->type = 0;
                key->objectid++;
        } else
                ret = 1;
out:
        /*
         *  0: all items from this leaf copied, continue with next
         *  1: * more items can be copied, but unused buffer is too small
         *     * all items were found
         *     Either way, it will stop the loop which iterates to the next
         *     leaf
         *  -EOVERFLOW: item was too large for buffer
         *  -EFAULT: could not copy extent buffer back to userspace
         */
        return ret;
}

static noinline int search_ioctl(struct inode *inode,
                                 struct btrfs_ioctl_search_key *sk,
                                 size_t *buf_size,
                                 char __user *ubuf)
{
        struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root;
        struct btrfs_key key;
        struct btrfs_path *path;
        int ret;
        int num_found = 0;
        unsigned long sk_offset = 0;

        if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
                *buf_size = sizeof(struct btrfs_ioctl_search_header);
                return -EOVERFLOW;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (sk->tree_id == 0) {
                /* search the root of the inode that was passed */
                root = BTRFS_I(inode)->root;
        } else {
                key.objectid = sk->tree_id;
                key.type = BTRFS_ROOT_ITEM_KEY;
                key.offset = (u64)-1;
                root = btrfs_read_fs_root_no_name(info, &key);
                if (IS_ERR(root)) {
                        btrfs_free_path(path);
                        return PTR_ERR(root);
                }
        }

        key.objectid = sk->min_objectid;
        key.type = sk->min_type;
        key.offset = sk->min_offset;

        while (1) {
                ret = btrfs_search_forward(root, &key, path, sk->min_transid);
                if (ret != 0) {
                        if (ret > 0)
                                ret = 0;
                        goto err;
                }
                ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
                                 &sk_offset, &num_found);
                btrfs_release_path(path);
                if (ret)
                        break;
        }
        if (ret > 0)
                ret = 0;
err:
        sk->nr_items = num_found;
        btrfs_free_path(path);
        return ret;
}

static noinline int btrfs_ioctl_tree_search(struct file *file,
                                            void __user *argp)
{
        struct btrfs_ioctl_search_args __user *uargs;
        struct btrfs_ioctl_search_key sk;
        struct inode *inode;
        int ret;
        size_t buf_size;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        uargs = (struct btrfs_ioctl_search_args __user *)argp;

        if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
                return -EFAULT;

        buf_size = sizeof(uargs->buf);

        inode = file_inode(file);
        ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);

        /*
         * In the original implementation an overflow is handled by returning
         * a search header with a len of zero, so reset ret.
         */
        if (ret == -EOVERFLOW)
                ret = 0;

        if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
                ret = -EFAULT;
        return ret;
}

static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
                                               void __user *argp)
{
        struct btrfs_ioctl_search_args_v2 __user *uarg;
        struct btrfs_ioctl_search_args_v2 args;
        struct inode *inode;
        int ret;
        size_t buf_size;
        const size_t buf_limit = SZ_16M;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* copy search header and buffer size */
        uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
        if (copy_from_user(&args, uarg, sizeof(args)))
                return -EFAULT;

        buf_size = args.buf_size;

        /* limit result size to 16MB */
        if (buf_size > buf_limit)
                buf_size = buf_limit;

        inode = file_inode(file);
        ret = search_ioctl(inode, &args.key, &buf_size,
                           (char __user *)(&uarg->buf[0]));
        if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
                ret = -EFAULT;
        else if (ret == -EOVERFLOW &&
                copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
                ret = -EFAULT;

        return ret;
}
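
/*
 * Illustrative userspace sketch (not part of this file): v2 lets the caller
 * size the result buffer. Fetching all ROOT_ITEMs from the root tree could
 * look roughly like:
 *
 *      struct btrfs_ioctl_search_args_v2 *args;
 *      args = calloc(1, sizeof(*args) + 65536);
 *      args->buf_size = 65536;
 *      args->key.tree_id = BTRFS_ROOT_TREE_OBJECTID;
 *      args->key.min_type = args->key.max_type = BTRFS_ROOT_ITEM_KEY;
 *      args->key.max_objectid = (__u64)-1;
 *      args->key.max_offset = (__u64)-1;
 *      args->key.max_transid = (__u64)-1;
 *      args->key.nr_items = (__u32)-1;
 *      ioctl(fd, BTRFS_IOC_TREE_SEARCH_V2, args);
 */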

/*
 * Search INODE_REFs to identify the path name of the 'dirid' directory
 * in a 'tree_id' tree, and set the path name in 'name'.
 */
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
                                u64 tree_id, u64 dirid, char *name)
{
        struct btrfs_root *root;
        struct btrfs_key key;
        char *ptr;
        int ret = -1;
        int slot;
        int len;
        int total_len = 0;
        struct btrfs_inode_ref *iref;
        struct extent_buffer *l;
        struct btrfs_path *path;

        if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
                name[0] = '\0';
                return 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];

        key.objectid = tree_id;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
        root = btrfs_read_fs_root_no_name(info, &key);
        if (IS_ERR(root)) {
                ret = PTR_ERR(root);
                goto out;
        }

        key.objectid = dirid;
        key.type = BTRFS_INODE_REF_KEY;
        key.offset = (u64)-1;

        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        goto out;
                else if (ret > 0) {
                        ret = btrfs_previous_item(root, path, dirid,
                                                  BTRFS_INODE_REF_KEY);
                        if (ret < 0)
                                goto out;
                        else if (ret > 0) {
                                ret = -ENOENT;
                                goto out;
                        }
                }

                l = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(l, &key, slot);

                iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
                len = btrfs_inode_ref_name_len(l, iref);
                ptr -= len + 1;
                total_len += len + 1;
                if (ptr < name) {
                        ret = -ENAMETOOLONG;
                        goto out;
                }

                *(ptr + len) = '/';
                read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);

                if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
                        break;

                btrfs_release_path(path);
                key.objectid = key.offset;
                key.offset = (u64)-1;
                dirid = key.objectid;
        }
        memmove(name, ptr, total_len);
        name[total_len] = '\0';
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
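
/*
 * Illustrative userspace sketch (not part of this file): the lookup above
 * backs BTRFS_IOC_INO_LOOKUP, e.g. resolving an inode number to a path
 * relative to the subvolume root:
 *
 *      struct btrfs_ioctl_ino_lookup_args args = {
 *              .treeid = 0,            0 means "the subvolume of fd"
 *              .objectid = ino,
 *      };
 *      ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args);  then args.name holds the path
 */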

static int btrfs_search_path_in_tree_user(struct inode *inode,
                                struct btrfs_ioctl_ino_lookup_user_args *args)
{
        struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
        struct super_block *sb = inode->i_sb;
        struct btrfs_key upper_limit = BTRFS_I(inode)->location;
        u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
        u64 dirid = args->dirid;
        unsigned long item_off;
        unsigned long item_len;
        struct btrfs_inode_ref *iref;
        struct btrfs_root_ref *rref;
        struct btrfs_root *root;
        struct btrfs_path *path;
        struct btrfs_key key, key2;
        struct extent_buffer *leaf;
        struct inode *temp_inode;
        char *ptr;
        int slot;
        int len;
        int total_len = 0;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /*
         * If the bottom subvolume does not exist directly under upper_limit,
         * construct the path in from the bottom up.
         */
        if (dirid != upper_limit.objectid) {
                ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];

                key.objectid = treeid;
                key.type = BTRFS_ROOT_ITEM_KEY;
                key.offset = (u64)-1;
                root = btrfs_read_fs_root_no_name(fs_info, &key);
                if (IS_ERR(root)) {
                        ret = PTR_ERR(root);
                        goto out;
                }

                key.objectid = dirid;
                key.type = BTRFS_INODE_REF_KEY;
                key.offset = (u64)-1;
                while (1) {
                        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                        if (ret < 0) {
                                goto out;
                        } else if (ret > 0) {
                                ret = btrfs_previous_item(root, path, dirid,
                                                          BTRFS_INODE_REF_KEY);
                                if (ret < 0) {
                                        goto out;
                                } else if (ret > 0) {
                                        ret = -ENOENT;
                                        goto out;
                                }
                        }

                        leaf = path->nodes[0];
                        slot = path->slots[0];
                        btrfs_item_key_to_cpu(leaf, &key, slot);

                        iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref);
                        len = btrfs_inode_ref_name_len(leaf, iref);
                        ptr -= len + 1;
                        total_len += len + 1;
                        if (ptr < args->path) {
                                ret = -ENAMETOOLONG;
                                goto out;
                        }

                        *(ptr + len) = '/';
                        read_extent_buffer(leaf, ptr,
                                        (unsigned long)(iref + 1), len);

                        /* Check the read+exec permission of this directory */
                        ret = btrfs_previous_item(root, path, dirid,
                                                  BTRFS_INODE_ITEM_KEY);
                        if (ret < 0) {
                                goto out;
                        } else if (ret > 0) {
                                ret = -ENOENT;
                                goto out;
                        }

                        leaf = path->nodes[0];
                        slot = path->slots[0];
                        btrfs_item_key_to_cpu(leaf, &key2, slot);
                        if (key2.objectid != dirid) {
                                ret = -ENOENT;
                                goto out;
                        }

                        temp_inode = btrfs_iget(sb, &key2, root, NULL);
                        if (IS_ERR(temp_inode)) {
                                ret = PTR_ERR(temp_inode);
                                goto out;
                        }
                        ret = inode_permission(temp_inode, MAY_READ | MAY_EXEC);
                        iput(temp_inode);
                        if (ret) {
                                ret = -EACCES;
                                goto out;
                        }

                        if (key.offset == upper_limit.objectid)
                                break;
                        if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
                                ret = -EACCES;
                                goto out;
                        }

                        btrfs_release_path(path);
                        key.objectid = key.offset;
                        key.offset = (u64)-1;
                        dirid = key.objectid;
                }

                memmove(args->path, ptr, total_len);
                args->path[total_len] = '\0';
                btrfs_release_path(path);
        }

        /* Get the bottom subvolume's name from ROOT_REF */
        root = fs_info->tree_root;
        key.objectid = treeid;
        key.type = BTRFS_ROOT_REF_KEY;
        key.offset = args->treeid;
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0) {
                goto out;
        } else if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        leaf = path->nodes[0];
        slot = path->slots[0];
        btrfs_item_key_to_cpu(leaf, &key, slot);

        item_off = btrfs_item_ptr_offset(leaf, slot);
        item_len = btrfs_item_size_nr(leaf, slot);
        /* Check if dirid in ROOT_REF corresponds to passed dirid */
        rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
        if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
                ret = -EINVAL;
                goto out;
        }

        /* Copy subvolume's name */
        item_off += sizeof(struct btrfs_root_ref);
        item_len -= sizeof(struct btrfs_root_ref);
        read_extent_buffer(leaf, args->name, item_off, item_len);
        args->name[item_len] = 0;

out:
        btrfs_free_path(path);
        return ret;
}

static noinline int btrfs_ioctl_ino_lookup(struct file *file,
                                           void __user *argp)
{
        struct btrfs_ioctl_ino_lookup_args *args;
        struct inode *inode;
        int ret = 0;

        args = memdup_user(argp, sizeof(*args));
        if (IS_ERR(args))
                return PTR_ERR(args);

        inode = file_inode(file);

        /*
         * Unprivileged query to obtain the containing subvolume root id. The
         * path is reset so it's consistent with btrfs_search_path_in_tree.
         */
        if (args->treeid == 0)
                args->treeid = BTRFS_I(inode)->root->root_key.objectid;

        if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
                args->name[0] = 0;
                goto out;
        }

        if (!capable(CAP_SYS_ADMIN)) {
                ret = -EPERM;
                goto out;
        }

        ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
                                        args->treeid, args->objectid,
                                        args->name);

out:
        if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
                ret = -EFAULT;

        kfree(args);
        return ret;
}

/*
 * Version of ino_lookup ioctl (unprivileged)
 *
 * The main differences from ino_lookup ioctl are:
 *
 *   1. Read + Exec permission will be checked using inode_permission() during
 *      path construction. -EACCES will be returned in case of failure.
 *   2. Path construction will be stopped at the inode number which corresponds
 *      to the fd with which this ioctl is called. If constructed path does not
 *      exist under fd's inode, -EACCES will be returned.
 *   3. The name of the bottom subvolume is also searched and filled.
 */
static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
{
        struct btrfs_ioctl_ino_lookup_user_args *args;
        struct inode *inode;
        int ret;

        args = memdup_user(argp, sizeof(*args));
        if (IS_ERR(args))
                return PTR_ERR(args);

        inode = file_inode(file);

        if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
            BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
                /*
                 * The subvolume does not exist under fd with which this is
                 * called
                 */
                kfree(args);
                return -EACCES;
        }

        ret = btrfs_search_path_in_tree_user(inode, args);

        if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
                ret = -EFAULT;

        kfree(args);
        return ret;
}

2594 /* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */
2595 static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
2597 struct btrfs_ioctl_get_subvol_info_args *subvol_info;
2598 struct btrfs_fs_info *fs_info;
2599 struct btrfs_root *root;
2600 struct btrfs_path *path;
2601 struct btrfs_key key;
2602 struct btrfs_root_item *root_item;
2603 struct btrfs_root_ref *rref;
2604 struct extent_buffer *leaf;
2605 unsigned long item_off;
2606 unsigned long item_len;
2607 struct inode *inode;
2611 path = btrfs_alloc_path();
2615 subvol_info = kzalloc(sizeof(*subvol_info), GFP_KERNEL);
2617 btrfs_free_path(path);
2621 inode = file_inode(file);
2622 fs_info = BTRFS_I(inode)->root->fs_info;
2624 /* Get root_item of inode's subvolume */
2625 key.objectid = BTRFS_I(inode)->root->root_key.objectid;
2626 key.type = BTRFS_ROOT_ITEM_KEY;
2627 key.offset = (u64)-1;
2628 root = btrfs_read_fs_root_no_name(fs_info, &key);
2630 ret = PTR_ERR(root);
2633 root_item = &root->root_item;
2635 subvol_info->treeid = key.objectid;
2637 subvol_info->generation = btrfs_root_generation(root_item);
2638 subvol_info->flags = btrfs_root_flags(root_item);
2640 memcpy(subvol_info->uuid, root_item->uuid, BTRFS_UUID_SIZE);
2641 memcpy(subvol_info->parent_uuid, root_item->parent_uuid,
2643 memcpy(subvol_info->received_uuid, root_item->received_uuid,
2646 subvol_info->ctransid = btrfs_root_ctransid(root_item);
2647 subvol_info->ctime.sec = btrfs_stack_timespec_sec(&root_item->ctime);
2648 subvol_info->ctime.nsec = btrfs_stack_timespec_nsec(&root_item->ctime);
2650 subvol_info->otransid = btrfs_root_otransid(root_item);
2651 subvol_info->otime.sec = btrfs_stack_timespec_sec(&root_item->otime);
2652 subvol_info->otime.nsec = btrfs_stack_timespec_nsec(&root_item->otime);
2654 subvol_info->stransid = btrfs_root_stransid(root_item);
2655 subvol_info->stime.sec = btrfs_stack_timespec_sec(&root_item->stime);
2656 subvol_info->stime.nsec = btrfs_stack_timespec_nsec(&root_item->stime);
2658 subvol_info->rtransid = btrfs_root_rtransid(root_item);
2659 subvol_info->rtime.sec = btrfs_stack_timespec_sec(&root_item->rtime);
2660 subvol_info->rtime.nsec = btrfs_stack_timespec_nsec(&root_item->rtime);
2662 if (key.objectid != BTRFS_FS_TREE_OBJECTID) {
2663 /* Search root tree for ROOT_BACKREF of this subvolume */
2664 root = fs_info->tree_root;
2666 key.type = BTRFS_ROOT_BACKREF_KEY;
2668 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2671 } else if (path->slots[0] >=
2672 btrfs_header_nritems(path->nodes[0])) {
2673 ret = btrfs_next_leaf(root, path);
2676 } else if (ret > 0) {
2682 leaf = path->nodes[0];
2683 slot = path->slots[0];
2684 btrfs_item_key_to_cpu(leaf, &key, slot);
2685 if (key.objectid == subvol_info->treeid &&
2686 key.type == BTRFS_ROOT_BACKREF_KEY) {
2687 subvol_info->parent_id = key.offset;
2689 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2690 subvol_info->dirid = btrfs_root_ref_dirid(leaf, rref);
2692 item_off = btrfs_item_ptr_offset(leaf, slot)
2693 + sizeof(struct btrfs_root_ref);
2694 item_len = btrfs_item_size_nr(leaf, slot)
2695 - sizeof(struct btrfs_root_ref);
2696 read_extent_buffer(leaf, subvol_info->name,
2697 item_off, item_len);
2704 if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
2708 btrfs_free_path(path);
2709 kzfree(subvol_info);
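/*
 * Editor's illustrative sketch (not from the kernel sources): reading
 * the structure filled in above from userspace, assuming "subvol_fd" is
 * a placeholder for an fd opened inside the subvolume of interest.
 *
 *	struct btrfs_ioctl_get_subvol_info_args info;
 *
 *	if (ioctl(subvol_fd, BTRFS_IOC_GET_SUBVOL_INFO, &info) == 0)
 *		printf("id %llu gen %llu parent %llu\n",
 *		       (unsigned long long)info.treeid,
 *		       (unsigned long long)info.generation,
 *		       (unsigned long long)info.parent_id);
 */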
2714 * Return ROOT_REF information of the subvolume containing this inode
2715 * except the subvolume name.
2717 static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
2719 struct btrfs_ioctl_get_subvol_rootref_args *rootrefs;
2720 struct btrfs_root_ref *rref;
2721 struct btrfs_root *root;
2722 struct btrfs_path *path;
2723 struct btrfs_key key;
2724 struct extent_buffer *leaf;
2725 struct inode *inode;
2731 path = btrfs_alloc_path();
2735 rootrefs = memdup_user(argp, sizeof(*rootrefs));
2736 if (IS_ERR(rootrefs)) {
2737 btrfs_free_path(path);
2738 return PTR_ERR(rootrefs);
2741 inode = file_inode(file);
2742 root = BTRFS_I(inode)->root->fs_info->tree_root;
2743 objectid = BTRFS_I(inode)->root->root_key.objectid;
2745 key.objectid = objectid;
2746 key.type = BTRFS_ROOT_REF_KEY;
2747 key.offset = rootrefs->min_treeid;
2750 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2753 } else if (path->slots[0] >=
2754 btrfs_header_nritems(path->nodes[0])) {
2755 ret = btrfs_next_leaf(root, path);
2758 } else if (ret > 0) {
2764 leaf = path->nodes[0];
2765 slot = path->slots[0];
2767 btrfs_item_key_to_cpu(leaf, &key, slot);
2768 if (key.objectid != objectid || key.type != BTRFS_ROOT_REF_KEY) {
2773 if (found == BTRFS_MAX_ROOTREF_BUFFER_NUM) {
2778 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2779 rootrefs->rootref[found].treeid = key.offset;
2780 rootrefs->rootref[found].dirid =
2781 btrfs_root_ref_dirid(leaf, rref);
2784 ret = btrfs_next_item(root, path);
2787 } else if (ret > 0) {
2794 if (!ret || ret == -EOVERFLOW) {
2795 rootrefs->num_items = found;
2796 /* update min_treeid for next search */
2798 rootrefs->min_treeid =
2799 rootrefs->rootref[found - 1].treeid + 1;
2800 if (copy_to_user(argp, rootrefs, sizeof(*rootrefs)))
2805 btrfs_free_path(path);
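/*
 * Editor's illustrative sketch (not from the kernel sources): listing
 * all child subvolumes by reissuing the ioctl while it reports
 * -EOVERFLOW. The kernel advances min_treeid in the copied-back args,
 * so the loop below simply repeats the call. "fd" is a placeholder.
 *
 *	struct btrfs_ioctl_get_subvol_rootref_args refs = { .min_treeid = 0 };
 *	int i, err;
 *
 *	do {
 *		err = ioctl(fd, BTRFS_IOC_GET_SUBVOL_ROOTREF, &refs);
 *		for (i = 0; i < refs.num_items; i++)
 *			printf("tree %llu dir %llu\n",
 *			       (unsigned long long)refs.rootref[i].treeid,
 *			       (unsigned long long)refs.rootref[i].dirid);
 *	} while (err < 0 && errno == EOVERFLOW);
 */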
2810 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2813 struct dentry *parent = file->f_path.dentry;
2814 struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
2815 struct dentry *dentry;
2816 struct inode *dir = d_inode(parent);
2817 struct inode *inode;
2818 struct btrfs_root *root = BTRFS_I(dir)->root;
2819 struct btrfs_root *dest = NULL;
2820 struct btrfs_ioctl_vol_args *vol_args;
2824 if (!S_ISDIR(dir->i_mode))
2827 vol_args = memdup_user(arg, sizeof(*vol_args));
2828 if (IS_ERR(vol_args))
2829 return PTR_ERR(vol_args);
2831 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2832 namelen = strlen(vol_args->name);
2833 if (strchr(vol_args->name, '/') ||
2834 strncmp(vol_args->name, "..", namelen) == 0) {
2839 err = mnt_want_write_file(file);
2844 err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
2846 goto out_drop_write;
2847 dentry = lookup_one_len(vol_args->name, parent, namelen);
2848 if (IS_ERR(dentry)) {
2849 err = PTR_ERR(dentry);
2850 goto out_unlock_dir;
2853 if (d_really_is_negative(dentry)) {
2858 inode = d_inode(dentry);
2859 dest = BTRFS_I(inode)->root;
2860 if (!capable(CAP_SYS_ADMIN)) {
2862 * Regular user. Only allow this with a special mount
2863 * option, when the user has write+exec access to the
2864 * subvol root, and when rmdir(2) would have been
2867 * Note that this does _not_ check that the subvol is
2868 * empty or doesn't contain data that we wouldn't
2869 * otherwise be able to delete.
2871 * Users who want to delete empty subvols should try
2875 if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
2879 * Do not allow deletion if the parent dir is the same
2880 * as the dir to be deleted. That means the ioctl
2881 * must be called on the dentry referencing the root
2882 * of the subvol, not a random directory contained
2889 err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
2894 /* check if subvolume may be deleted by a user */
2895 err = btrfs_may_delete(dir, dentry, 1);
2899 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
2905 err = btrfs_delete_subvolume(dir, dentry);
2906 inode_unlock(inode);
2915 mnt_drop_write_file(file);
2921 static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
2923 struct inode *inode = file_inode(file);
2924 struct btrfs_root *root = BTRFS_I(inode)->root;
2925 struct btrfs_ioctl_defrag_range_args *range;
2928 ret = mnt_want_write_file(file);
2932 if (btrfs_root_readonly(root)) {
2937 switch (inode->i_mode & S_IFMT) {
2939 if (!capable(CAP_SYS_ADMIN)) {
2943 ret = btrfs_defrag_root(root);
2947 * Note that this does not check the file descriptor for write
2948 * access. This prevents defragmenting executables that are
2949 * running and allows defrag on files open in read-only mode.
2951 if (!capable(CAP_SYS_ADMIN) &&
2952 inode_permission(inode, MAY_WRITE)) {
2957 range = kzalloc(sizeof(*range), GFP_KERNEL);
2964 if (copy_from_user(range, argp,
2970 /* compression requires us to start the IO */
2971 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
2972 range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
2973 range->extent_thresh = (u32)-1;
2976 /* the rest are all set to zero by kzalloc */
2977 range->len = (u64)-1;
2979 ret = btrfs_defrag_file(file_inode(file), file,
2980 range, BTRFS_OLDEST_GENERATION, 0);
2989 mnt_drop_write_file(file);
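/*
 * Editor's illustrative sketch (not from the kernel sources): a
 * userspace request for a whole-file compressing defrag, mirroring the
 * defaults the kernel fills in above. Leaving compress_type at zero
 * selects the kernel's default compression. "fd" is a placeholder.
 *
 *	struct btrfs_ioctl_defrag_range_args range = {
 *		.start = 0,
 *		.len = (__u64)-1,
 *		.flags = BTRFS_DEFRAG_RANGE_COMPRESS,
 *		.extent_thresh = (__u32)-1,
 *	};
 *
 *	ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
 */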
2993 static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
2995 struct btrfs_ioctl_vol_args *vol_args;
2998 if (!capable(CAP_SYS_ADMIN))
3001 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
3002 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3004 vol_args = memdup_user(arg, sizeof(*vol_args));
3005 if (IS_ERR(vol_args)) {
3006 ret = PTR_ERR(vol_args);
3010 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3011 ret = btrfs_init_new_device(fs_info, vol_args->name);
3014 btrfs_info(fs_info, "disk added %s", vol_args->name);
3018 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3022 static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
3024 struct inode *inode = file_inode(file);
3025 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3026 struct btrfs_ioctl_vol_args_v2 *vol_args;
3029 if (!capable(CAP_SYS_ADMIN))
3032 ret = mnt_want_write_file(file);
3036 vol_args = memdup_user(arg, sizeof(*vol_args));
3037 if (IS_ERR(vol_args)) {
3038 ret = PTR_ERR(vol_args);
3042	/* Check for compatibility: reject unknown flags */
3043 if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
3048 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
3049 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3053 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
3054 ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
3056 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
3057 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
3059 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3062 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
3063 btrfs_info(fs_info, "device deleted: id %llu",
3066 btrfs_info(fs_info, "device deleted: %s",
3072 mnt_drop_write_file(file);
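/*
 * Editor's illustrative sketch (not from the kernel sources): removing
 * a device by its numeric id through the v2 interface, the case handled
 * by BTRFS_DEVICE_SPEC_BY_ID above. "fd" and the devid value are
 * placeholders.
 *
 *	struct btrfs_ioctl_vol_args_v2 args = {
 *		.flags = BTRFS_DEVICE_SPEC_BY_ID,
 *		.devid = 2,
 *	};
 *
 *	ioctl(fd, BTRFS_IOC_RM_DEV_V2, &args);
 */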
3076 static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
3078 struct inode *inode = file_inode(file);
3079 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3080 struct btrfs_ioctl_vol_args *vol_args;
3083 if (!capable(CAP_SYS_ADMIN))
3086 ret = mnt_want_write_file(file);
3090 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
3091 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3092 goto out_drop_write;
3095 vol_args = memdup_user(arg, sizeof(*vol_args));
3096 if (IS_ERR(vol_args)) {
3097 ret = PTR_ERR(vol_args);
3101 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3102 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
3105 btrfs_info(fs_info, "disk deleted %s", vol_args->name);
3108 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3110 mnt_drop_write_file(file);
3115 static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
3118 struct btrfs_ioctl_fs_info_args *fi_args;
3119 struct btrfs_device *device;
3120 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3123 fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
3128 fi_args->num_devices = fs_devices->num_devices;
3130 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
3131 if (device->devid > fi_args->max_id)
3132 fi_args->max_id = device->devid;
3136 memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
3137 fi_args->nodesize = fs_info->nodesize;
3138 fi_args->sectorsize = fs_info->sectorsize;
3139 fi_args->clone_alignment = fs_info->sectorsize;
3141 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
3148 static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
3151 struct btrfs_ioctl_dev_info_args *di_args;
3152 struct btrfs_device *dev;
3154 char *s_uuid = NULL;
3156 di_args = memdup_user(arg, sizeof(*di_args));
3157 if (IS_ERR(di_args))
3158 return PTR_ERR(di_args);
3160 if (!btrfs_is_empty_uuid(di_args->uuid))
3161 s_uuid = di_args->uuid;
3164 dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);
3171 di_args->devid = dev->devid;
3172 di_args->bytes_used = btrfs_device_get_bytes_used(dev);
3173 di_args->total_bytes = btrfs_device_get_total_bytes(dev);
3174 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
3176 strncpy(di_args->path, rcu_str_deref(dev->name),
3177 sizeof(di_args->path) - 1);
3178 di_args->path[sizeof(di_args->path) - 1] = 0;
3180 di_args->path[0] = '\0';
3185 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
3192 static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
3196 page = grab_cache_page(inode->i_mapping, index);
3198 return ERR_PTR(-ENOMEM);
3200 if (!PageUptodate(page)) {
3203 ret = btrfs_readpage(NULL, page);
3205 return ERR_PTR(ret);
3207 if (!PageUptodate(page)) {
3210 return ERR_PTR(-EIO);
3212 if (page->mapping != inode->i_mapping) {
3215 return ERR_PTR(-EAGAIN);
3222 static int gather_extent_pages(struct inode *inode, struct page **pages,
3223 int num_pages, u64 off)
3226 pgoff_t index = off >> PAGE_SHIFT;
3228 for (i = 0; i < num_pages; i++) {
3230 pages[i] = extent_same_get_page(inode, index + i);
3231 if (IS_ERR(pages[i])) {
3232 int err = PTR_ERR(pages[i]);
3243 static int lock_extent_range(struct inode *inode, u64 off, u64 len,
3244 bool retry_range_locking)
3247 * Do any pending delalloc/csum calculations on inode, one way or
3248 * another, and lock file content.
3249 * The locking order is:
3252 * 2) range in the inode's io tree
3255 struct btrfs_ordered_extent *ordered;
3256 lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
3257 ordered = btrfs_lookup_first_ordered_extent(inode,
3260 ordered->file_offset + ordered->len <= off ||
3261 ordered->file_offset >= off + len) &&
3262 !test_range_bit(&BTRFS_I(inode)->io_tree, off,
3263 off + len - 1, EXTENT_DELALLOC, 0, NULL)) {
3265 btrfs_put_ordered_extent(ordered);
3268 unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
3270 btrfs_put_ordered_extent(ordered);
3271 if (!retry_range_locking)
3273 btrfs_wait_ordered_range(inode, off, len);
3278 static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
3280 inode_unlock(inode1);
3281 inode_unlock(inode2);
3284 static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
3286 if (inode1 < inode2)
3287 swap(inode1, inode2);
3289 inode_lock_nested(inode1, I_MUTEX_PARENT);
3290 inode_lock_nested(inode2, I_MUTEX_CHILD);
3293 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
3294 struct inode *inode2, u64 loff2, u64 len)
3296 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
3297 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
3300 static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
3301 struct inode *inode2, u64 loff2, u64 len,
3302 bool retry_range_locking)
3306 if (inode1 < inode2) {
3307 swap(inode1, inode2);
3310 ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
3313 ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
3315 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
3322 struct page **src_pages;
3323 struct page **dst_pages;
3326 static void btrfs_cmp_data_free(struct cmp_pages *cmp)
3331 for (i = 0; i < cmp->num_pages; i++) {
3332 pg = cmp->src_pages[i];
3336 cmp->src_pages[i] = NULL;
3338 pg = cmp->dst_pages[i];
3342 cmp->dst_pages[i] = NULL;
3347 static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
3348 struct inode *dst, u64 dst_loff,
3349 u64 len, struct cmp_pages *cmp)
3352 int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
3354 cmp->num_pages = num_pages;
3356 ret = gather_extent_pages(src, cmp->src_pages, num_pages, loff);
3360 ret = gather_extent_pages(dst, cmp->dst_pages, num_pages, dst_loff);
3364 btrfs_cmp_data_free(cmp);
3368 static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
3372 struct page *src_page, *dst_page;
3373 unsigned int cmp_len = PAGE_SIZE;
3374 void *addr, *dst_addr;
3378 if (len < PAGE_SIZE)
3381 BUG_ON(i >= cmp->num_pages);
3383 src_page = cmp->src_pages[i];
3384 dst_page = cmp->dst_pages[i];
3385 ASSERT(PageLocked(src_page));
3386 ASSERT(PageLocked(dst_page));
3388 addr = kmap_atomic(src_page);
3389 dst_addr = kmap_atomic(dst_page);
3391 flush_dcache_page(src_page);
3392 flush_dcache_page(dst_page);
3394 if (memcmp(addr, dst_addr, cmp_len))
3397 kunmap_atomic(addr);
3398 kunmap_atomic(dst_addr);
3410 static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
3414 u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;
3416 if (off + olen > inode->i_size || off + olen < off)
3419 /* if we extend to eof, continue to block boundary */
3420 if (off + len == inode->i_size)
3421 *plen = len = ALIGN(inode->i_size, bs) - off;
3423 /* Check that we are block aligned - btrfs_clone() requires this */
3424 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
3430 static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3431 struct inode *dst, u64 dst_loff,
3432 struct cmp_pages *cmp)
3436 bool same_inode = (src == dst);
3437 u64 same_lock_start = 0;
3438 u64 same_lock_len = 0;
3440 ret = extent_same_check_offsets(src, loff, &len, olen);
3444 ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
3450 * Single inode case wants the same checks, except we
3451 * don't want our length pushed out past i_size as
3452 * comparing that data range makes no sense.
3454 * extent_same_check_offsets() will do this for an
3455 * unaligned length at i_size, so catch it here and
3456 * reject the request.
3458 * This effectively means we require aligned extents
3459 * for the single-inode case, whereas the other cases
3460 * allow an unaligned length so long as it ends at
3466 /* Check for overlapping ranges */
3467 if (dst_loff + len > loff && dst_loff < loff + len)
3470 same_lock_start = min_t(u64, loff, dst_loff);
3471 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
3474 * If the source and destination inodes are different, the
3475 * source's range end offset matches the source's i_size, that
3476 * i_size is not a multiple of the sector size, and the
3477 * destination range does not go past the destination's i_size,
3478 * we must round down the length to the nearest sector size
3479		 * multiple. If we don't do this adjustment we end up replacing
3480 * with zeroes the bytes in the range that starts at the
3481 * deduplication range's end offset and ends at the next sector
3484 if (loff + olen == i_size_read(src) &&
3485 dst_loff + len < i_size_read(dst)) {
3486 const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
3488 len = round_down(i_size_read(src), sz) - loff;
3494 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, cmp);
3499 ret = lock_extent_range(src, same_lock_start, same_lock_len,
3502 ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
3505 * If one of the inodes has dirty pages in the respective range or
3506	 * ordered extents, we need to flush delalloc and wait for all ordered
3507 * extents in the range. We must unlock the pages and the ranges in the
3508 * io trees to avoid deadlocks when flushing delalloc (requires locking
3509 * pages) and when waiting for ordered extents to complete (they require
3512 if (ret == -EAGAIN) {
3514 * Ranges in the io trees already unlocked. Now unlock all
3515 * pages before waiting for all IO to complete.
3517 btrfs_cmp_data_free(cmp);
3519 btrfs_wait_ordered_range(src, same_lock_start,
3522 btrfs_wait_ordered_range(src, loff, len);
3523 btrfs_wait_ordered_range(dst, dst_loff, len);
3529 /* ranges in the io trees already unlocked */
3530 btrfs_cmp_data_free(cmp);
3534 /* pass original length for comparison so we stay within i_size */
3535 ret = btrfs_cmp_data(olen, cmp);
3537 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
3540 unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
3541 same_lock_start + same_lock_len - 1);
3543 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
3545 btrfs_cmp_data_free(cmp);
3550 #define BTRFS_MAX_DEDUPE_LEN SZ_16M
3552 static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
3553 struct inode *dst, u64 dst_loff)
3556 struct cmp_pages cmp;
3557 int num_pages = PAGE_ALIGN(BTRFS_MAX_DEDUPE_LEN) >> PAGE_SHIFT;
3558 bool same_inode = (src == dst);
3559 u64 i, tail_len, chunk_count;
3567 btrfs_double_inode_lock(src, dst);
3569 /* don't make the dst file partly checksummed */
3570 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3571 (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
3576 tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
3577 chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
3578 if (chunk_count == 0)
3579 num_pages = PAGE_ALIGN(tail_len) >> PAGE_SHIFT;
3582 * If deduping ranges in the same inode, locking rules make it
3583 * mandatory to always lock pages in ascending order to avoid deadlocks
3584 * with concurrent tasks (such as starting writeback/delalloc).
3586 if (same_inode && dst_loff < loff)
3587 swap(loff, dst_loff);
3590 * We must gather up all the pages before we initiate our extent
3591 * locking. We use an array for the page pointers. Size of the array is
3592 * bounded by len, which is in turn bounded by BTRFS_MAX_DEDUPE_LEN.
3594 cmp.src_pages = kvmalloc_array(num_pages, sizeof(struct page *),
3595 GFP_KERNEL | __GFP_ZERO);
3596 cmp.dst_pages = kvmalloc_array(num_pages, sizeof(struct page *),
3597 GFP_KERNEL | __GFP_ZERO);
3598 if (!cmp.src_pages || !cmp.dst_pages) {
3603 for (i = 0; i < chunk_count; i++) {
3604 ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
3605 dst, dst_loff, &cmp);
3609 loff += BTRFS_MAX_DEDUPE_LEN;
3610 dst_loff += BTRFS_MAX_DEDUPE_LEN;
3614 ret = btrfs_extent_same_range(src, loff, tail_len, dst,
3618 kvfree(cmp.src_pages);
3619 kvfree(cmp.dst_pages);
3625 btrfs_double_inode_unlock(src, dst);
3630 int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff,
3631 struct file *dst_file, loff_t dst_loff,
3634 struct inode *src = file_inode(src_file);
3635 struct inode *dst = file_inode(dst_file);
3636 u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
3638 if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
3640 * Btrfs does not support blocksize < page_size. As a
3641 * result, btrfs_cmp_data() won't correctly handle
3642 * this situation without an update.
3647 return btrfs_extent_same(src, src_loff, olen, dst, dst_loff);
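/*
 * Editor's illustrative sketch (not from the kernel sources): userspace
 * normally reaches btrfs_dedupe_file_range() through the generic
 * FIDEDUPERANGE ioctl from <linux/fs.h>. "src_fd" and "dst_fd" are
 * placeholders for two open files on the same btrfs filesystem.
 *
 *	struct file_dedupe_range *r;
 *
 *	r = calloc(1, sizeof(*r) + sizeof(struct file_dedupe_range_info));
 *	r->src_offset = 0;
 *	r->src_length = 128 * 1024;
 *	r->dest_count = 1;
 *	r->info[0].dest_fd = dst_fd;
 *	r->info[0].dest_offset = 0;
 *
 *	if (ioctl(src_fd, FIDEDUPERANGE, r) == 0 &&
 *	    r->info[0].status == FILE_DEDUPE_RANGE_SAME)
 *		printf("deduped %llu bytes\n",
 *		       (unsigned long long)r->info[0].bytes_deduped);
 */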
3650 static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
3651 struct inode *inode,
3657 struct btrfs_root *root = BTRFS_I(inode)->root;
3660 inode_inc_iversion(inode);
3661 if (!no_time_update)
3662 inode->i_mtime = inode->i_ctime = current_time(inode);
3664 * We round up to the block size at eof when determining which
3665 * extents to clone above, but shouldn't round up the file size.
3667 if (endoff > destoff + olen)
3668 endoff = destoff + olen;
3669 if (endoff > inode->i_size)
3670 btrfs_i_size_write(BTRFS_I(inode), endoff);
3672 ret = btrfs_update_inode(trans, root, inode);
3674 btrfs_abort_transaction(trans, ret);
3675 btrfs_end_transaction(trans);
3678 ret = btrfs_end_transaction(trans);
3683 static void clone_update_extent_map(struct btrfs_inode *inode,
3684 const struct btrfs_trans_handle *trans,
3685 const struct btrfs_path *path,
3686 const u64 hole_offset,
3689 struct extent_map_tree *em_tree = &inode->extent_tree;
3690 struct extent_map *em;
3693 em = alloc_extent_map();
3695 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
3700 struct btrfs_file_extent_item *fi;
3702 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
3703 struct btrfs_file_extent_item);
3704 btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
3705 em->generation = -1;
3706 if (btrfs_file_extent_type(path->nodes[0], fi) ==
3707 BTRFS_FILE_EXTENT_INLINE)
3708 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3709 &inode->runtime_flags);
3711 em->start = hole_offset;
3713 em->ram_bytes = em->len;
3714 em->orig_start = hole_offset;
3715 em->block_start = EXTENT_MAP_HOLE;
3717 em->orig_block_len = 0;
3718 em->compress_type = BTRFS_COMPRESS_NONE;
3719 em->generation = trans->transid;
3723 write_lock(&em_tree->lock);
3724 ret = add_extent_mapping(em_tree, em, 1);
3725 write_unlock(&em_tree->lock);
3726 if (ret != -EEXIST) {
3727 free_extent_map(em);
3730 btrfs_drop_extent_cache(inode, em->start,
3731 em->start + em->len - 1, 0);
3735 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
3739 * Make sure we do not end up inserting an inline extent into a file that
3740 * already has other (non-inline) extents. If a file has an inline extent it
3741 * cannot have any other extents and the (single) inline extent must start at
3742 * file offset 0. Failing to respect these rules will lead to file corruption,
3743 * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc.
3745 * We can have extents that have been already written to disk or we can have
3746 * dirty ranges still in delalloc, in which case the extent maps and items are
3747 * created only when we run delalloc, and the delalloc ranges might fall outside
3748 * the range we are currently locking in the inode's io tree. So we check the
3749 * inode's i_size because of that (i_size updates are done while holding the
3750 * i_mutex, which we are holding here).
3751 * We also check to see if the inode has a size not greater than "datal" but has
3752 * extents beyond it, due to a fallocate with FALLOC_FL_KEEP_SIZE (and we are
3753 * protected against such concurrent fallocate calls by the i_mutex).
3755 * If the file has no extents but a size greater than datal, do not allow the
3756 * copy because we would need to turn the inline extent into a non-inline one (even
3757 * with NO_HOLES enabled). If we find our destination inode only has one inline
3758 * extent, just overwrite it with the source inline extent if its size is less
3759 * than the source extent's size, or we could copy the source inline extent's
3760 * data into the destination inode's inline extent if the latter is greater than
3763 static int clone_copy_inline_extent(struct inode *dst,
3764 struct btrfs_trans_handle *trans,
3765 struct btrfs_path *path,
3766 struct btrfs_key *new_key,
3767 const u64 drop_start,
3773 struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
3774 struct btrfs_root *root = BTRFS_I(dst)->root;
3775 const u64 aligned_end = ALIGN(new_key->offset + datal,
3776 fs_info->sectorsize);
3778 struct btrfs_key key;
3780 if (new_key->offset > 0)
3783 key.objectid = btrfs_ino(BTRFS_I(dst));
3784 key.type = BTRFS_EXTENT_DATA_KEY;
3786 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3789 } else if (ret > 0) {
3790 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3791 ret = btrfs_next_leaf(root, path);
3795 goto copy_inline_extent;
3797 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3798 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3799 key.type == BTRFS_EXTENT_DATA_KEY) {
3800 ASSERT(key.offset > 0);
3803 } else if (i_size_read(dst) <= datal) {
3804 struct btrfs_file_extent_item *ei;
3808 * If the file size is <= datal, make sure there are no other
3809		 * extents following (which can happen due to a fallocate call with
3810 * the flag FALLOC_FL_KEEP_SIZE).
3812 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3813 struct btrfs_file_extent_item);
3815		 * If it's an inline extent, it cannot have other extents
3818 if (btrfs_file_extent_type(path->nodes[0], ei) ==
3819 BTRFS_FILE_EXTENT_INLINE)
3820 goto copy_inline_extent;
3822 ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
3823 if (ext_len > aligned_end)
3826 ret = btrfs_next_item(root, path);
3829 } else if (ret == 0) {
3830 btrfs_item_key_to_cpu(path->nodes[0], &key,
3832 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3833 key.type == BTRFS_EXTENT_DATA_KEY)
3840 * We have no extent items, or we have an extent at offset 0 which may
3841	 * or may not be inlined. All these cases are dealt with in the same way.
3843 if (i_size_read(dst) > datal) {
3845 * If the destination inode has an inline extent...
3846 * This would require copying the data from the source inline
3847 * extent into the beginning of the destination's inline extent.
3848		 * But this is really complex: both extents can be compressed
3849 * or just one of them, which would require decompressing and
3850 * re-compressing data (which could increase the new compressed
3851 * size, not allowing the compressed data to fit anymore in an
3853 * So just don't support this case for now (it should be rare,
3854 * we are not really saving space when cloning inline extents).
3859 btrfs_release_path(path);
3860 ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
3863 ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
3868 const u32 start = btrfs_file_extent_calc_inline_size(0);
3870 memmove(inline_data + start, inline_data + start + skip, datal);
3873 write_extent_buffer(path->nodes[0], inline_data,
3874 btrfs_item_ptr_offset(path->nodes[0],
3877 inode_add_bytes(dst, datal);
3883 * btrfs_clone() - clone a range from one file to another
3885 * @src: Inode to clone from
3886 * @inode: Inode to clone to
3887 * @off: Offset within source to start clone from
3888 * @olen: Original length, passed by user, of range to clone
3889 * @olen_aligned: Block-aligned value of olen
3890 * @destoff: Offset within @inode to start clone
3891 * @no_time_update: Whether to update mtime/ctime on the target inode
3893 static int btrfs_clone(struct inode *src, struct inode *inode,
3894 const u64 off, const u64 olen, const u64 olen_aligned,
3895 const u64 destoff, int no_time_update)
3897 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3898 struct btrfs_root *root = BTRFS_I(inode)->root;
3899 struct btrfs_path *path = NULL;
3900 struct extent_buffer *leaf;
3901 struct btrfs_trans_handle *trans;
3903 struct btrfs_key key;
3907 const u64 len = olen_aligned;
3908 u64 last_dest_end = destoff;
3911 buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
3915 path = btrfs_alloc_path();
3921 path->reada = READA_FORWARD;
3923 key.objectid = btrfs_ino(BTRFS_I(src));
3924 key.type = BTRFS_EXTENT_DATA_KEY;
3928 u64 next_key_min_offset = key.offset + 1;
3931 * note the key will change type as we walk through the
3934 path->leave_spinning = 1;
3935 ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
3940 * First search, if no extent item that starts at offset off was
3941 * found but the previous item is an extent item, it's possible
3942 * it might overlap our target range, therefore process it.
3944 if (key.offset == off && ret > 0 && path->slots[0] > 0) {
3945 btrfs_item_key_to_cpu(path->nodes[0], &key,
3946 path->slots[0] - 1);
3947 if (key.type == BTRFS_EXTENT_DATA_KEY)
3951 nritems = btrfs_header_nritems(path->nodes[0]);
3953 if (path->slots[0] >= nritems) {
3954 ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
3959 nritems = btrfs_header_nritems(path->nodes[0]);
3961 leaf = path->nodes[0];
3962 slot = path->slots[0];
3964 btrfs_item_key_to_cpu(leaf, &key, slot);
3965 if (key.type > BTRFS_EXTENT_DATA_KEY ||
3966 key.objectid != btrfs_ino(BTRFS_I(src)))
3969 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3970 struct btrfs_file_extent_item *extent;
3973 struct btrfs_key new_key;
3974 u64 disko = 0, diskl = 0;
3975 u64 datao = 0, datal = 0;
3979 extent = btrfs_item_ptr(leaf, slot,
3980 struct btrfs_file_extent_item);
3981 comp = btrfs_file_extent_compression(leaf, extent);
3982 type = btrfs_file_extent_type(leaf, extent);
3983 if (type == BTRFS_FILE_EXTENT_REG ||
3984 type == BTRFS_FILE_EXTENT_PREALLOC) {
3985 disko = btrfs_file_extent_disk_bytenr(leaf,
3987 diskl = btrfs_file_extent_disk_num_bytes(leaf,
3989 datao = btrfs_file_extent_offset(leaf, extent);
3990 datal = btrfs_file_extent_num_bytes(leaf,
3992 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3993 /* take upper bound, may be compressed */
3994 datal = btrfs_file_extent_ram_bytes(leaf,
3999 * The first search might have left us at an extent
4000		 * item that ends before our target range's start, which can
4001		 * happen if we have holes and the NO_HOLES feature enabled.
4003 if (key.offset + datal <= off) {
4006 } else if (key.offset >= off + len) {
4009 next_key_min_offset = key.offset + datal;
4010 size = btrfs_item_size_nr(leaf, slot);
4011 read_extent_buffer(leaf, buf,
4012 btrfs_item_ptr_offset(leaf, slot),
4015 btrfs_release_path(path);
4016 path->leave_spinning = 0;
4018 memcpy(&new_key, &key, sizeof(new_key));
4019 new_key.objectid = btrfs_ino(BTRFS_I(inode));
4020 if (off <= key.offset)
4021 new_key.offset = key.offset + destoff - off;
4023 new_key.offset = destoff;
4026 * Deal with a hole that doesn't have an extent item
4027 * that represents it (NO_HOLES feature enabled).
4028 * This hole is either in the middle of the cloning
4029 * range or at the beginning (fully overlaps it or
4030 * partially overlaps it).
4032 if (new_key.offset != last_dest_end)
4033 drop_start = last_dest_end;
4035 drop_start = new_key.offset;
4038 * 1 - adjusting old extent (we may have to split it)
4039 * 1 - add new extent
4042 trans = btrfs_start_transaction(root, 3);
4043 if (IS_ERR(trans)) {
4044 ret = PTR_ERR(trans);
4048 if (type == BTRFS_FILE_EXTENT_REG ||
4049 type == BTRFS_FILE_EXTENT_PREALLOC) {
4051 * a | --- range to clone ---| b
4052 * | ------------- extent ------------- |
4055 /* subtract range b */
4056 if (key.offset + datal > off + len)
4057 datal = off + len - key.offset;
4059 /* subtract range a */
4060 if (off > key.offset) {
4061 datao += off - key.offset;
4062 datal -= off - key.offset;
4065 ret = btrfs_drop_extents(trans, root, inode,
4067 new_key.offset + datal,
4070 if (ret != -EOPNOTSUPP)
4071 btrfs_abort_transaction(trans,
4073 btrfs_end_transaction(trans);
4077 ret = btrfs_insert_empty_item(trans, root, path,
4080 btrfs_abort_transaction(trans, ret);
4081 btrfs_end_transaction(trans);
4085 leaf = path->nodes[0];
4086 slot = path->slots[0];
4087 write_extent_buffer(leaf, buf,
4088 btrfs_item_ptr_offset(leaf, slot),
4091 extent = btrfs_item_ptr(leaf, slot,
4092 struct btrfs_file_extent_item);
4094 /* disko == 0 means it's a hole */
4098 btrfs_set_file_extent_offset(leaf, extent,
4100 btrfs_set_file_extent_num_bytes(leaf, extent,
4104 inode_add_bytes(inode, datal);
4105 ret = btrfs_inc_extent_ref(trans,
4108 root->root_key.objectid,
4109 btrfs_ino(BTRFS_I(inode)),
4110 new_key.offset - datao);
4112 btrfs_abort_transaction(trans,
4114 btrfs_end_transaction(trans);
4119 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
4123 if (off > key.offset) {
4124 skip = off - key.offset;
4125 new_key.offset += skip;
4128 if (key.offset + datal > off + len)
4129 trim = key.offset + datal - (off + len);
4131 if (comp && (skip || trim)) {
4133 btrfs_end_transaction(trans);
4136 size -= skip + trim;
4137 datal -= skip + trim;
4139 ret = clone_copy_inline_extent(inode,
4146 if (ret != -EOPNOTSUPP)
4147 btrfs_abort_transaction(trans,
4149 btrfs_end_transaction(trans);
4152 leaf = path->nodes[0];
4153 slot = path->slots[0];
4156 /* If we have an implicit hole (NO_HOLES feature). */
4157 if (drop_start < new_key.offset)
4158 clone_update_extent_map(BTRFS_I(inode), trans,
4160 new_key.offset - drop_start);
4162 clone_update_extent_map(BTRFS_I(inode), trans,
4165 btrfs_mark_buffer_dirty(leaf);
4166 btrfs_release_path(path);
4168 last_dest_end = ALIGN(new_key.offset + datal,
4169 fs_info->sectorsize);
4170 ret = clone_finish_inode_update(trans, inode,
4176 if (new_key.offset + datal >= destoff + len)
4179 btrfs_release_path(path);
4180 key.offset = next_key_min_offset;
4182 if (fatal_signal_pending(current)) {
4189 if (last_dest_end < destoff + len) {
4191 * We have an implicit hole (NO_HOLES feature is enabled) that
4192 * fully or partially overlaps our cloning range at its end.
4194 btrfs_release_path(path);
4197 * 1 - remove extent(s)
4200 trans = btrfs_start_transaction(root, 2);
4201 if (IS_ERR(trans)) {
4202 ret = PTR_ERR(trans);
4205 ret = btrfs_drop_extents(trans, root, inode,
4206 last_dest_end, destoff + len, 1);
4208 if (ret != -EOPNOTSUPP)
4209 btrfs_abort_transaction(trans, ret);
4210 btrfs_end_transaction(trans);
4213 clone_update_extent_map(BTRFS_I(inode), trans, NULL,
4215 destoff + len - last_dest_end);
4216 ret = clone_finish_inode_update(trans, inode, destoff + len,
4217 destoff, olen, no_time_update);
4221 btrfs_free_path(path);
4226 static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
4227 u64 off, u64 olen, u64 destoff)
4229 struct inode *inode = file_inode(file);
4230 struct inode *src = file_inode(file_src);
4231 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4232 struct btrfs_root *root = BTRFS_I(inode)->root;
4235 u64 bs = fs_info->sb->s_blocksize;
4236 int same_inode = src == inode;
4240 * - split compressed inline extents. annoying: we need to
4241 * decompress into destination's address_space (the file offset
4242 * may change, so source mapping won't do), then recompress (or
4243 * otherwise reinsert) a subrange.
4245 * - split destination inode's inline extents. The inline extents can
4246 * be either compressed or non-compressed.
4249 if (btrfs_root_readonly(root))
4252 if (file_src->f_path.mnt != file->f_path.mnt ||
4253 src->i_sb != inode->i_sb)
4256 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
4260 btrfs_double_inode_lock(src, inode);
4265 /* don't make the dst file partly checksummed */
4266 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
4267 (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
4272 /* determine range to clone */
4274 if (off + len > src->i_size || off + len < off)
4277 olen = len = src->i_size - off;
4278 /* if we extend to eof, continue to block boundary */
4279 if (off + len == src->i_size)
4280 len = ALIGN(src->i_size, bs) - off;
4287 /* verify the end result is block aligned */
4288 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
4289 !IS_ALIGNED(destoff, bs))
4292	/* verify that the ranges do not overlap within the same file */
4294 if (destoff + len > off && destoff < off + len)
4298 if (destoff > inode->i_size) {
4299 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
4305 * Lock the target range too. Right after we replace the file extent
4306 * items in the fs tree (which now point to the cloned data), we might
4307 * have a worker replace them with extent items relative to a write
4308	 * operation that was issued before this clone operation (compare
4309	 * with inode.c:btrfs_finish_ordered_io).
4312 u64 lock_start = min_t(u64, off, destoff);
4313 u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
4315 ret = lock_extent_range(src, lock_start, lock_len, true);
4317 ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
4322 /* ranges in the io trees already unlocked */
4326 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
4329 u64 lock_start = min_t(u64, off, destoff);
4330 u64 lock_end = max_t(u64, off, destoff) + len - 1;
4332 unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
4334 btrfs_double_extent_unlock(src, off, inode, destoff, len);
4337 * Truncate page cache pages so that future reads will see the cloned
4338 * data immediately and not the previous data.
4340 truncate_inode_pages_range(&inode->i_data,
4341 round_down(destoff, PAGE_SIZE),
4342 round_up(destoff + len, PAGE_SIZE) - 1);
4345 btrfs_double_inode_unlock(src, inode);
4351 int btrfs_clone_file_range(struct file *src_file, loff_t off,
4352 struct file *dst_file, loff_t destoff, u64 len)
4354 return btrfs_clone_files(dst_file, src_file, off, len, destoff);
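/*
 * Editor's illustrative sketch (not from the kernel sources): the same
 * entry point is reachable via the generic FICLONERANGE ioctl from
 * <linux/fs.h>. A src_length of 0 means "clone to the end of the source
 * file", matching the len == 0 handling above. "src_fd" and "dst_fd"
 * are placeholders.
 *
 *	struct file_clone_range fcr = {
 *		.src_fd = src_fd,
 *		.src_offset = 0,
 *		.src_length = 0,
 *		.dest_offset = 0,
 *	};
 *
 *	ioctl(dst_fd, FICLONERANGE, &fcr);
 */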
4357 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
4359 struct inode *inode = file_inode(file);
4360 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4361 struct btrfs_root *root = BTRFS_I(inode)->root;
4362 struct btrfs_root *new_root;
4363 struct btrfs_dir_item *di;
4364 struct btrfs_trans_handle *trans;
4365 struct btrfs_path *path;
4366 struct btrfs_key location;
4367 struct btrfs_disk_key disk_key;
4372 if (!capable(CAP_SYS_ADMIN))
4375 ret = mnt_want_write_file(file);
4379 if (copy_from_user(&objectid, argp, sizeof(objectid))) {
4385 objectid = BTRFS_FS_TREE_OBJECTID;
4387 location.objectid = objectid;
4388 location.type = BTRFS_ROOT_ITEM_KEY;
4389 location.offset = (u64)-1;
4391 new_root = btrfs_read_fs_root_no_name(fs_info, &location);
4392 if (IS_ERR(new_root)) {
4393 ret = PTR_ERR(new_root);
4396 if (!is_fstree(new_root->objectid)) {
4401 path = btrfs_alloc_path();
4406 path->leave_spinning = 1;
4408 trans = btrfs_start_transaction(root, 1);
4409 if (IS_ERR(trans)) {
4410 btrfs_free_path(path);
4411 ret = PTR_ERR(trans);
4415 dir_id = btrfs_super_root_dir(fs_info->super_copy);
4416 di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
4417 dir_id, "default", 7, 1);
4418 if (IS_ERR_OR_NULL(di)) {
4419 btrfs_free_path(path);
4420 btrfs_end_transaction(trans);
4422 "Umm, you don't have the default diritem, this isn't going to work");
4427 btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
4428 btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
4429 btrfs_mark_buffer_dirty(path->nodes[0]);
4430 btrfs_free_path(path);
4432 btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
4433 btrfs_end_transaction(trans);
4435 mnt_drop_write_file(file);
4439 static void get_block_group_info(struct list_head *groups_list,
4440 struct btrfs_ioctl_space_info *space)
4442 struct btrfs_block_group_cache *block_group;
4444 space->total_bytes = 0;
4445 space->used_bytes = 0;
4447 list_for_each_entry(block_group, groups_list, list) {
4448 space->flags = block_group->flags;
4449 space->total_bytes += block_group->key.offset;
4450 space->used_bytes +=
4451 btrfs_block_group_used(&block_group->item);
4455 static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
4458 struct btrfs_ioctl_space_args space_args;
4459 struct btrfs_ioctl_space_info space;
4460 struct btrfs_ioctl_space_info *dest;
4461 struct btrfs_ioctl_space_info *dest_orig;
4462 struct btrfs_ioctl_space_info __user *user_dest;
4463 struct btrfs_space_info *info;
4464 static const u64 types[] = {
4465 BTRFS_BLOCK_GROUP_DATA,
4466 BTRFS_BLOCK_GROUP_SYSTEM,
4467 BTRFS_BLOCK_GROUP_METADATA,
4468 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA
4476 if (copy_from_user(&space_args,
4477 (struct btrfs_ioctl_space_args __user *)arg,
4478 sizeof(space_args)))
4481 for (i = 0; i < num_types; i++) {
4482 struct btrfs_space_info *tmp;
4486 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4488 if (tmp->flags == types[i]) {
4498 down_read(&info->groups_sem);
4499 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4500 if (!list_empty(&info->block_groups[c]))
4503 up_read(&info->groups_sem);
4507 * Global block reserve, exported as a space_info
4511 /* space_slots == 0 means they are asking for a count */
4512 if (space_args.space_slots == 0) {
4513 space_args.total_spaces = slot_count;
4517 slot_count = min_t(u64, space_args.space_slots, slot_count);
4519 alloc_size = sizeof(*dest) * slot_count;
4521 /* we generally have at most 6 or so space infos, one for each raid
4522 * level. So, a whole page should be more than enough for everyone
4524 if (alloc_size > PAGE_SIZE)
4527 space_args.total_spaces = 0;
4528 dest = kmalloc(alloc_size, GFP_KERNEL);
4533 /* now we have a buffer to copy into */
4534 for (i = 0; i < num_types; i++) {
4535 struct btrfs_space_info *tmp;
4542 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4544 if (tmp->flags == types[i]) {
4553 down_read(&info->groups_sem);
4554 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4555 if (!list_empty(&info->block_groups[c])) {
4556 get_block_group_info(&info->block_groups[c],
4558 memcpy(dest, &space, sizeof(space));
4560 space_args.total_spaces++;
4566 up_read(&info->groups_sem);
4570 * Add global block reserve
4573 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4575 spin_lock(&block_rsv->lock);
4576 space.total_bytes = block_rsv->size;
4577 space.used_bytes = block_rsv->size - block_rsv->reserved;
4578 spin_unlock(&block_rsv->lock);
4579 space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
4580 memcpy(dest, &space, sizeof(space));
4581 space_args.total_spaces++;
4584 user_dest = (struct btrfs_ioctl_space_info __user *)
4585 (arg + sizeof(struct btrfs_ioctl_space_args));
4587 if (copy_to_user(user_dest, dest_orig, alloc_size))
4592 if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
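/*
 * Editor's illustrative sketch (not from the kernel sources): the
 * two-call pattern this ioctl expects, where a first call with
 * space_slots == 0 only reports how many slots to allocate for the
 * second call. "fd" is a placeholder.
 *
 *	struct btrfs_ioctl_space_args probe = { .space_slots = 0 };
 *	struct btrfs_ioctl_space_args *sargs;
 *
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, &probe);
 *	sargs = calloc(1, sizeof(*sargs) + probe.total_spaces *
 *		       sizeof(struct btrfs_ioctl_space_info));
 *	sargs->space_slots = probe.total_spaces;
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, sargs);
 */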
4598 static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
4601 struct btrfs_trans_handle *trans;
4605 trans = btrfs_attach_transaction_barrier(root);
4606 if (IS_ERR(trans)) {
4607 if (PTR_ERR(trans) != -ENOENT)
4608 return PTR_ERR(trans);
4610 /* No running transaction, don't bother */
4611 transid = root->fs_info->last_trans_committed;
4614 transid = trans->transid;
4615 ret = btrfs_commit_transaction_async(trans, 0);
4617 btrfs_end_transaction(trans);
4622 if (copy_to_user(argp, &transid, sizeof(transid)))
4627 static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
4633 if (copy_from_user(&transid, argp, sizeof(transid)))
4636 transid = 0; /* current trans */
4638 return btrfs_wait_for_commit(fs_info, transid);
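/*
 * Editor's illustrative sketch (not from the kernel sources): the two
 * ioctls above pair up so that userspace can start an asynchronous
 * commit and later wait for exactly that transaction. "fd" is a
 * placeholder.
 *
 *	__u64 transid = 0;
 *
 *	ioctl(fd, BTRFS_IOC_START_SYNC, &transid);
 *	ioctl(fd, BTRFS_IOC_WAIT_SYNC, &transid);
 */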
4641 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
4643 struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
4644 struct btrfs_ioctl_scrub_args *sa;
4647 if (!capable(CAP_SYS_ADMIN))
4650 sa = memdup_user(arg, sizeof(*sa));
4654 if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
4655 ret = mnt_want_write_file(file);
4660 ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
4661 &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
4664 if (copy_to_user(arg, sa, sizeof(*sa)))
4667 if (!(sa->flags & BTRFS_SCRUB_READONLY))
4668 mnt_drop_write_file(file);
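/*
 * Editor's illustrative sketch (not from the kernel sources): a
 * read-only scrub of device 1 over its whole range; the ioctl blocks
 * until the scrub finishes and returns the progress counters. "fd" and
 * the devid are placeholders.
 *
 *	struct btrfs_ioctl_scrub_args sa = {
 *		.devid = 1,
 *		.start = 0,
 *		.end = (__u64)-1,
 *		.flags = BTRFS_SCRUB_READONLY,
 *	};
 *
 *	if (ioctl(fd, BTRFS_IOC_SCRUB, &sa) == 0)
 *		printf("csum errors: %llu\n",
 *		       (unsigned long long)sa.progress.csum_errors);
 */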
4674 static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
4676 if (!capable(CAP_SYS_ADMIN))
4679 return btrfs_scrub_cancel(fs_info);
4682 static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
4685 struct btrfs_ioctl_scrub_args *sa;
4688 if (!capable(CAP_SYS_ADMIN))
4691 sa = memdup_user(arg, sizeof(*sa));
4695 ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
4697 if (copy_to_user(arg, sa, sizeof(*sa)))
4704 static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
4707 struct btrfs_ioctl_get_dev_stats *sa;
4710 sa = memdup_user(arg, sizeof(*sa));
4714 if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
4719 ret = btrfs_get_dev_stats(fs_info, sa);
4721 if (copy_to_user(arg, sa, sizeof(*sa)))
4728 static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
4731 struct btrfs_ioctl_dev_replace_args *p;
4734 if (!capable(CAP_SYS_ADMIN))
4737 p = memdup_user(arg, sizeof(*p));
4742 case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
4743 if (sb_rdonly(fs_info->sb)) {
4747 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
4748 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4750 ret = btrfs_dev_replace_by_ioctl(fs_info, p);
4751 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4754 case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4755 btrfs_dev_replace_status(fs_info, p);
4758 case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4759 p->result = btrfs_dev_replace_cancel(fs_info);
4767 if (copy_to_user(arg, p, sizeof(*p)))
4774 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
4780 struct btrfs_ioctl_ino_path_args *ipa = NULL;
4781 struct inode_fs_paths *ipath = NULL;
4782 struct btrfs_path *path;
4784 if (!capable(CAP_DAC_READ_SEARCH))
4787 path = btrfs_alloc_path();
4793 ipa = memdup_user(arg, sizeof(*ipa));
4800 size = min_t(u32, ipa->size, 4096);
4801 ipath = init_ipath(size, root, path);
4802 if (IS_ERR(ipath)) {
4803 ret = PTR_ERR(ipath);
4808 ret = paths_from_inode(ipa->inum, ipath);
4812 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
4813 rel_ptr = ipath->fspath->val[i] -
4814 (u64)(unsigned long)ipath->fspath->val;
4815 ipath->fspath->val[i] = rel_ptr;
4818 ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
4819 ipath->fspath, size);
4826 btrfs_free_path(path);
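/*
 * Editor's illustrative sketch (not from the kernel sources): the val[]
 * entries copied back above are offsets relative to the start of the
 * fspath buffer, so userspace rebases them before printing. "fd" and
 * "ino" are placeholders.
 *
 *	struct btrfs_data_container *fspath = malloc(4096);
 *	struct btrfs_ioctl_ino_path_args ipa = {
 *		.inum = ino,
 *		.size = 4096,
 *		.fspath = (uintptr_t)fspath,
 *	};
 *	int i;
 *
 *	if (ioctl(fd, BTRFS_IOC_INO_PATHS, &ipa) == 0)
 *		for (i = 0; i < fspath->elem_cnt; i++)
 *			printf("%s\n", (char *)fspath->val + fspath->val[i]);
 */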
4833 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
4835 struct btrfs_data_container *inodes = ctx;
4836 const size_t c = 3 * sizeof(u64);
4838 if (inodes->bytes_left >= c) {
4839 inodes->bytes_left -= c;
4840 inodes->val[inodes->elem_cnt] = inum;
4841 inodes->val[inodes->elem_cnt + 1] = offset;
4842 inodes->val[inodes->elem_cnt + 2] = root;
4843 inodes->elem_cnt += 3;
4845 inodes->bytes_missing += c - inodes->bytes_left;
4846 inodes->bytes_left = 0;
4847 inodes->elem_missed += 3;
4853 static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
4854 void __user *arg, int version)
4858 struct btrfs_ioctl_logical_ino_args *loi;
4859 struct btrfs_data_container *inodes = NULL;
4860 struct btrfs_path *path = NULL;
4863 if (!capable(CAP_SYS_ADMIN))
4866 loi = memdup_user(arg, sizeof(*loi));
4868 return PTR_ERR(loi);
4871 ignore_offset = false;
4872 size = min_t(u32, loi->size, SZ_64K);
4874 /* All reserved bits must be 0 for now */
4875 if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) {
4879 /* Only accept flags we have defined so far */
4880 if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) {
4884 ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
4885 size = min_t(u32, loi->size, SZ_16M);
4888 path = btrfs_alloc_path();
4894 inodes = init_data_container(size);
4895 if (IS_ERR(inodes)) {
4896 ret = PTR_ERR(inodes);
4901 ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
4902 build_ino_list, inodes, ignore_offset);
4908 ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes,
4914 btrfs_free_path(path);
4922 void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
4923 struct btrfs_ioctl_balance_args *bargs)
4925 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4927 bargs->flags = bctl->flags;
4929 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags))
4930 bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
4931 if (atomic_read(&fs_info->balance_pause_req))
4932 bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
4933 if (atomic_read(&fs_info->balance_cancel_req))
4934 bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
4936 memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
4937 memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
4938 memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
4940 spin_lock(&fs_info->balance_lock);
4941 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4942 spin_unlock(&fs_info->balance_lock);
4945 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
4947 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4948 struct btrfs_fs_info *fs_info = root->fs_info;
4949 struct btrfs_ioctl_balance_args *bargs;
4950 struct btrfs_balance_control *bctl;
4951 bool need_unlock; /* for mut. excl. ops lock */
4954 if (!capable(CAP_SYS_ADMIN))
4957 ret = mnt_want_write_file(file);
4962 if (!test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
4963 mutex_lock(&fs_info->balance_mutex);
4969 * mut. excl. ops lock is locked. Three possibilities:
4970 * (1) some other op is running
4971 * (2) balance is running
4972 * (3) balance is paused -- special case (think resume)
4974 mutex_lock(&fs_info->balance_mutex);
4975 if (fs_info->balance_ctl) {
4976 /* this is either (2) or (3) */
4977 if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4978 mutex_unlock(&fs_info->balance_mutex);
4980 * Lock released to allow other waiters to continue,
4981			 * and then we reexamine the status.
4983 mutex_lock(&fs_info->balance_mutex);
4985 if (fs_info->balance_ctl &&
4986 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4988 need_unlock = false;
4992 mutex_unlock(&fs_info->balance_mutex);
4996 mutex_unlock(&fs_info->balance_mutex);
5002 mutex_unlock(&fs_info->balance_mutex);
5003 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
5008 BUG_ON(!test_bit(BTRFS_FS_EXCL_OP, &fs_info->flags));
5011 bargs = memdup_user(arg, sizeof(*bargs));
5012 if (IS_ERR(bargs)) {
5013 ret = PTR_ERR(bargs);
5017 if (bargs->flags & BTRFS_BALANCE_RESUME) {
5018 if (!fs_info->balance_ctl) {
5023 bctl = fs_info->balance_ctl;
5024 spin_lock(&fs_info->balance_lock);
5025 bctl->flags |= BTRFS_BALANCE_RESUME;
5026 spin_unlock(&fs_info->balance_lock);
5034 if (fs_info->balance_ctl) {
5039 bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
5046 memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
5047 memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
5048 memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
5050 bctl->flags = bargs->flags;
5052 /* balance everything - no filters */
5053 bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
5056 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
5063 * Ownership of bctl and filesystem flag BTRFS_FS_EXCL_OP goes to
5064 * btrfs_balance. bctl is freed in reset_balance_state, or, if
5065 * restriper was paused all the way until unmount, in free_fs_info.
5066 * The flag should be cleared after reset_balance_state.
5068 need_unlock = false;
5070 ret = btrfs_balance(fs_info, bctl, bargs);
5074 if (copy_to_user(arg, bargs, sizeof(*bargs)))
5083 mutex_unlock(&fs_info->balance_mutex);
5085 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
5087 mnt_drop_write_file(file);
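/*
 * Editor's illustrative sketch (not from the kernel sources): a full
 * balance with no filters, equivalent to setting all three type bits,
 * which is also what the kernel does above when no argument is passed.
 * "fd" is a placeholder.
 *
 *	struct btrfs_ioctl_balance_args ba = {
 *		.flags = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA |
 *			 BTRFS_BALANCE_SYSTEM,
 *	};
 *
 *	ioctl(fd, BTRFS_IOC_BALANCE_V2, &ba);
 */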
5091 static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
5093 if (!capable(CAP_SYS_ADMIN))
5097 case BTRFS_BALANCE_CTL_PAUSE:
5098 return btrfs_pause_balance(fs_info);
5099 case BTRFS_BALANCE_CTL_CANCEL:
5100 return btrfs_cancel_balance(fs_info);
5106 static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
5109 struct btrfs_ioctl_balance_args *bargs;
5112 if (!capable(CAP_SYS_ADMIN))
5115 mutex_lock(&fs_info->balance_mutex);
5116 if (!fs_info->balance_ctl) {
5121 bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
5127 btrfs_update_ioctl_balance_args(fs_info, bargs);
5129 if (copy_to_user(arg, bargs, sizeof(*bargs)))
5134 mutex_unlock(&fs_info->balance_mutex);
5138 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
5140 struct inode *inode = file_inode(file);
5141 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5142 struct btrfs_ioctl_quota_ctl_args *sa;
5145 if (!capable(CAP_SYS_ADMIN))
5148 ret = mnt_want_write_file(file);
5152 sa = memdup_user(arg, sizeof(*sa));
5158 down_write(&fs_info->subvol_sem);
5161 case BTRFS_QUOTA_CTL_ENABLE:
5162 ret = btrfs_quota_enable(fs_info);
5164 case BTRFS_QUOTA_CTL_DISABLE:
5165 ret = btrfs_quota_disable(fs_info);
5173 up_write(&fs_info->subvol_sem);
5175 mnt_drop_write_file(file);
5179 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
5181 struct inode *inode = file_inode(file);
5182 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5183 struct btrfs_root *root = BTRFS_I(inode)->root;
5184 struct btrfs_ioctl_qgroup_assign_args *sa;
5185 struct btrfs_trans_handle *trans;
5189 if (!capable(CAP_SYS_ADMIN))
5192 ret = mnt_want_write_file(file);
5196 sa = memdup_user(arg, sizeof(*sa));
5202 trans = btrfs_join_transaction(root);
5203 if (IS_ERR(trans)) {
5204 ret = PTR_ERR(trans);
5209 ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
5211 ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
5214 /* update qgroup status and info */
5215 err = btrfs_run_qgroups(trans);
5217 btrfs_handle_fs_error(fs_info, err,
5218 "failed to update qgroup status and info");
5219 err = btrfs_end_transaction(trans);
5226 mnt_drop_write_file(file);
5230 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
5232 struct inode *inode = file_inode(file);
5233 struct btrfs_root *root = BTRFS_I(inode)->root;
5234 struct btrfs_ioctl_qgroup_create_args *sa;
5235 struct btrfs_trans_handle *trans;
5239 if (!capable(CAP_SYS_ADMIN))
5242 ret = mnt_want_write_file(file);
5246 sa = memdup_user(arg, sizeof(*sa));
5252 if (!sa->qgroupid) {
5257 trans = btrfs_join_transaction(root);
5258 if (IS_ERR(trans)) {
5259 ret = PTR_ERR(trans);
5264 ret = btrfs_create_qgroup(trans, sa->qgroupid);
5266 ret = btrfs_remove_qgroup(trans, sa->qgroupid);
5269 err = btrfs_end_transaction(trans);
5276 mnt_drop_write_file(file);
5280 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
5282 struct inode *inode = file_inode(file);
5283 struct btrfs_root *root = BTRFS_I(inode)->root;
5284 struct btrfs_ioctl_qgroup_limit_args *sa;
5285 struct btrfs_trans_handle *trans;
5290 if (!capable(CAP_SYS_ADMIN))
5293 ret = mnt_want_write_file(file);
5297 sa = memdup_user(arg, sizeof(*sa));
5303 trans = btrfs_join_transaction(root);
5304 if (IS_ERR(trans)) {
5305 ret = PTR_ERR(trans);
5309 qgroupid = sa->qgroupid;
5311 /* take the current subvol as qgroup */
5312 qgroupid = root->root_key.objectid;
5315 ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
5317 err = btrfs_end_transaction(trans);
5324 mnt_drop_write_file(file);
5328 static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
5330 struct inode *inode = file_inode(file);
5331 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5332 struct btrfs_ioctl_quota_rescan_args *qsa;
5335 if (!capable(CAP_SYS_ADMIN))
5338 ret = mnt_want_write_file(file);
5342 qsa = memdup_user(arg, sizeof(*qsa));
5353 ret = btrfs_qgroup_rescan(fs_info);
5358 mnt_drop_write_file(file);
5362 static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
5364 struct inode *inode = file_inode(file);
5365 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5366 struct btrfs_ioctl_quota_rescan_args *qsa;
5369 if (!capable(CAP_SYS_ADMIN))
5372 qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
5376 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
5378 qsa->progress = fs_info->qgroup_rescan_progress.objectid;
5381 if (copy_to_user(arg, qsa, sizeof(*qsa)))
5388 static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
5390 struct inode *inode = file_inode(file);
5391 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5393 if (!capable(CAP_SYS_ADMIN))
5396 return btrfs_qgroup_wait_for_completion(fs_info, true);
static long _btrfs_ioctl_set_received_subvol(struct file *file,
					    struct btrfs_ioctl_received_subvol_args *sa)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct btrfs_trans_handle *trans;
	struct timespec64 ct = current_time(inode);
	int ret = 0;
	int received_uuid_changed;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret < 0)
		return ret;

	down_write(&fs_info->subvol_sem);

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

	/*
	 * 1 - root item
	 * 2 - uuid items (received uuid + subvol uuid)
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	sa->rtransid = trans->transid;
	sa->rtime.sec = ct.tv_sec;
	sa->rtime.nsec = ct.tv_nsec;

	received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
				       BTRFS_UUID_SIZE);
	if (received_uuid_changed &&
	    !btrfs_is_empty_uuid(root_item->received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
					     BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					     root->root_key.objectid);
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			btrfs_end_transaction(trans);
			goto out;
		}
	}
	memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
	btrfs_set_root_stransid(root_item, sa->stransid);
	btrfs_set_root_rtransid(root_item, sa->rtransid);
	btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
	btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
	if (ret < 0) {
		btrfs_end_transaction(trans);
		goto out;
	}
	if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
		ret = btrfs_uuid_tree_add(trans, sa->uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  root->root_key.objectid);
		if (ret < 0 && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			btrfs_end_transaction(trans);
			goto out;
		}
	}
	ret = btrfs_commit_transaction(trans);
out:
	up_write(&fs_info->subvol_sem);
	mnt_drop_write_file(file);
	return ret;
}
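/*
 * The common helper above is shared by the native and compat entry points
 * below.  Note the direction of the fields: uuid/stransid/stime are inputs
 * from the send stream, while rtransid/rtime are outputs stamped by the
 * helper with the current transaction id and time, which is why both
 * wrappers copy the args back to userspace after the call.
 */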
#ifdef CONFIG_64BIT
static long btrfs_ioctl_set_received_subvol_32(struct file *file,
						void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
	struct btrfs_ioctl_received_subvol_args *args64 = NULL;
	int ret = 0;

	args32 = memdup_user(arg, sizeof(*args32));
	if (IS_ERR(args32))
		return PTR_ERR(args32);

	args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
	if (!args64) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
	args64->stransid = args32->stransid;
	args64->rtransid = args32->rtransid;
	args64->stime.sec = args32->stime.sec;
	args64->stime.nsec = args32->stime.nsec;
	args64->rtime.sec = args32->rtime.sec;
	args64->rtime.nsec = args32->rtime.nsec;
	args64->flags = args32->flags;

	ret = _btrfs_ioctl_set_received_subvol(file, args64);
	if (ret)
		goto out;

	memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
	args32->stransid = args64->stransid;
	args32->rtransid = args64->rtransid;
	args32->stime.sec = args64->stime.sec;
	args32->stime.nsec = args64->stime.nsec;
	args32->rtime.sec = args64->rtime.sec;
	args32->rtime.nsec = args64->rtime.nsec;
	args32->flags = args64->flags;

	ret = copy_to_user(arg, args32, sizeof(*args32));
	if (ret)
		ret = -EFAULT;

out:
	kfree(args32);
	kfree(args64);
	return ret;
}
#endif
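/*
 * Illustrative userspace call for the native entry point below (a sketch;
 * subvol_fd, sent_uuid and sent_ctransid are hypothetical names for an fd
 * open on the root of a received subvolume and values taken from the send
 * stream).  On success the kernel has filled in rtransid/rtime:
 *
 *	struct btrfs_ioctl_received_subvol_args rs = { 0 };
 *
 *	memcpy(rs.uuid, sent_uuid, BTRFS_UUID_SIZE);
 *	rs.stransid = sent_ctransid;
 *	ioctl(subvol_fd, BTRFS_IOC_SET_RECEIVED_SUBVOL, &rs);
 */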
static long btrfs_ioctl_set_received_subvol(struct file *file,
					    void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args *sa = NULL;
	int ret = 0;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	ret = _btrfs_ioctl_set_received_subvol(file, sa);
	if (ret)
		goto out;

	ret = copy_to_user(arg, sa, sizeof(*sa));
	if (ret)
		ret = -EFAULT;

out:
	kfree(sa);
	return ret;
}
static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	size_t len;
	int ret;
	char label[BTRFS_LABEL_SIZE];

	spin_lock(&fs_info->super_lock);
	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
	spin_unlock(&fs_info->super_lock);

	len = strnlen(label, BTRFS_LABEL_SIZE);

	if (len == BTRFS_LABEL_SIZE) {
		btrfs_warn(fs_info,
			   "label is too long, return the first %zu bytes",
			   --len);
	}

	ret = copy_to_user(arg, label, len);

	return ret ? -EFAULT : 0;
}
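/*
 * Userspace sketch for the ioctl above (illustrative only): only the label
 * bytes themselves are copied out, with no terminating NUL, so callers
 * should zero a full-size buffer first:
 *
 *	char label[BTRFS_LABEL_SIZE] = { 0 };
 *
 *	ioctl(fd, BTRFS_IOC_GET_FSLABEL, label);
 */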
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_trans_handle *trans;
	char label[BTRFS_LABEL_SIZE];
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(label, arg, sizeof(label)))
		return -EFAULT;

	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
		btrfs_err(fs_info,
			  "unable to set label with more than %d bytes",
			  BTRFS_LABEL_SIZE - 1);
		return -EINVAL;
	}

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	/* super_lock serializes readers of the label against this update */
	spin_lock(&fs_info->super_lock);
	strcpy(super_block->label, label);
	spin_unlock(&fs_info->super_lock);
	ret = btrfs_commit_transaction(trans);

out_unlock:
	mnt_drop_write_file(file);
	return ret;
}
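/*
 * Matching userspace sketch (illustrative): the kernel copies in a full
 * BTRFS_LABEL_SIZE bytes, so pass a buffer of that size holding a
 * NUL-terminated string of at most BTRFS_LABEL_SIZE - 1 characters, not a
 * short string literal:
 *
 *	char label[BTRFS_LABEL_SIZE] = "backup-pool";
 *
 *	ioctl(fd, BTRFS_IOC_SET_FSLABEL, label);
 */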
#define INIT_FEATURE_FLAGS(suffix) \
	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }

int btrfs_ioctl_get_supported_features(void __user *arg)
{
	static const struct btrfs_ioctl_feature_flags features[3] = {
		INIT_FEATURE_FLAGS(SUPP),
		INIT_FEATURE_FLAGS(SAFE_SET),
		INIT_FEATURE_FLAGS(SAFE_CLEAR)
	};

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}
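/*
 * The three array slots above mirror the INIT_FEATURE_FLAGS() initializers:
 * [0] every feature bit this kernel knows about, [1] bits that may safely
 * be set on a mounted filesystem, [2] bits that may safely be cleared while
 * mounted.  Userspace is expected to consult these masks before calling
 * BTRFS_IOC_SET_FEATURES.
 */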
static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags features;

	features.compat_flags = btrfs_super_compat_flags(super_block);
	features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
	features.incompat_flags = btrfs_super_incompat_flags(super_block);

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}
static int check_feature_bits(struct btrfs_fs_info *fs_info,
			      enum btrfs_feature_set set,
			      u64 change_mask, u64 flags, u64 supported_flags,
			      u64 safe_set, u64 safe_clear)
{
	const char *type = btrfs_feature_set_names[set];
	char *names;
	u64 disallowed, unsupported;
	u64 set_mask = flags & change_mask;
	u64 clear_mask = ~flags & change_mask;

	unsupported = set_mask & ~supported_flags;
	if (unsupported) {
		names = btrfs_printable_features(set, unsupported);
		if (names) {
			btrfs_warn(fs_info,
				   "this kernel does not support the %s feature bit%s",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "this kernel does not support %s bits 0x%llx",
				   type, unsupported);
		return -EOPNOTSUPP;
	}

	disallowed = set_mask & ~safe_set;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't set the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't set %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	disallowed = clear_mask & ~safe_clear;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't clear the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't clear %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	return 0;
}
#define check_feature(fs_info, change_mask, flags, mask_base)	\
	check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \
			   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
			   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
			   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
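/*
 * BTRFS_IOC_SET_FEATURES takes two btrfs_ioctl_feature_flags structs:
 * flags[0] selects which bits to change and flags[1] gives their new
 * values, so a bit is set when present in both and cleared when present
 * only in flags[0].  A hedged userspace sketch, using a bit the INCOMPAT
 * SAFE_SET mask has historically allowed while mounted:
 *
 *	struct btrfs_ioctl_feature_flags flags[2] = { { 0 } };
 *
 *	flags[0].incompat_flags = BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF;
 *	flags[1].incompat_flags = BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF;
 *	ioctl(fd, BTRFS_IOC_SET_FEATURES, &flags);
 */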
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do if the change mask is empty */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;

	ret = check_feature(fs_info, flags[0].compat_flags,
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].compat_ro_flags,
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].incompat_flags,
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop_write;
	}

	/* apply the requested set/clear bits under super_lock */
	spin_lock(&fs_info->super_lock);
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
	spin_unlock(&fs_info->super_lock);

	ret = btrfs_commit_transaction(trans);
out_drop_write:
	mnt_drop_write_file(file);

	return ret;
}
static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
{
	struct btrfs_ioctl_send_args *arg;
	int ret;

	if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
		struct btrfs_ioctl_send_args_32 args32;

		ret = copy_from_user(&args32, argp, sizeof(args32));
		if (ret)
			return -EFAULT;
		arg = kzalloc(sizeof(*arg), GFP_KERNEL);
		if (!arg)
			return -ENOMEM;
		arg->send_fd = args32.send_fd;
		arg->clone_sources_count = args32.clone_sources_count;
		arg->clone_sources = compat_ptr(args32.clone_sources);
		arg->parent_root = args32.parent_root;
		arg->flags = args32.flags;
		memcpy(arg->reserved, args32.reserved,
		       sizeof(args32.reserved));
#else
		return -ENOTTY;
#endif
	} else {
		arg = memdup_user(argp, sizeof(*arg));
		if (IS_ERR(arg))
			return PTR_ERR(arg);
	}

	ret = btrfs_ioctl_send(file, arg);
	kfree(arg);
	return ret;
}
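/*
 * The compat path above exists because struct btrfs_ioctl_send_args embeds
 * a userspace pointer (clone_sources): a 32-bit caller on a 64-bit kernel
 * hands in a 32-bit pointer, so the struct layouts differ and the value
 * must be widened with compat_ptr() before the common send code runs.
 */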
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return btrfs_ioctl_getflags(file, argp);
	case FS_IOC_SETFLAGS:
		return btrfs_ioctl_setflags(file, argp);
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
	case FITRIM:
		return btrfs_ioctl_fitrim(file, argp);
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SNAP_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp);
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(file, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(file, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(fs_info, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(file, argp);
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
	case BTRFS_IOC_FS_INFO:
		return btrfs_ioctl_fs_info(fs_info, argp);
	case BTRFS_IOC_DEV_INFO:
		return btrfs_ioctl_dev_info(fs_info, argp);
	case BTRFS_IOC_BALANCE:
		return btrfs_ioctl_balance(file, NULL);
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(file, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
	case BTRFS_IOC_LOGICAL_INO_V2:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(fs_info, argp);
	case BTRFS_IOC_SYNC: {
		int ret;

		ret = btrfs_start_delalloc_roots(fs_info, -1);
		if (ret)
			return ret;
		ret = btrfs_sync_fs(inode->i_sb, 1);
		/*
		 * The transaction thread may want to do more work,
		 * namely it pokes the cleaner kthread that will start
		 * processing uncleaned subvols.
		 */
		wake_up_process(fs_info->transaction_kthread);
		return ret;
	}
	case BTRFS_IOC_START_SYNC:
		return btrfs_ioctl_start_sync(root, argp);
	case BTRFS_IOC_WAIT_SYNC:
		return btrfs_ioctl_wait_sync(fs_info, argp);
	case BTRFS_IOC_SCRUB:
		return btrfs_ioctl_scrub(file, argp);
	case BTRFS_IOC_SCRUB_CANCEL:
		return btrfs_ioctl_scrub_cancel(fs_info);
	case BTRFS_IOC_SCRUB_PROGRESS:
		return btrfs_ioctl_scrub_progress(fs_info, argp);
	case BTRFS_IOC_BALANCE_V2:
		return btrfs_ioctl_balance(file, argp);
	case BTRFS_IOC_BALANCE_CTL:
		return btrfs_ioctl_balance_ctl(fs_info, arg);
	case BTRFS_IOC_BALANCE_PROGRESS:
		return btrfs_ioctl_balance_progress(fs_info, argp);
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
	case BTRFS_IOC_SEND:
		return _btrfs_ioctl_send(file, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_SEND_32:
		return _btrfs_ioctl_send(file, argp, true);
#endif
	case BTRFS_IOC_GET_DEV_STATS:
		return btrfs_ioctl_get_dev_stats(fs_info, argp);
	case BTRFS_IOC_QUOTA_CTL:
		return btrfs_ioctl_quota_ctl(file, argp);
	case BTRFS_IOC_QGROUP_ASSIGN:
		return btrfs_ioctl_qgroup_assign(file, argp);
	case BTRFS_IOC_QGROUP_CREATE:
		return btrfs_ioctl_qgroup_create(file, argp);
	case BTRFS_IOC_QGROUP_LIMIT:
		return btrfs_ioctl_qgroup_limit(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
		return btrfs_ioctl_quota_rescan_status(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
		return btrfs_ioctl_quota_rescan_wait(file, argp);
	case BTRFS_IOC_DEV_REPLACE:
		return btrfs_ioctl_dev_replace(fs_info, argp);
	case BTRFS_IOC_GET_FSLABEL:
		return btrfs_ioctl_get_fslabel(file, argp);
	case BTRFS_IOC_SET_FSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		return btrfs_ioctl_get_supported_features(argp);
	case BTRFS_IOC_GET_FEATURES:
		return btrfs_ioctl_get_features(file, argp);
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
	case FS_IOC_FSGETXATTR:
		return btrfs_ioctl_fsgetxattr(file, argp);
	case FS_IOC_FSSETXATTR:
		return btrfs_ioctl_fssetxattr(file, argp);
	case BTRFS_IOC_GET_SUBVOL_INFO:
		return btrfs_ioctl_get_subvol_info(file, argp);
	case BTRFS_IOC_GET_SUBVOL_ROOTREF:
		return btrfs_ioctl_get_subvol_rootref(file, argp);
	case BTRFS_IOC_INO_LOOKUP_USER:
		return btrfs_ioctl_ino_lookup_user(file, argp);
	}

	return -ENOTTY;
}
#ifdef CONFIG_COMPAT
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * These all access 32-bit values anyway so no further
	 * handling is necessary.
	 */
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	default:
		return -ENOTTY;
	}

	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif