// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/nospec.h>

#include "pnode.h"
#include "internal.h"
/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __ro_after_init;
static unsigned int m_hash_shift __ro_after_init;
static unsigned int mp_hash_mask __ro_after_init;
static unsigned int mp_hash_shift __ro_after_init;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);
static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
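/*
 * Illustrative note (an assumption about usage, not original text): the
 * two knobs above are boot parameters, passed on the kernel command
 * line, e.g.:
 *
 *	mhash_entries=8192 mphash_entries=1024
 *
 * They only override the sizing of the two hash tables allocated at
 * boot below.
 */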
static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

/* Don't allow confusion with old 32bit mount ID */
static atomic64_t mnt_id_ctr = ATOMIC64_INIT(1ULL << 32);

static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	bool recurse;
	struct user_namespace *mnt_userns;
	struct mnt_idmap *mnt_idmap;
};
struct kobject *fs_kobj __ro_after_init;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e., during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	mnt->mnt_id_unique = atomic64_inc_return(&mnt_id_ctr);
	return 0;
}
static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}
/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}
/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}
/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name,
							 GFP_KERNEL_ACCOUNT);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}
static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
		return 1;
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure if we don't see s_readonly_remount set yet, we also will
	 * not see any superblock / mount flag changes done by remount.
	 * It also pairs with the barrier in sb_end_ro_state_change()
	 * assuring that if we see s_readonly_remount already cleared, we will
	 * see the values of superblock / mount flags updated by remount.
	 */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * mnt_get_write_access - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mnt is read-write) before
 * returning success. This operation does not protect against filesystem being
 * frozen. When the write operation is finished, mnt_put_write_access() must be
 * called. This is effectively a refcount.
 */
int mnt_get_write_access(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	might_lock(&mount_lock.lock);
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
		} else {
			/*
			 * This prevents priority inversion, if the task
			 * setting MNT_WRITE_HOLD got preempted on a remote
			 * CPU, and it prevents livelock if the task setting
			 * MNT_WRITE_HOLD has a lower priority and is bound to
			 * the same CPU as the task that is spinning here.
			 */
			preempt_enable();
			lock_mount_hash();
			unlock_mount_hash();
			preempt_disable();
		}
	}
	/*
	 * The barrier pairs with the barrier sb_start_ro_state_change() making
	 * sure that if we see MNT_WRITE_HOLD cleared, we will also see
	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
	 * mnt_is_readonly() and bail in case we are racing with remount
	 * read-only.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(mnt_get_write_access);
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = mnt_get_write_access(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
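/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * a typical write-side caller brackets the operation with
 * mnt_want_write()/mnt_drop_write(), which also takes and releases
 * freeze protection on the superblock.
 */
static inline int example_write_op(struct path *path)
{
	int err;

	err = mnt_want_write(path->mnt);	/* freeze + r/o checks */
	if (err)
		return err;
	/* ... modify something under @path here ... */
	mnt_drop_write(path->mnt);		/* release write access */
	return 0;
}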
/**
 * mnt_get_write_access_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_get_write_access, but if @file is already open for write it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with mnt_put_write_access_file.
 */
int mnt_get_write_access_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return mnt_get_write_access(file->f_path.mnt);
}
/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = mnt_get_write_access_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
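/*
 * Illustrative sketch (hypothetical helper): ioctl-style code that
 * writes through an already-open file pairs mnt_want_write_file()
 * with mnt_drop_write_file(); for a file open for writing only the
 * freeze protection and the emergency r/o check are performed.
 */
static inline int example_file_write_op(struct file *file)
{
	int err;

	err = mnt_want_write_file(file);
	if (err)
		return err;
	/* ... update the inode behind @file here ... */
	mnt_drop_write_file(file);
	return 0;
}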
/**
 * mnt_put_write_access - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_get_write_access() call above.
 */
void mnt_put_write_access(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_put_write_access);
/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	mnt_put_write_access(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
void mnt_put_write_access_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		mnt_put_write_access(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_put_write_access_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects lock_mount_hash() to be held serializing
 *          setting MNT_WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}
/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt.
 *
 * This function can only be called after a successful call to
 * mnt_hold_writers().
 *
 * Context: This function expects lock_mount_hash() to be held.
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}
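/*
 * Illustrative sketch (hypothetical, following the locking rules
 * documented above): changing a property that must stay stable for
 * writers pairs mnt_hold_writers() with mnt_unhold_writers() under
 * lock_mount_hash(); mnt_make_readonly() below is the in-tree
 * instance of this pattern.
 */
static inline int example_change_mount_prop(struct mount *mnt, int flag)
{
	int ret;

	lock_mount_hash();
	ret = mnt_hold_writers(mnt);	/* -EBUSY if writers are active */
	if (!ret)
		mnt->mnt.mnt_flags |= flag;
	mnt_unhold_writers(mnt);
	unlock_mount_hash();
	return ret;
}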
static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}
int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(mnt);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err)
		sb_start_ro_state_change(sb);
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}
static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}
/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();			// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}
/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}
/**
 * __lookup_mnt - find first child mount
 * @mnt:    parent mount
 * @dentry: mountpoint
 *
 * If @mnt has a child mount @c mounted at @dentry, find and return it.
 *
 * Note that the child mount @c need not be unique. There are cases
 * where shadow mounts are created. For example, during mount
 * propagation when a source mount @mnt whose root got overmounted by a
 * mount @o after path lookup but before @namespace_sem could be
 * acquired gets copied and propagated. So @mnt gets copied including
 * @o. When @mnt is propagated to a destination mount @d that already
 * has another mount @n mounted at the same mountpoint then the source
 * mount @mnt will be tucked beneath @n, i.e., @n will be mounted on
 * @mnt and @mnt mounted on @d. Now both @n and @o are mounted at @mnt
 * on @dentry.
 *
 * Return: The first child of @mnt mounted at @dentry or NULL.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}
/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt, *n;
	bool is_covered = false;

	down_read(&namespace_sem);
	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);

	return is_covered;
}
static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}
static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dget(dentry);
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}
/*
 * vfsmount lock must be held.  Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
	__put_mountpoint(mp, &ex_mountpoints);
}
static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}
/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
	struct mountpoint *mp;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	return mp;
}
/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	put_mountpoint(unhash_mnt(mnt));
}
/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}
/**
 * mnt_set_mountpoint_beneath - mount a mount beneath another one
 *
 * @new_parent: the source mount
 * @top_mnt:    the mount beneath which @new_parent is mounted
 * @new_mp:     the new mountpoint of @top_mnt on @new_parent
 *
 * Remove @top_mnt from its current mountpoint @top_mnt->mnt_mp and
 * parent @top_mnt->mnt_parent and mount it on top of @new_parent at
 * @new_mp. And mount @new_parent on the old parent and old
 * mountpoint of @top_mnt.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void mnt_set_mountpoint_beneath(struct mount *new_parent,
				       struct mount *top_mnt,
				       struct mountpoint *new_mp)
{
	struct mount *old_top_parent = top_mnt->mnt_parent;
	struct mountpoint *old_top_mp = top_mnt->mnt_mp;

	mnt_set_mountpoint(old_top_parent, old_top_mp, new_parent);
	mnt_change_mountpoint(new_parent, new_mp, top_mnt);
}
static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}
/**
 * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
 *              list of child mounts
 * @parent:  the parent
 * @mnt:     the new mount
 * @mp:      the new mountpoint
 * @beneath: whether to mount @mnt beneath or on top of @parent
 *
 * If @beneath is false, mount @mnt at @mp on @parent. Then attach @mnt
 * to @parent's child mount list and to @mount_hashtable.
 *
 * If @beneath is true, remove @mnt from its current parent and
 * mountpoint and mount it on @mp on @parent, and mount @parent on the
 * old parent and old mountpoint of @mnt. Finally, attach @parent to
 * @mnt_hashtable and @parent->mnt_parent->mnt_mounts.
 *
 * Note, when __attach_mnt() is called @mnt->mnt_parent already points
 * to the correct parent.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void attach_mnt(struct mount *mnt, struct mount *parent,
		       struct mountpoint *mp, bool beneath)
{
	if (beneath)
		mnt_set_mountpoint_beneath(mnt, parent, mp);
	else
		mnt_set_mountpoint(parent, mp, mnt);
	/*
	 * Note, @mnt->mnt_parent has to be used. If @mnt was mounted
	 * beneath @parent then @mnt will need to be attached to
	 * @parent's old parent, not @parent. IOW, @mnt->mnt_parent
	 * isn't the same mount as @parent.
	 */
	__attach_mnt(mnt, mnt->mnt_parent);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp, false);

	put_mountpoint(old_mp);
	mnt_add_count(old_parent, -1);
}
static inline struct mount *node_to_mount(struct rb_node *node)
{
	return node ? rb_entry(node, struct mount, mnt_node) : NULL;
}
static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
{
	struct rb_node **link = &ns->mounts.rb_node;
	struct rb_node *parent = NULL;

	WARN_ON(mnt->mnt.mnt_flags & MNT_ONRB);

	while (*link) {
		parent = *link;
		if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&mnt->mnt_node, parent, link);
	rb_insert_color(&mnt->mnt_node, &ns->mounts);
	mnt->mnt.mnt_flags |= MNT_ONRB;
}
/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	while (!list_empty(&head)) {
		m = list_first_entry(&head, typeof(*m), mnt_list);
		list_del(&m->mnt_list);

		mnt_add_to_ns(n, m);
	}
	n->nr_mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}
static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock.  If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb = fc->root->d_sb;
	mnt->mnt.mnt_root = dget(fc->root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);
struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);
struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);
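/*
 * Illustrative sketch (hypothetical in-kernel caller, not part of this
 * file): mounting a filesystem internally and dropping it again; type
 * lookup and real error handling are the caller's responsibility.
 */
static inline void example_vfs_kern_mount(struct file_system_type *type)
{
	struct vfsmount *mnt;

	mnt = vfs_kern_mount(type, 0, "none", NULL);
	if (!IS_ERR(mnt))
		mntput(mnt);	/* drop the reference fc_mount() handed us */
}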
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL|MNT_ONRB);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}
static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us.  However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL.  So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__put_mountpoint(unhash_mnt(p), &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}
void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong */
		if (unlikely(m->mnt_expiry_mark))
			WRITE_ONCE(m->mnt_expiry_mark, 0);
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);
struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
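/*
 * Illustrative note: mntget()/mntput() pair like any other get/put in
 * the VFS. A caller that must keep a vfsmount alive across a sleep
 * does, e.g.:
 *
 *	struct vfsmount *m = mntget(path->mnt);
 *	...
 *	mntput(m);
 */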
/*
 * Make a mount point inaccessible to new lookups.
 * Because there may still be current users, the caller MUST WAIT
 * for an RCU grace period before destroying the mount point.
 */
void mnt_make_shortterm(struct vfsmount *mnt)
{
	if (mnt)
		real_mount(mnt)->mnt_ns = NULL;
}
/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);
struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}
/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * smallest id after the specified one.
 */
static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id <= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return ret;
}
#ifdef CONFIG_PROC_FS

/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);

	return mnt_find_id_at(p->ns, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct mount *next = NULL, *mnt = v;
	struct rb_node *node = rb_next(&mnt->mnt_node);

	++*pos;
	if (node) {
		next = node_to_mount(node);
		*pos = next->mnt_id_unique;
	}
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};

#endif  /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}
EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}
EXPORT_SYMBOL(may_umount);
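/*
 * Illustrative note (usage assumption): expiry logic of the autofs
 * kind uses these checks as a cheap, non-destructive probe before
 * attempting an actual unmount:
 *
 *	if (may_umount(mnt))
 *		... try to expire the mount ...
 */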
static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);

	up_write(&namespace_sem);

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}
enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}
/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		if (p->mnt.mnt_flags & MNT_ONRB)
			move_from_ns(p, &tmp_list);
		else
			list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->nr_mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
		if (disconnect)
			hlist_add_head(&p->mnt_umount, &unmounted);
	}
}
static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}
static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		return do_umount_root(sb);
	}

	namespace_lock();
	lock_mount_hash();

	/* Recheck MNT_LOCKED with the locks held */
	retval = -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	event++;
	if (flags & MNT_DETACH) {
		if (mnt->mnt.mnt_flags & MNT_ONRB ||
		    !list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (mnt->mnt.mnt_flags & MNT_ONRB ||
			    !list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
out:
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (!mp)
		goto out_unlock;

	event++;
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			umount_mnt(mnt);
			hlist_add_head(&mnt->mnt_umount, &unmounted);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}
/*
 * Is the caller allowed to modify his namespace?
 */
bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}
/**
 * path_mounted - check whether path is mounted
 * @path: path to check
 *
 * Determine whether @path refers to the root of a mount.
 *
 * Return: true if @path is the root of a mount, false if not.
 */
static inline bool path_mounted(const struct path *path)
{
	return path->mnt->mnt_root == path->dentry;
}
static void warn_mandlock(void)
{
	pr_warn_once("=======================================================\n"
		     "WARNING: The mand mount option has been deprecated\n"
		     "         and is ignored by this kernel. Remove the mand\n"
		     "         option from the mount to silence this warning.\n"
		     "=======================================================\n");
}
static int can_umount(const struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);

	if (!may_mount())
		return -EPERM;
	if (!path_mounted(path))
		return -EINVAL;
	if (!check_mnt(mnt))
		return -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
		return -EINVAL;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}
// caller is responsible for flags being sane
int path_umount(struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);
	int ret;

	ret = can_umount(path, flags);
	if (!ret)
		ret = do_umount(mnt, flags);

	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path->dentry);
	mntput_no_expire(mnt);
	return ret;
}
static int ksys_umount(char __user *name, int flags)
{
	int lookup_flags = LOOKUP_MOUNTPOINT;
	struct path path;
	int ret;

	// basic validity checks done first
	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (ret)
		return ret;
	return path_umount(&path, flags);
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	return ksys_umount(name, flags);
}
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return ksys_umount(name, 0);
}

#endif
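/*
 * Illustrative note: from userspace the flags accepted above map to
 * umount2(2), e.g.
 *
 *	umount2("/mnt", MNT_DETACH);			// lazy unmount
 *	umount2("/mnt", UMOUNT_NOFOLLOW | MNT_FORCE);	// no symlink follow
 */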
static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
{
	return &mnt->ns;
}
static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				if (s->mnt.mnt_flags & MNT_LOCKED) {
					/* Both unbindable and locked. */
					q = ERR_PTR(-EPERM);
					goto out;
				} else {
					s = skip_mnt_tree(s);
					continue;
				}
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp, false);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}
/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(const struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}
static void free_mnt_ns(struct mnt_namespace *);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);

void dissolve_on_fput(struct vfsmount *mnt)
{
	struct mnt_namespace *ns;
	namespace_lock();
	lock_mount_hash();
	ns = real_mount(mnt)->mnt_ns;
	if (ns) {
		if (is_anon_ns(ns))
			umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
		else
			ns = NULL;
	}
	unlock_mount_hash();
	namespace_unlock();
	if (ns)
		free_mnt_ns(ns);
}
void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;

	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}
/**
 * clone_private_mount - create a private clone of a path
 * @path: path to clone
 *
 * This creates a new vfsmount, which will be the clone of @path. The new mount
 * will not be attached anywhere in the namespace and will be private (i.e.
 * changes to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	down_read(&namespace_sem);
	if (IS_MNT_UNBINDABLE(old_mnt))
		goto invalid;

	if (!check_mnt(old_mnt))
		goto invalid;

	if (has_locked_children(old_mnt, path->dentry))
		goto invalid;

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	up_read(&namespace_sem);

	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	/* Longterm mount to be removed by kern_unmount*() */
	new_mnt->mnt_ns = MNT_NS_INTERNAL;

	return &new_mnt->mnt;

invalid:
	up_read(&namespace_sem);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(clone_private_mount);
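/*
 * Illustrative note (usage assumption): overlayfs-style callers clone
 * each layer with this helper and keep the result entirely private:
 *
 *	struct vfsmount *m = clone_private_mount(&layer_path);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...		// internal lookups only; never attached
 *	mntput(m);	// longterm mounts go through kern_unmount()
 */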
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
static void lock_mnt_tree(struct mount *mnt)
{
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		int flags = p->mnt.mnt_flags;
		/* Don't allow unprivileged users to change mount flags */
		flags |= MNT_LOCK_ATIME;

		if (flags & MNT_READONLY)
			flags |= MNT_LOCK_READONLY;

		if (flags & MNT_NODEV)
			flags |= MNT_LOCK_NODEV;

		if (flags & MNT_NOSUID)
			flags |= MNT_LOCK_NOSUID;

		if (flags & MNT_NOEXEC)
			flags |= MNT_LOCK_NOEXEC;
		/* Don't allow unprivileged users to reveal what is under a mount */
		if (list_empty(&p->mnt_expire))
			flags |= MNT_LOCKED;
		p->mnt.mnt_flags = flags;
	}
}
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}
static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0;
	struct mount *p;

	if (ns->nr_mounts >= max)
		return -ENOSPC;
	max -= ns->nr_mounts;
	if (ns->pending_mounts >= max)
		return -ENOSPC;
	max -= ns->pending_mounts;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	if (mounts > max)
		return -ENOSPC;

	ns->pending_mounts += mounts;
	return 0;
}
enum mnt_tree_flags_t {
	MNT_TREE_MOVE = BIT(0),
	MNT_TREE_BENEATH = BIT(1),
};
/**
 * attach_recursive_mnt - attach a source mount tree
 * @source_mnt: mount tree to be attached
 * @top_mnt:    mount that @source_mnt will be mounted on or mounted beneath
 * @dest_mp:    the mountpoint @source_mnt will be mounted at
 * @flags:      modify how @source_mnt is supposed to be attached
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *	 tree of the destination mount and the cloned mount is added to
 *	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |                    MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *	 all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *	 all the mounts belonging to the destination mount's propagation tree.
 *	 the mount is marked as 'shared and slave'.
 * (*)	 the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 *
 * Context: The function expects namespace_lock() to be held.
 * Return: If @source_mnt was successfully attached 0 is returned.
 *         Otherwise a negative error code is returned.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
				struct mount *top_mnt,
				struct mountpoint *dest_mp,
				enum mnt_tree_flags_t flags)
{
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	HLIST_HEAD(tree_list);
	struct mnt_namespace *ns = top_mnt->mnt_ns;
	struct mountpoint *smp;
	struct mount *child, *dest_mnt, *p;
	struct hlist_node *n;
	int err = 0;
	bool moving = flags & MNT_TREE_MOVE, beneath = flags & MNT_TREE_BENEATH;

	/*
	 * Preallocate a mountpoint in case the new mounts need to be
	 * mounted beneath mounts on the same mountpoint.
	 */
	smp = get_mountpoint(source_mnt->mnt.mnt_root);
	if (IS_ERR(smp))
		return PTR_ERR(smp);

	/* Is there space to add these mounts to the mount namespace? */
	if (!moving) {
		err = count_mounts(ns, source_mnt);
		if (err)
			goto out;
	}

	if (beneath)
		dest_mnt = top_mnt->mnt_parent;
	else
		dest_mnt = top_mnt;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
	}
	lock_mount_hash();
	if (err)
		goto out_cleanup_ids;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	if (moving) {
		if (beneath)
			dest_mp = smp;
		unhash_mnt(source_mnt);
		attach_mnt(source_mnt, top_mnt, dest_mp, beneath);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		if (source_mnt->mnt_ns) {
			LIST_HEAD(head);

			/* move from anon - the caller will destroy */
			for (p = source_mnt; p; p = next_mnt(p, source_mnt))
				move_from_ns(p, &head);
			list_del_init(&head);
		}
		if (beneath)
			mnt_set_mountpoint_beneath(source_mnt, top_mnt, smp);
		else
			mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt(&child->mnt_parent->mnt,
				 child->mnt_mountpoint);
		if (q)
			mnt_change_mountpoint(child, smp, q);
		/* Notice when we are propagating across user namespaces */
		if (child->mnt_parent->mnt_ns->user_ns != user_ns)
			lock_mnt_tree(child);
		child->mnt.mnt_flags &= ~MNT_LOCKED;
		commit_tree(child);
	}
	put_mountpoint(smp);
	unlock_mount_hash();

	return 0;

out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		child->mnt_parent->mnt_ns->pending_mounts = 0;
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
out:
	ns->pending_mounts = 0;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(smp);
	read_sequnlock_excl(&mount_lock);

	return err;
}
2409 * do_lock_mount - lock mount and mountpoint
2410 * @path: target path
2411 * @beneath: whether the intention is to mount beneath @path
2413 * Follow the mount stack on @path until the top mount @mnt is found. If
2414 * the initial @path->{mnt,dentry} is a mountpoint lookup the first
2415 * mount stacked on top of it. Then simply follow @{mnt,mnt->mnt_root}
2416 * until nothing is stacked on top of it anymore.
2418 * Acquire the inode_lock() on the top mount's ->mnt_root to protect
2419 * against concurrent removal of the new mountpoint from another mount
2422 * If @beneath is requested, the inode_lock() on @mnt's mountpoint
2423 * @mp on @mnt->mnt_parent must be acquired. This protects against a
2424 * concurrent unlink of @mp->mnt_dentry from another mount namespace
2425 * where @mnt doesn't have a child mount mounted on @mp. A concurrent
2426 * removal of @mnt->mnt_root doesn't matter as nothing will be mounted
2427 * on top of it for @beneath.
2429 * In addition, @beneath needs to make sure that @mnt hasn't been
2430 * unmounted or moved from its current mountpoint in between dropping
2431 * @mount_lock and acquiring @namespace_sem. For the !@beneath case @mnt
2432 * being unmounted would be detected later by e.g., calling
2433 * check_mnt(mnt) in the function it's called from. For the @beneath
2434 * case however, it's useful to detect it directly in do_lock_mount().
2435 * If @mnt hasn't been unmounted then @mnt->mnt_mountpoint still points
2436 * to @mnt->mnt_mp->m_dentry. But if @mnt has been unmounted it will
2437 * point to @mnt->mnt_root and @mnt->mnt_mp will be NULL.
2439 * Return: Either the target mountpoint on the top mount or the top
2440 * mount's mountpoint.
2442 static struct mountpoint *do_lock_mount(struct path *path, bool beneath)
2444 struct vfsmount *mnt = path->mnt;
2445 struct dentry *dentry;
2446 struct mountpoint *mp = ERR_PTR(-ENOENT);
2452 m = real_mount(mnt);
2453 read_seqlock_excl(&mount_lock);
2454 dentry = dget(m->mnt_mountpoint);
2455 read_sequnlock_excl(&mount_lock);
2457 dentry = path->dentry;
2460 inode_lock(dentry->d_inode);
2461 if (unlikely(cant_mount(dentry))) {
2462 inode_unlock(dentry->d_inode);
2468 if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) {
2470 inode_unlock(dentry->d_inode);
2474 mnt = lookup_mnt(path);
2479 inode_unlock(dentry->d_inode);
2484 path->dentry = dget(mnt->mnt_root);
2487 mp = get_mountpoint(dentry);
2490 inode_unlock(dentry->d_inode);
2500 static inline struct mountpoint *lock_mount(struct path *path)
2502 return do_lock_mount(path, false);
2505 static void unlock_mount(struct mountpoint *where)
2507 struct dentry *dentry = where->m_dentry;
2509 read_seqlock_excl(&mount_lock);
2510 put_mountpoint(where);
2511 read_sequnlock_excl(&mount_lock);
2514 inode_unlock(dentry->d_inode);
2517 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
2519 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
2522 if (d_is_dir(mp->m_dentry) !=
2523 d_is_dir(mnt->mnt.mnt_root))
2526 return attach_recursive_mnt(mnt, p, mp, 0);
2530 * Sanity check the flags to change_mnt_propagation.
2533 static int flags_to_propagation_type(int ms_flags)
2535 int type = ms_flags & ~(MS_REC | MS_SILENT);
2537 /* Fail if any non-propagation flags are set */
2538 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2539 return 0;
2540 /* Only one propagation flag should be set */
2541 if (!is_power_of_2(type))
2542 return 0;
2543 return type;
2547 * recursively change the type of the mountpoint.
2549 static int do_change_type(struct path *path, int ms_flags)
2552 struct mount *mnt = real_mount(path->mnt);
2553 int recurse = ms_flags & MS_REC;
2557 if (!path_mounted(path))
2560 type = flags_to_propagation_type(ms_flags);
2565 if (type == MS_SHARED) {
2566 err = invent_group_ids(mnt, recurse);
2572 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
2573 change_mnt_propagation(m, type);
2574 unlock_mount_hash();
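/*
 * Illustrative sketch (hypothetical path): userspace reaches
 * do_change_type() via mount(2) with only propagation flags set;
 * source, filesystem type and data are ignored for such calls:
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL); // whole subtree
 *	mount(NULL, "/mnt", NULL, MS_PRIVATE, NULL);         // this mount only
 */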
2581 static struct mount *__do_loopback(struct path *old_path, int recurse)
2583 struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
2585 if (IS_MNT_UNBINDABLE(old))
2588 if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
2591 if (!recurse && has_locked_children(old, old_path->dentry))
2595 mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
2597 mnt = clone_mnt(old, old_path->dentry, 0);
2600 mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2606 * do loopback mount.
2608 static int do_loopback(struct path *path, const char *old_name,
2611 struct path old_path;
2612 struct mount *mnt = NULL, *parent;
2613 struct mountpoint *mp;
2615 if (!old_name || !*old_name)
2617 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
2622 if (mnt_ns_loop(old_path.dentry))
2625 mp = lock_mount(path);
2631 parent = real_mount(path->mnt);
2632 if (!check_mnt(parent))
2635 mnt = __do_loopback(&old_path, recurse);
2641 err = graft_tree(mnt, parent, mp);
2644 umount_tree(mnt, UMOUNT_SYNC);
2645 unlock_mount_hash();
2650 path_put(&old_path);
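/*
 * Illustrative sketch (hypothetical paths): do_loopback() serves bind
 * mounts requested through mount(2):
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);          // one mount
 *	mount("/src", "/dst", NULL, MS_BIND | MS_REC, NULL); // whole subtree
 */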
2654 static struct file *open_detached_copy(struct path *path, bool recursive)
2656 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2657 struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
2658 struct mount *mnt, *p;
2662 return ERR_CAST(ns);
2665 mnt = __do_loopback(path, recursive);
2669 return ERR_CAST(mnt);
2673 for (p = mnt; p; p = next_mnt(p, mnt)) {
2674 mnt_add_to_ns(ns, p);
2679 unlock_mount_hash();
2683 path->mnt = &mnt->mnt;
2684 file = dentry_open(path, O_PATH, current_cred());
2685 if (IS_ERR(file))
2686 dissolve_on_fput(path->mnt);
2687 else
2688 file->f_mode |= FMODE_NEED_UNMOUNT;
2692 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
2696 int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
2697 bool detached = flags & OPEN_TREE_CLONE;
2701 BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
2703 if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
2704 AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
2708 if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
2711 if (flags & AT_NO_AUTOMOUNT)
2712 lookup_flags &= ~LOOKUP_AUTOMOUNT;
2713 if (flags & AT_SYMLINK_NOFOLLOW)
2714 lookup_flags &= ~LOOKUP_FOLLOW;
2715 if (flags & AT_EMPTY_PATH)
2716 lookup_flags |= LOOKUP_EMPTY;
2718 if (detached && !may_mount())
2721 fd = get_unused_fd_flags(flags & O_CLOEXEC);
2725 error = user_path_at(dfd, filename, lookup_flags, &path);
2726 if (unlikely(error)) {
2727 file = ERR_PTR(error);
2730 file = open_detached_copy(&path, flags & AT_RECURSIVE);
2732 file = dentry_open(&path, O_PATH, current_cred());
2737 return PTR_ERR(file);
2739 fd_install(fd, file);
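/*
 * Illustrative sketch (hypothetical paths; depending on the libc these
 * may need to be issued via syscall(2)): clone a subtree into a
 * detached tree and attach it elsewhere:
 *
 *	int fd = open_tree(AT_FDCWD, "/mnt",
 *			   OPEN_TREE_CLONE | AT_RECURSIVE | OPEN_TREE_CLOEXEC);
 *	move_mount(fd, "", AT_FDCWD, "/elsewhere", MOVE_MOUNT_F_EMPTY_PATH);
 */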
2744 * Don't allow locked mount flags to be cleared.
2746 * No locks need to be held here while testing the various MNT_LOCK
2747 * flags because those flags can never be cleared once they are set.
2749 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
2751 unsigned int fl = mnt->mnt.mnt_flags;
2753 if ((fl & MNT_LOCK_READONLY) &&
2754 !(mnt_flags & MNT_READONLY))
2757 if ((fl & MNT_LOCK_NODEV) &&
2758 !(mnt_flags & MNT_NODEV))
2761 if ((fl & MNT_LOCK_NOSUID) &&
2762 !(mnt_flags & MNT_NOSUID))
2765 if ((fl & MNT_LOCK_NOEXEC) &&
2766 !(mnt_flags & MNT_NOEXEC))
2769 if ((fl & MNT_LOCK_ATIME) &&
2770 ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
2776 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
2778 bool readonly_request = (mnt_flags & MNT_READONLY);
2780 if (readonly_request == __mnt_is_readonly(&mnt->mnt))
2783 if (readonly_request)
2784 return mnt_make_readonly(mnt);
2786 mnt->mnt.mnt_flags &= ~MNT_READONLY;
2790 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
2792 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2793 mnt->mnt.mnt_flags = mnt_flags;
2794 touch_mnt_namespace(mnt->mnt_ns);
2797 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
2799 struct super_block *sb = mnt->mnt_sb;
2801 if (!__mnt_is_readonly(mnt) &&
2802 (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
2803 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
2804 char *buf = (char *)__get_free_page(GFP_KERNEL);
2805 char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
2807 pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
2808 sb->s_type->name,
2809 is_mounted(mnt) ? "remounted" : "mounted",
2810 mntpath, &sb->s_time_max,
2811 (unsigned long long)sb->s_time_max);
2813 free_page((unsigned long)buf);
2814 sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
2819 * Handle reconfiguration of the mountpoint only without alteration of the
2820 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND
2823 static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
2825 struct super_block *sb = path->mnt->mnt_sb;
2826 struct mount *mnt = real_mount(path->mnt);
2829 if (!check_mnt(mnt))
2832 if (!path_mounted(path))
2835 if (!can_change_locked_flags(mnt, mnt_flags))
2839 * We're only checking whether the superblock is read-only, not
2840 * changing it, so only take down_read(&sb->s_umount).
2842 down_read(&sb->s_umount);
2844 ret = change_mount_ro_state(mnt, mnt_flags);
2846 set_mount_attributes(mnt, mnt_flags);
2847 unlock_mount_hash();
2848 up_read(&sb->s_umount);
2850 mnt_warn_timestamp_expiry(path, &mnt->mnt);
2856 * change filesystem flags. dir should be the physical root of a filesystem.
2857 * If you've mounted a non-root directory somewhere and want to do remount
2858 * on it - tough luck.
2860 static int do_remount(struct path *path, int ms_flags, int sb_flags,
2861 int mnt_flags, void *data)
2864 struct super_block *sb = path->mnt->mnt_sb;
2865 struct mount *mnt = real_mount(path->mnt);
2866 struct fs_context *fc;
2868 if (!check_mnt(mnt))
2871 if (!path_mounted(path))
2874 if (!can_change_locked_flags(mnt, mnt_flags))
2877 fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
2882 * Indicate to the filesystem that the remount request is coming
2883 * from the legacy mount system call.
2887 err = parse_monolithic_mount_data(fc, data);
2889 down_write(&sb->s_umount);
2891 if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
2892 err = reconfigure_super(fc);
2895 set_mount_attributes(mnt, mnt_flags);
2896 unlock_mount_hash();
2899 up_write(&sb->s_umount);
2902 mnt_warn_timestamp_expiry(path, &mnt->mnt);
2908 static inline int tree_contains_unbindable(struct mount *mnt)
2911 for (p = mnt; p; p = next_mnt(p, mnt)) {
2912 if (IS_MNT_UNBINDABLE(p))
2919 * Check that there aren't references to earlier/same mount namespaces in the
2920 * specified subtree. Such references can act as pins for mount namespaces
2921 * that aren't checked by the mount-cycle checking code, thereby allowing
2922 * cycles to be made.
2924 static bool check_for_nsfs_mounts(struct mount *subtree)
2930 for (p = subtree; p; p = next_mnt(p, subtree))
2931 if (mnt_ns_loop(p->mnt.mnt_root))
2936 unlock_mount_hash();
2940 static int do_set_group(struct path *from_path, struct path *to_path)
2942 struct mount *from, *to;
2945 from = real_mount(from_path->mnt);
2946 to = real_mount(to_path->mnt);
2951 /* To and From must be mounted */
2952 if (!is_mounted(&from->mnt))
2954 if (!is_mounted(&to->mnt))
2958 /* We should be allowed to modify mount namespaces of both mounts */
2959 if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
2961 if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
2965 /* To and From paths should be mount roots */
2966 if (!path_mounted(from_path))
2968 if (!path_mounted(to_path))
2971 /* Setting sharing groups is only allowed across same superblock */
2972 if (from->mnt.mnt_sb != to->mnt.mnt_sb)
2975 /* From mount root should be wider than To mount root */
2976 if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
2979 /* From mount should not have locked children in place of To's root */
2980 if (has_locked_children(from, to->mnt.mnt_root))
2983 /* Setting sharing groups is only allowed on private mounts */
2984 if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
2987 /* From should not be private */
2988 if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
2991 if (IS_MNT_SLAVE(from)) {
2992 struct mount *m = from->mnt_master;
2994 list_add(&to->mnt_slave, &m->mnt_slave_list);
2998 if (IS_MNT_SHARED(from)) {
2999 to->mnt_group_id = from->mnt_group_id;
3000 list_add(&to->mnt_share, &from->mnt_share);
3003 unlock_mount_hash();
3013 * path_overmounted - check if path is overmounted
3014 * @path: path to check
3016 * Check if path is overmounted, i.e., if there's a mount on top of
3017 * @path->mnt with @path->dentry as mountpoint.
3019 * Context: This function expects namespace_lock() to be held.
3020 * Return: If path is overmounted true is returned, false if not.
3022 static inline bool path_overmounted(const struct path *path)
3025 if (unlikely(__lookup_mnt(path->mnt, path->dentry))) {
3034 * can_move_mount_beneath - check that we can mount beneath the top mount
3035 * @from: mount to mount beneath
3036 * @to: mount under which to mount
3037 * @mp: mountpoint of @to
3039 * - Make sure that @to->dentry is actually the root of a mount under
3040 * which we can mount another mount.
3041 * - Make sure that nothing can be mounted beneath the caller's current
3042 * root or the rootfs of the namespace.
3043 * - Make sure that the caller can unmount the topmost mount ensuring
3044 * that the caller could reveal the underlying mountpoint.
3045 * - Ensure that nothing has been mounted on top of @from before we
3046 * grabbed @namespace_sem to avoid creating pointless shadow mounts.
3047 * - Prevent mounting beneath a mount if the propagation relationship
3048 * between the source mount, parent mount, and top mount would lead to
3049 * nonsensical mount trees.
3051 * Context: This function expects namespace_lock() to be held.
3052 * Return: On success 0, and on error a negative error code is returned.
3054 static int can_move_mount_beneath(const struct path *from,
3055 const struct path *to,
3056 const struct mountpoint *mp)
3058 struct mount *mnt_from = real_mount(from->mnt),
3059 *mnt_to = real_mount(to->mnt),
3060 *parent_mnt_to = mnt_to->mnt_parent;
3062 if (!mnt_has_parent(mnt_to))
3065 if (!path_mounted(to))
3068 if (IS_MNT_LOCKED(mnt_to))
3071 /* Avoid creating shadow mounts during mount propagation. */
3072 if (path_overmounted(from))
3076 * Mounting beneath the rootfs only makes sense when the
3077 * semantics of pivot_root(".", ".") are used.
3079 if (&mnt_to->mnt == current->fs->root.mnt)
3081 if (parent_mnt_to == current->nsproxy->mnt_ns->root)
3084 for (struct mount *p = mnt_from; mnt_has_parent(p); p = p->mnt_parent)
3089 * If the parent mount propagates to the child mount this would
3090 * mean mounting @mnt_from on @mnt_to->mnt_parent and then
3091 * propagating a copy @c of @mnt_from on top of @mnt_to. This
3092 * defeats the whole purpose of mounting beneath another mount.
3094 if (propagation_would_overmount(parent_mnt_to, mnt_to, mp))
3098 * If @mnt_to->mnt_parent propagates to @mnt_from this would
3099 * mean propagating a copy @c of @mnt_from on top of @mnt_from.
3100 * Afterwards @mnt_from would be mounted on top of
3101 * @mnt_to->mnt_parent and @mnt_to would be unmounted from
3102 * @mnt_to->mnt_parent and remounted on @mnt_from. But since @c is
3103 * already mounted on @mnt_from, @mnt_to would ultimately be
3104 * remounted on top of @c. Afterwards, @mnt_from would be
3105 * covered by a copy @c of @mnt_from and @c would be covered by
3106 * @mnt_from itself. This defeats the whole purpose of mounting
3107 * @mnt_from beneath @mnt_to.
3109 if (propagation_would_overmount(parent_mnt_to, mnt_from, mp))
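/*
 * Illustrative sketch (hypothetical paths, assuming /staging is itself
 * a mount): move a mount beneath the top mount at /mnt so that
 * unmounting the top mount reveals it:
 *
 *	move_mount(AT_FDCWD, "/staging", AT_FDCWD, "/mnt", MOVE_MOUNT_BENEATH);
 *	umount2("/mnt", MNT_DETACH);	// exposes the mount moved beneath
 */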
3115 static int do_move_mount(struct path *old_path, struct path *new_path,
3118 struct mnt_namespace *ns;
3121 struct mount *parent;
3122 struct mountpoint *mp, *old_mp;
3125 enum mnt_tree_flags_t flags = 0;
3127 mp = do_lock_mount(new_path, beneath);
3131 old = real_mount(old_path->mnt);
3132 p = real_mount(new_path->mnt);
3133 parent = old->mnt_parent;
3134 attached = mnt_has_parent(old);
3136 flags |= MNT_TREE_MOVE;
3137 old_mp = old->mnt_mp;
3141 /* The mountpoint must be in our namespace. */
3145 /* The thing moved must be mounted... */
3146 if (!is_mounted(&old->mnt))
3149 /* ... and either ours or the root of anon namespace */
3150 if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
3153 if (old->mnt.mnt_flags & MNT_LOCKED)
3156 if (!path_mounted(old_path))
3159 if (d_is_dir(new_path->dentry) !=
3160 d_is_dir(old_path->dentry))
3163 * Don't move a mount residing in a shared parent.
3165 if (attached && IS_MNT_SHARED(parent))
3169 err = can_move_mount_beneath(old_path, new_path, mp);
3175 flags |= MNT_TREE_BENEATH;
3179 * Don't move a mount tree containing unbindable mounts to a destination
3180 * mount which is shared.
3182 if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
3185 if (!check_for_nsfs_mounts(old))
3187 for (; mnt_has_parent(p); p = p->mnt_parent)
3191 err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, flags);
3195 /* if the mount is moved, it should no longer be expired automatically */
3197 list_del_init(&old->mnt_expire);
3199 put_mountpoint(old_mp);
3204 mntput_no_expire(parent);
3211 static int do_move_mount_old(struct path *path, const char *old_name)
3213 struct path old_path;
3216 if (!old_name || !*old_name)
3219 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
3223 err = do_move_mount(&old_path, path, false);
3224 path_put(&old_path);
3229 * add a mount into a namespace's mount tree
3231 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
3232 const struct path *path, int mnt_flags)
3234 struct mount *parent = real_mount(path->mnt);
3236 mnt_flags &= ~MNT_INTERNAL_FLAGS;
3238 if (unlikely(!check_mnt(parent))) {
3239 /* that's acceptable only for automounts done in private ns */
3240 if (!(mnt_flags & MNT_SHRINKABLE))
3242 /* ... and for those we'd better have mountpoint still alive */
3243 if (!parent->mnt_ns)
3247 /* Refuse the same filesystem on the same mount point */
3248 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path_mounted(path))
3251 if (d_is_symlink(newmnt->mnt.mnt_root))
3254 newmnt->mnt.mnt_flags = mnt_flags;
3255 return graft_tree(newmnt, parent, mp);
3258 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
3261 * Create a new mount using a superblock configuration and request it
3262 * be added to the namespace tree.
3264 static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
3265 unsigned int mnt_flags)
3267 struct vfsmount *mnt;
3268 struct mountpoint *mp;
3269 struct super_block *sb = fc->root->d_sb;
3272 error = security_sb_kern_mount(sb);
3273 if (!error && mount_too_revealing(sb, &mnt_flags))
3274 error = -EPERM;
3276 if (unlikely(error)) {
3281 up_write(&sb->s_umount);
3283 mnt = vfs_create_mount(fc);
3285 return PTR_ERR(mnt);
3287 mnt_warn_timestamp_expiry(mountpoint, mnt);
3289 mp = lock_mount(mountpoint);
3294 error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
3302 * create a new mount for userspace and request it to be added into the
3305 static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
3306 int mnt_flags, const char *name, void *data)
3308 struct file_system_type *type;
3309 struct fs_context *fc;
3310 const char *subtype = NULL;
3316 type = get_fs_type(fstype);
3320 if (type->fs_flags & FS_HAS_SUBTYPE) {
3321 subtype = strchr(fstype, '.');
3325 put_filesystem(type);
3331 fc = fs_context_for_mount(type, sb_flags);
3332 put_filesystem(type);
3337 * Indicate to the filesystem that the mount request is coming
3338 * from the legacy mount system call.
3343 err = vfs_parse_fs_string(fc, "subtype",
3344 subtype, strlen(subtype));
3346 err = vfs_parse_fs_string(fc, "source", name, strlen(name));
3348 err = parse_monolithic_mount_data(fc, data);
3349 if (!err && !mount_capable(fc))
3352 err = vfs_get_tree(fc);
3354 err = do_new_mount_fc(fc, path, mnt_flags);
3360 int finish_automount(struct vfsmount *m, const struct path *path)
3362 struct dentry *dentry = path->dentry;
3363 struct mountpoint *mp;
3372 mnt = real_mount(m);
3373 /* The new mount record should have at least 2 refs to prevent it being
3374 * expired before we get a chance to add it
3376 BUG_ON(mnt_get_count(mnt) < 2);
3378 if (m->mnt_sb == path->mnt->mnt_sb &&
3379 m->mnt_root == dentry) {
3385 * we don't want to use lock_mount() - in this case finding something
3386 * that overmounts our mountpoint means "quietly drop what we've
3387 * got", not "try to mount it on top".
3389 inode_lock(dentry->d_inode);
3391 if (unlikely(cant_mount(dentry))) {
3393 goto discard_locked;
3395 if (path_overmounted(path)) {
3397 goto discard_locked;
3399 mp = get_mountpoint(dentry);
3402 goto discard_locked;
3405 err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
3414 inode_unlock(dentry->d_inode);
3416 /* remove m from any expiration list it may be on */
3417 if (!list_empty(&mnt->mnt_expire)) {
3419 list_del_init(&mnt->mnt_expire);
3428 * mnt_set_expiry - Put a mount on an expiration list
3429 * @mnt: The mount to list.
3430 * @expiry_list: The list to add the mount to.
3432 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
3436 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3440 EXPORT_SYMBOL(mnt_set_expiry);
3443 * process a list of expirable mountpoints with the intent of discarding any
3444 * mountpoints that aren't in use and haven't been touched since last we came
3447 void mark_mounts_for_expiry(struct list_head *mounts)
3449 struct mount *mnt, *next;
3450 LIST_HEAD(graveyard);
3452 if (list_empty(mounts))
3458 /* extract from the expiration list every vfsmount that matches the
3459 * following criteria:
3460 * - only referenced by its parent vfsmount
3461 * - still marked for expiry (marked on the last call here; marks are
3462 * cleared by mntput())
3464 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3465 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3466 propagate_mount_busy(mnt, 1))
3468 list_move(&mnt->mnt_expire, &graveyard);
3470 while (!list_empty(&graveyard)) {
3471 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3472 touch_mnt_namespace(mnt->mnt_ns);
3473 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3475 unlock_mount_hash();
3479 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
3482 * Ripoff of 'select_parent()'
3484 * search the list of submounts for a given mountpoint, and move any
3485 * shrinkable submounts to the 'graveyard' list.
3487 static int select_submounts(struct mount *parent, struct list_head *graveyard)
3489 struct mount *this_parent = parent;
3490 struct list_head *next;
3494 next = this_parent->mnt_mounts.next;
3496 while (next != &this_parent->mnt_mounts) {
3497 struct list_head *tmp = next;
3498 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
3501 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
3504 * Descend a level if the mnt_mounts list is non-empty.
3506 if (!list_empty(&mnt->mnt_mounts)) {
3511 if (!propagate_mount_busy(mnt, 1)) {
3512 list_move_tail(&mnt->mnt_expire, graveyard);
3517 * All done at this level ... ascend and resume the search
3519 if (this_parent != parent) {
3520 next = this_parent->mnt_child.next;
3521 this_parent = this_parent->mnt_parent;
3528 * process a list of expirable mountpoints with the intent of discarding any
3529 * submounts of a specific parent mountpoint
3531 * mount_lock must be held for write
3533 static void shrink_submounts(struct mount *mnt)
3535 LIST_HEAD(graveyard);
3538 /* extract submounts of 'mountpoint' from the expiration list */
3539 while (select_submounts(mnt, &graveyard)) {
3540 while (!list_empty(&graveyard)) {
3541 m = list_first_entry(&graveyard, struct mount,
3543 touch_mnt_namespace(m->mnt_ns);
3544 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3549 static void *copy_mount_options(const void __user * data)
3552 unsigned left, offset;
3557 copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
3559 return ERR_PTR(-ENOMEM);
3561 left = copy_from_user(copy, data, PAGE_SIZE);
3564 * Not all architectures have an exact copy_from_user(). Resort to
3565 * byte at a time.
3567 offset = PAGE_SIZE - left;
3570 if (get_user(c, (const char __user *)data + offset))
3577 if (left == PAGE_SIZE) {
3579 return ERR_PTR(-EFAULT);
3585 static char *copy_mount_string(const void __user *data)
3587 return data ? strndup_user(data, PATH_MAX) : NULL;
3591 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
3592 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
3594 * data is a (void *) that can point to any structure up to
3595 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
3596 * information (or be NULL).
3598 * Pre-0.97 versions of mount() didn't have a flags word.
3599 * When the flags word was introduced its top half was required
3600 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
3601 * Therefore, if this magic number is present, it carries no information
3602 * and must be discarded.
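/*
 * Illustrative sketch (hypothetical device and options) of a classic
 * mount(2) call that ends up being dispatched by path_mount() below:
 *
 *	mount("/dev/sda1", "/mnt", "ext4",
 *	      MS_RDONLY | MS_NOSUID, "errors=remount-ro");
 */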
3604 int path_mount(const char *dev_name, struct path *path,
3605 const char *type_page, unsigned long flags, void *data_page)
3607 unsigned int mnt_flags = 0, sb_flags;
3611 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
3612 flags &= ~MS_MGC_MSK;
3614 /* Basic sanity checks */
3615 if (data_page)
3616 ((char *)data_page)[PAGE_SIZE - 1] = 0;
3618 if (flags & MS_NOUSER)
3621 ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
3626 if (flags & SB_MANDLOCK)
3629 /* Default to relatime unless overridden */
3630 if (!(flags & MS_NOATIME))
3631 mnt_flags |= MNT_RELATIME;
3633 /* Separate the per-mountpoint flags */
3634 if (flags & MS_NOSUID)
3635 mnt_flags |= MNT_NOSUID;
3636 if (flags & MS_NODEV)
3637 mnt_flags |= MNT_NODEV;
3638 if (flags & MS_NOEXEC)
3639 mnt_flags |= MNT_NOEXEC;
3640 if (flags & MS_NOATIME)
3641 mnt_flags |= MNT_NOATIME;
3642 if (flags & MS_NODIRATIME)
3643 mnt_flags |= MNT_NODIRATIME;
3644 if (flags & MS_STRICTATIME)
3645 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
3646 if (flags & MS_RDONLY)
3647 mnt_flags |= MNT_READONLY;
3648 if (flags & MS_NOSYMFOLLOW)
3649 mnt_flags |= MNT_NOSYMFOLLOW;
3651 /* The default atime for remount is preservation */
3652 if ((flags & MS_REMOUNT) &&
3653 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
3654 MS_STRICTATIME)) == 0)) {
3655 mnt_flags &= ~MNT_ATIME_MASK;
3656 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
3659 sb_flags = flags & (SB_RDONLY |
3668 if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
3669 return do_reconfigure_mnt(path, mnt_flags);
3670 if (flags & MS_REMOUNT)
3671 return do_remount(path, flags, sb_flags, mnt_flags, data_page);
3672 if (flags & MS_BIND)
3673 return do_loopback(path, dev_name, flags & MS_REC);
3674 if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
3675 return do_change_type(path, flags);
3676 if (flags & MS_MOVE)
3677 return do_move_mount_old(path, dev_name);
3679 return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
3683 long do_mount(const char *dev_name, const char __user *dir_name,
3684 const char *type_page, unsigned long flags, void *data_page)
3689 ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
3692 ret = path_mount(dev_name, &path, type_page, flags, data_page);
3697 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
3699 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
3702 static void dec_mnt_namespaces(struct ucounts *ucounts)
3704 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
3707 static void free_mnt_ns(struct mnt_namespace *ns)
3709 if (!is_anon_ns(ns))
3710 ns_free_inum(&ns->ns);
3711 dec_mnt_namespaces(ns->ucounts);
3712 put_user_ns(ns->user_ns);
3717 * Assign a sequence number so we can detect when we attempt to bind
3718 * mount a reference to an older mount namespace into the current
3719 * mount namespace, preventing reference counting loops. A 64bit
3720 * number incrementing at 10GHz would still take over 58 years to wrap,
3721 * and namespaces are created far more slowly, so we can ignore it.
3723 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
3725 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
3727 struct mnt_namespace *new_ns;
3728 struct ucounts *ucounts;
3731 ucounts = inc_mnt_namespaces(user_ns);
3733 return ERR_PTR(-ENOSPC);
3735 new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT);
3737 dec_mnt_namespaces(ucounts);
3738 return ERR_PTR(-ENOMEM);
3741 ret = ns_alloc_inum(&new_ns->ns);
3744 dec_mnt_namespaces(ucounts);
3745 return ERR_PTR(ret);
3748 new_ns->ns.ops = &mntns_operations;
3750 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
3751 refcount_set(&new_ns->ns.count, 1);
3752 new_ns->mounts = RB_ROOT;
3753 init_waitqueue_head(&new_ns->poll);
3754 new_ns->user_ns = get_user_ns(user_ns);
3755 new_ns->ucounts = ucounts;
3760 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
3761 struct user_namespace *user_ns, struct fs_struct *new_fs)
3763 struct mnt_namespace *new_ns;
3764 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
3765 struct mount *p, *q;
3772 if (likely(!(flags & CLONE_NEWNS))) {
3779 new_ns = alloc_mnt_ns(user_ns, false);
3784 /* First pass: copy the tree topology */
3785 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
3786 if (user_ns != ns->user_ns)
3787 copy_flags |= CL_SHARED_TO_SLAVE;
3788 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
3791 free_mnt_ns(new_ns);
3792 return ERR_CAST(new);
3794 if (user_ns != ns->user_ns) {
3797 unlock_mount_hash();
3802 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
3803 * as belonging to new namespace. We have already acquired a private
3804 * fs_struct, so tsk->fs->lock is not needed.
3809 mnt_add_to_ns(new_ns, q);
3810 new_ns->nr_mounts++;
3812 if (&p->mnt == new_fs->root.mnt) {
3813 new_fs->root.mnt = mntget(&q->mnt);
3816 if (&p->mnt == new_fs->pwd.mnt) {
3817 new_fs->pwd.mnt = mntget(&q->mnt);
3821 p = next_mnt(p, old);
3822 q = next_mnt(q, new);
3825 // an mntns binding we'd skipped?
3826 while (p->mnt.mnt_root != q->mnt.mnt_root)
3827 p = next_mnt(skip_mnt_tree(p), old);
3839 struct dentry *mount_subtree(struct vfsmount *m, const char *name)
3841 struct mount *mnt = real_mount(m);
3842 struct mnt_namespace *ns;
3843 struct super_block *s;
3847 ns = alloc_mnt_ns(&init_user_ns, true);
3850 return ERR_CAST(ns);
3854 mnt_add_to_ns(ns, mnt);
3856 err = vfs_path_lookup(m->mnt_root, m,
3857 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
3862 return ERR_PTR(err);
3864 /* trade a vfsmount reference for active sb one */
3865 s = path.mnt->mnt_sb;
3866 atomic_inc(&s->s_active);
3868 /* lock the sucker */
3869 down_write(&s->s_umount);
3870 /* ... and return the root of (sub)tree on it */
3873 EXPORT_SYMBOL(mount_subtree);
3875 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
3876 char __user *, type, unsigned long, flags, void __user *, data)
3883 kernel_type = copy_mount_string(type);
3884 ret = PTR_ERR(kernel_type);
3885 if (IS_ERR(kernel_type))
3888 kernel_dev = copy_mount_string(dev_name);
3889 ret = PTR_ERR(kernel_dev);
3890 if (IS_ERR(kernel_dev))
3893 options = copy_mount_options(data);
3894 ret = PTR_ERR(options);
3895 if (IS_ERR(options))
3898 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
3909 #define FSMOUNT_VALID_FLAGS \
3910 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
3911 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \
3912 MOUNT_ATTR_NOSYMFOLLOW)
3914 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
3916 #define MOUNT_SETATTR_PROPAGATION_FLAGS \
3917 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
3919 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
3921 unsigned int mnt_flags = 0;
3923 if (attr_flags & MOUNT_ATTR_RDONLY)
3924 mnt_flags |= MNT_READONLY;
3925 if (attr_flags & MOUNT_ATTR_NOSUID)
3926 mnt_flags |= MNT_NOSUID;
3927 if (attr_flags & MOUNT_ATTR_NODEV)
3928 mnt_flags |= MNT_NODEV;
3929 if (attr_flags & MOUNT_ATTR_NOEXEC)
3930 mnt_flags |= MNT_NOEXEC;
3931 if (attr_flags & MOUNT_ATTR_NODIRATIME)
3932 mnt_flags |= MNT_NODIRATIME;
3933 if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
3934 mnt_flags |= MNT_NOSYMFOLLOW;
3940 * Create a kernel mount representation for a new, prepared superblock
3941 * (specified by fs_fd) and attach to an open_tree-like file descriptor.
3943 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
3944 unsigned int, attr_flags)
3946 struct mnt_namespace *ns;
3947 struct fs_context *fc;
3949 struct path newmount;
3952 unsigned int mnt_flags = 0;
3958 if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
3961 if (attr_flags & ~FSMOUNT_VALID_FLAGS)
3964 mnt_flags = attr_flags_to_mnt_flags(attr_flags);
3966 switch (attr_flags & MOUNT_ATTR__ATIME) {
3967 case MOUNT_ATTR_STRICTATIME:
3968 break;
3969 case MOUNT_ATTR_NOATIME:
3970 mnt_flags |= MNT_NOATIME;
3971 break;
3972 case MOUNT_ATTR_RELATIME:
3973 mnt_flags |= MNT_RELATIME;
3974 break;
3975 default:
3976 return -EINVAL;
3977 }
3984 if (f.file->f_op != &fscontext_fops)
3987 fc = f.file->private_data;
3989 ret = mutex_lock_interruptible(&fc->uapi_mutex);
3993 /* There must be a valid superblock or we can't mount it */
3999 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
4000 pr_warn("VFS: Mount too revealing\n");
4005 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
4008 if (fc->sb_flags & SB_MANDLOCK)
4011 newmount.mnt = vfs_create_mount(fc);
4012 if (IS_ERR(newmount.mnt)) {
4013 ret = PTR_ERR(newmount.mnt);
4016 newmount.dentry = dget(fc->root);
4017 newmount.mnt->mnt_flags = mnt_flags;
4019 /* We've done the mount bit - now move the file context into more or
4020 * less the same state as if we'd done an fspick(). We don't want to
4021 * do any memory allocation or anything like that at this point as we
4022 * don't want to have to handle any errors incurred.
4024 vfs_clean_context(fc);
4026 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
4031 mnt = real_mount(newmount.mnt);
4034 mnt_add_to_ns(ns, mnt);
4035 mntget(newmount.mnt);
4037 /* Attach to an apparent O_PATH fd with a note that we need to unmount
4038 * it, not just simply put it.
4040 file = dentry_open(&newmount, O_PATH, fc->cred);
4041 if (IS_ERR(file)) {
4042 dissolve_on_fput(newmount.mnt);
4043 ret = PTR_ERR(file);
4044 goto err_path;
4045 }
4046 file->f_mode |= FMODE_NEED_UNMOUNT;
4048 ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
4050 fd_install(ret, file);
4055 path_put(&newmount);
4057 mutex_unlock(&fc->uapi_mutex);
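/*
 * Illustrative sketch of the intended usage (hypothetical source;
 * depending on the libc these may need syscall(2)): configure a
 * superblock with fsopen()/fsconfig(), turn it into a mount with
 * fsmount() and attach it with move_mount():
 *
 *	int fsfd = fsopen("ext4", FSOPEN_CLOEXEC);
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_NODEV);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 */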
4064 * Move a mount from one place to another. In combination with
4065 * fsopen()/fsmount() this is used to install a new mount and in combination
4066 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
4069 * Note the flags value is a combination of MOVE_MOUNT_* flags.
4071 SYSCALL_DEFINE5(move_mount,
4072 int, from_dfd, const char __user *, from_pathname,
4073 int, to_dfd, const char __user *, to_pathname,
4074 unsigned int, flags)
4076 struct path from_path, to_path;
4077 unsigned int lflags;
4083 if (flags & ~MOVE_MOUNT__MASK)
4086 if ((flags & (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) ==
4087 (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP))
4090 /* If someone gives a pathname, they aren't permitted to move
4091 * from an fd that requires unmount as we can't get at the flag
4092 * to clear it afterwards.
4095 if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW;
4096 if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
4097 if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
4099 ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
4104 if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW;
4105 if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
4106 if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
4108 ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
4112 ret = security_move_mount(&from_path, &to_path);
4116 if (flags & MOVE_MOUNT_SET_GROUP)
4117 ret = do_set_group(&from_path, &to_path);
4119 ret = do_move_mount(&from_path, &to_path,
4120 (flags & MOVE_MOUNT_BENEATH));
4125 path_put(&from_path);
4130 * Return true if path is reachable from root
4132 * namespace_sem or mount_lock is held
4134 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
4135 const struct path *root)
4137 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
4138 dentry = mnt->mnt_mountpoint;
4139 mnt = mnt->mnt_parent;
4141 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
4144 bool path_is_under(const struct path *path1, const struct path *path2)
4147 read_seqlock_excl(&mount_lock);
4148 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
4149 read_sequnlock_excl(&mount_lock);
4152 EXPORT_SYMBOL(path_is_under);
4155 * pivot_root Semantics:
4156 * Moves the root file system of the current process to the directory put_old,
4157 * makes new_root as the new root file system of the current process, and sets
4158 * root/cwd of all processes which had them on the current root to new_root.
4161 * The new_root and put_old must be directories, and must not be on the
4162 * same file system as the current process root. The put_old must be
4163 * underneath new_root, i.e. adding a non-zero number of /.. to the string
4164 * pointed to by put_old must yield the same directory as new_root. No other
4165 * file system may be mounted on put_old. After all, new_root is a mountpoint.
4167 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
4168 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
4169 * in this situation.
4172 * - we don't move root/cwd if they are not at the root (reason: if something
4173 * cared enough to change them, it's probably wrong to force them elsewhere)
4174 * - it's okay to pick a root that isn't the root of a file system, e.g.
4175 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
4176 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
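/*
 * Illustrative switch-root sketch (hypothetical paths; pivot_root(2)
 * has no glibc wrapper, hence syscall(2)):
 *
 *	mount("/dev/sda1", "/new_root", "ext4", 0, NULL);
 *	mkdir("/new_root/old_root", 0700);
 *	syscall(SYS_pivot_root, "/new_root", "/new_root/old_root");
 *	chdir("/");
 *	umount2("/old_root", MNT_DETACH);
 */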
4179 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
4180 const char __user *, put_old)
4182 struct path new, old, root;
4183 struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
4184 struct mountpoint *old_mp, *root_mp;
4190 error = user_path_at(AT_FDCWD, new_root,
4191 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
4195 error = user_path_at(AT_FDCWD, put_old,
4196 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
4200 error = security_sb_pivotroot(&old, &new);
4204 get_fs_root(current->fs, &root);
4205 old_mp = lock_mount(&old);
4206 error = PTR_ERR(old_mp);
4211 new_mnt = real_mount(new.mnt);
4212 root_mnt = real_mount(root.mnt);
4213 old_mnt = real_mount(old.mnt);
4214 ex_parent = new_mnt->mnt_parent;
4215 root_parent = root_mnt->mnt_parent;
4216 if (IS_MNT_SHARED(old_mnt) ||
4217 IS_MNT_SHARED(ex_parent) ||
4218 IS_MNT_SHARED(root_parent))
4220 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
4222 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
4225 if (d_unlinked(new.dentry))
4228 if (new_mnt == root_mnt || old_mnt == root_mnt)
4229 goto out4; /* loop, on the same file system */
4231 if (!path_mounted(&root))
4232 goto out4; /* not a mountpoint */
4233 if (!mnt_has_parent(root_mnt))
4234 goto out4; /* not attached */
4235 if (!path_mounted(&new))
4236 goto out4; /* not a mountpoint */
4237 if (!mnt_has_parent(new_mnt))
4238 goto out4; /* not attached */
4239 /* make sure we can reach put_old from new_root */
4240 if (!is_path_reachable(old_mnt, old.dentry, &new))
4242 /* make certain new is below the root */
4243 if (!is_path_reachable(new_mnt, new.dentry, &root))
4246 umount_mnt(new_mnt);
4247 root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */
4248 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
4249 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
4250 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
4252 /* mount old root on put_old */
4253 attach_mnt(root_mnt, old_mnt, old_mp, false);
4254 /* mount new_root on / */
4255 attach_mnt(new_mnt, root_parent, root_mp, false);
4256 mnt_add_count(root_parent, -1);
4257 touch_mnt_namespace(current->nsproxy->mnt_ns);
4258 /* A moved mount should not expire automatically */
4259 list_del_init(&new_mnt->mnt_expire);
4260 put_mountpoint(root_mp);
4261 unlock_mount_hash();
4262 chroot_fs_refs(&root, &new);
4265 unlock_mount(old_mp);
4267 mntput_no_expire(ex_parent);
4278 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
4280 unsigned int flags = mnt->mnt.mnt_flags;
4282 /* flags to clear */
4283 flags &= ~kattr->attr_clr;
4284 /* flags to raise */
4285 flags |= kattr->attr_set;
4290 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4292 struct vfsmount *m = &mnt->mnt;
4293 struct user_namespace *fs_userns = m->mnt_sb->s_user_ns;
4295 if (!kattr->mnt_idmap)
4299 * Creating an idmapped mount with the filesystem wide idmapping
4300 * doesn't make sense so block that. We don't allow mushy semantics.
4302 if (kattr->mnt_userns == m->mnt_sb->s_user_ns)
4306 * Once a mount has been idmapped we don't allow it to change its
4307 * mapping. It makes things simpler and callers can just create
4308 * another bind-mount they can idmap if they want to.
4310 if (is_idmapped_mnt(m))
4313 /* The underlying filesystem doesn't support idmapped mounts yet. */
4314 if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
4317 /* We're not controlling the superblock. */
4318 if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
4321 /* Mount has already been visible in the filesystem hierarchy. */
4322 if (!is_anon_ns(mnt->mnt_ns))
4329 * mnt_allow_writers() - check whether the attribute change allows writers
4330 * @kattr: the new mount attributes
4331 * @mnt: the mount to which @kattr will be applied
4333 * Check whether the new mount attributes in @kattr allow concurrent writers.
4335 * Return: true if writers need to be held, false if not
4337 static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
4338 const struct mount *mnt)
4340 return (!(kattr->attr_set & MNT_READONLY) ||
4341 (mnt->mnt.mnt_flags & MNT_READONLY)) &&
4345 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
4350 for (m = mnt; m; m = next_mnt(m, mnt)) {
4351 if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
4356 err = can_idmap_mount(kattr, m);
4360 if (!mnt_allow_writers(kattr, m)) {
4361 err = mnt_hold_writers(m);
4366 if (!kattr->recurse)
4374 * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
4375 * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all
4376 * mounts and needs to take care to include the first mount.
4378 for (p = mnt; p; p = next_mnt(p, mnt)) {
4379 /* If we had to hold writers unblock them. */
4380 if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
4381 mnt_unhold_writers(p);
4384 * We're done once the first mount we changed got
4385 * MNT_WRITE_HOLD unset.
4394 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4396 if (!kattr->mnt_idmap)
4400 * Pairs with smp_load_acquire() in mnt_idmap().
4402 * Since we only allow a mount to change the idmapping once and
4403 * verified this in can_idmap_mount() we know that the mount has
4404 * @nop_mnt_idmap attached to it. So there's no need to drop any
4407 smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
4410 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
4414 for (m = mnt; m; m = next_mnt(m, mnt)) {
4417 do_idmap_mount(kattr, m);
4418 flags = recalc_flags(kattr, m);
4419 WRITE_ONCE(m->mnt.mnt_flags, flags);
4421 /* If we had to hold writers unblock them. */
4422 if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
4423 mnt_unhold_writers(m);
4425 if (kattr->propagation)
4426 change_mnt_propagation(m, kattr->propagation);
4427 if (!kattr->recurse)
4430 touch_mnt_namespace(mnt->mnt_ns);
4433 static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
4435 struct mount *mnt = real_mount(path->mnt);
4438 if (!path_mounted(path))
4441 if (kattr->mnt_userns) {
4442 struct mnt_idmap *mnt_idmap;
4444 mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns);
4445 if (IS_ERR(mnt_idmap))
4446 return PTR_ERR(mnt_idmap);
4447 kattr->mnt_idmap = mnt_idmap;
4450 if (kattr->propagation) {
4452 * Only take namespace_lock() if we're actually changing
4453 * propagation.
4456 if (kattr->propagation == MS_SHARED) {
4457 err = invent_group_ids(mnt, kattr->recurse);
4468 /* Ensure that this isn't anything purely vfs internal. */
4469 if (!is_mounted(&mnt->mnt))
4473 * If this is an attached mount make sure it's located in the callers
4474 * mount namespace. If it's not don't let the caller interact with it.
4476 * If this mount doesn't have a parent it's most often simply a
4477 * detached mount with an anonymous mount namespace. IOW, something
4478 * that's simply not attached yet. But there are apparently also users
4479 * that do change mount properties on the rootfs itself. That obviously
4480 * neither has a parent nor is it a detached mount so we cannot
4481 * unconditionally check for detached mounts.
4483 if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt))
4487 * First, we get the mount tree in a shape where we can change mount
4488 * properties without failure. If we succeeded to do so we commit all
4489 * changes and if we failed we clean up.
4491 err = mount_setattr_prepare(kattr, mnt);
4493 mount_setattr_commit(kattr, mnt);
4496 unlock_mount_hash();
4498 if (kattr->propagation) {
4500 cleanup_group_ids(mnt, NULL);
4507 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
4508 struct mount_kattr *kattr, unsigned int flags)
4511 struct ns_common *ns;
4512 struct user_namespace *mnt_userns;
4515 if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
4519 * We currently do not support clearing an idmapped mount. If this ever
4520 * is a use-case we can revisit this, but for now let's keep it simple
4521 * and not allow it.
4523 if (attr->attr_clr & MOUNT_ATTR_IDMAP)
4526 if (attr->userns_fd > INT_MAX)
4529 f = fdget(attr->userns_fd);
4533 if (!proc_ns_file(f.file)) {
4538 ns = get_proc_ns(file_inode(f.file));
4539 if (ns->ops->type != CLONE_NEWUSER) {
4545 * The initial idmapping cannot be used to create an idmapped
4546 * mount. We use the initial idmapping as an indicator of a mount
4547 * that is not idmapped. It can simply be passed into helpers that
4548 * are aware of idmapped mounts as a convenient shortcut. A user
4549 * can just create a dedicated identity mapping to achieve the same
4552 mnt_userns = container_of(ns, struct user_namespace, ns);
4553 if (mnt_userns == &init_user_ns) {
4558 /* We're not controlling the target namespace. */
4559 if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
4564 kattr->mnt_userns = get_user_ns(mnt_userns);
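/*
 * Illustrative sketch (hypothetical paths and fds): create an idmapped
 * mount under the rules above - clone a detached tree, apply the
 * idmapping of an existing user namespace, then attach the result:
 *
 *	int fd = open_tree(AT_FDCWD, "/src", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_IDMAP,
 *		.userns_fd = userns_fd, // e.g. open("/proc/<pid>/ns/user", O_RDONLY)
 *	};
 *	mount_setattr(fd, "", AT_EMPTY_PATH, &attr, sizeof(attr));
 *	move_mount(fd, "", AT_FDCWD, "/idmapped", MOVE_MOUNT_F_EMPTY_PATH);
 */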
4571 static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
4572 struct mount_kattr *kattr, unsigned int flags)
4574 unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
4576 if (flags & AT_NO_AUTOMOUNT)
4577 lookup_flags &= ~LOOKUP_AUTOMOUNT;
4578 if (flags & AT_SYMLINK_NOFOLLOW)
4579 lookup_flags &= ~LOOKUP_FOLLOW;
4580 if (flags & AT_EMPTY_PATH)
4581 lookup_flags |= LOOKUP_EMPTY;
4583 *kattr = (struct mount_kattr) {
4584 .lookup_flags = lookup_flags,
4585 .recurse = !!(flags & AT_RECURSIVE),
4588 if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
4590 if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
4592 kattr->propagation = attr->propagation;
4594 if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
4597 kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
4598 kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);
4601 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
4602 * users wanting to transition to a different atime setting cannot
4603 * simply specify the atime setting in @attr_set, but must also
4604 * specify MOUNT_ATTR__ATIME in the @attr_clr field.
4605 * So ensure that MOUNT_ATTR__ATIME can't be partially set in
4606 * @attr_clr and that @attr_set can't have any atime bits set if
4607 * MOUNT_ATTR__ATIME isn't set in @attr_clr.
4609 if (attr->attr_clr & MOUNT_ATTR__ATIME) {
4610 if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
4614 * Clear all previous time settings as they are mutually
4615 * exclusive.
4617 kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
4618 switch (attr->attr_set & MOUNT_ATTR__ATIME) {
4619 case MOUNT_ATTR_RELATIME:
4620 kattr->attr_set |= MNT_RELATIME;
4621 break;
4622 case MOUNT_ATTR_NOATIME:
4623 kattr->attr_set |= MNT_NOATIME;
4624 break;
4625 case MOUNT_ATTR_STRICTATIME:
4626 break;
4627 default:
4628 return -EINVAL;
4629 }
4631 if (attr->attr_set & MOUNT_ATTR__ATIME)
4635 return build_mount_idmapped(attr, usize, kattr, flags);
4638 static void finish_mount_kattr(struct mount_kattr *kattr)
4640 put_user_ns(kattr->mnt_userns);
4641 kattr->mnt_userns = NULL;
4643 if (kattr->mnt_idmap)
4644 mnt_idmap_put(kattr->mnt_idmap);
4647 SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
4648 unsigned int, flags, struct mount_attr __user *, uattr,
4653 struct mount_attr attr;
4654 struct mount_kattr kattr;
4656 BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
4658 if (flags & ~(AT_EMPTY_PATH |
4660 AT_SYMLINK_NOFOLLOW |
4664 if (unlikely(usize > PAGE_SIZE))
4666 if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
4672 err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
4676 /* Don't bother walking through the mounts if this is a nop. */
4677 if (attr.attr_set == 0 &&
4678 attr.attr_clr == 0 &&
4679 attr.propagation == 0)
4682 err = build_mount_kattr(&attr, usize, &kattr, flags);
4686 err = user_path_at(dfd, path, kattr.lookup_flags, &target);
4688 err = do_mount_setattr(&target, &kattr);
4691 finish_mount_kattr(&kattr);
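/*
 * Illustrative sketch (hypothetical path): because the atime values are
 * an enum rather than a bitmap (see build_mount_kattr() above),
 * switching the atime mode requires clearing MOUNT_ATTR__ATIME as well:
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_NOATIME,
 *		.attr_clr = MOUNT_ATTR__ATIME,
 *	};
 *	mount_setattr(AT_FDCWD, "/mnt", AT_RECURSIVE, &attr, sizeof(attr));
 */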
4695 int show_path(struct seq_file *m, struct dentry *root)
4697 if (root->d_sb->s_op->show_path)
4698 return root->d_sb->s_op->show_path(m, root);
4700 seq_dentry(m, root, " \t\n\\");
4704 static struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns)
4706 struct mount *mnt = mnt_find_id_at(ns, id);
4708 if (!mnt || mnt->mnt_id_unique != id)
4709 return NULL;
4711 return &mnt->mnt;
4715 struct statmount __user *buf;
4717 struct vfsmount *mnt;
4720 struct statmount sm;
4721 struct seq_file seq;
4724 static u64 mnt_to_attr_flags(struct vfsmount *mnt)
4726 unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags);
4729 if (mnt_flags & MNT_READONLY)
4730 attr_flags |= MOUNT_ATTR_RDONLY;
4731 if (mnt_flags & MNT_NOSUID)
4732 attr_flags |= MOUNT_ATTR_NOSUID;
4733 if (mnt_flags & MNT_NODEV)
4734 attr_flags |= MOUNT_ATTR_NODEV;
4735 if (mnt_flags & MNT_NOEXEC)
4736 attr_flags |= MOUNT_ATTR_NOEXEC;
4737 if (mnt_flags & MNT_NODIRATIME)
4738 attr_flags |= MOUNT_ATTR_NODIRATIME;
4739 if (mnt_flags & MNT_NOSYMFOLLOW)
4740 attr_flags |= MOUNT_ATTR_NOSYMFOLLOW;
4742 if (mnt_flags & MNT_NOATIME)
4743 attr_flags |= MOUNT_ATTR_NOATIME;
4744 else if (mnt_flags & MNT_RELATIME)
4745 attr_flags |= MOUNT_ATTR_RELATIME;
4746 else
4747 attr_flags |= MOUNT_ATTR_STRICTATIME;
4749 if (is_idmapped_mnt(mnt))
4750 attr_flags |= MOUNT_ATTR_IDMAP;
4755 static u64 mnt_to_propagation_flags(struct mount *m)
4757 u64 propagation = 0;
4759 if (IS_MNT_SHARED(m))
4760 propagation |= MS_SHARED;
4761 if (IS_MNT_SLAVE(m))
4762 propagation |= MS_SLAVE;
4763 if (IS_MNT_UNBINDABLE(m))
4764 propagation |= MS_UNBINDABLE;
4765 if (!propagation)
4766 propagation |= MS_PRIVATE;
4771 static void statmount_sb_basic(struct kstatmount *s)
4773 struct super_block *sb = s->mnt->mnt_sb;
4775 s->sm.mask |= STATMOUNT_SB_BASIC;
4776 s->sm.sb_dev_major = MAJOR(sb->s_dev);
4777 s->sm.sb_dev_minor = MINOR(sb->s_dev);
4778 s->sm.sb_magic = sb->s_magic;
4779 s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME);
4782 static void statmount_mnt_basic(struct kstatmount *s)
4784 struct mount *m = real_mount(s->mnt);
4786 s->sm.mask |= STATMOUNT_MNT_BASIC;
4787 s->sm.mnt_id = m->mnt_id_unique;
4788 s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique;
4789 s->sm.mnt_id_old = m->mnt_id;
4790 s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id;
4791 s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt);
4792 s->sm.mnt_propagation = mnt_to_propagation_flags(m);
4793 s->sm.mnt_peer_group = IS_MNT_SHARED(m) ? m->mnt_group_id : 0;
4794 s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0;
4797 static void statmount_propagate_from(struct kstatmount *s)
4799 struct mount *m = real_mount(s->mnt);
4801 s->sm.mask |= STATMOUNT_PROPAGATE_FROM;
4802 if (IS_MNT_SLAVE(m))
4803 s->sm.propagate_from = get_dominating_id(m, &current->fs->root);
4806 static int statmount_mnt_root(struct kstatmount *s, struct seq_file *seq)
4809 size_t start = seq->count;
4811 ret = show_path(seq, s->mnt->mnt_root);
4815 if (unlikely(seq_has_overflowed(seq)))
4819 * Unescape the result. It would be better if supplied string was not
4820 * escaped in the first place, but that's a pretty invasive change.
4822 seq->buf[seq->count] = '\0';
4824 seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
4828 static int statmount_mnt_point(struct kstatmount *s, struct seq_file *seq)
4830 struct vfsmount *mnt = s->mnt;
4831 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
4834 err = seq_path_root(seq, &mnt_path, &s->root, "");
4835 return err == SEQ_SKIP ? 0 : err;
4838 static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq)
4840 struct super_block *sb = s->mnt->mnt_sb;
4842 seq_puts(seq, sb->s_type->name);
4846 static int statmount_string(struct kstatmount *s, u64 flag)
4850 struct seq_file *seq = &s->seq;
4851 struct statmount *sm = &s->sm;
4854 case STATMOUNT_FS_TYPE:
4855 sm->fs_type = seq->count;
4856 ret = statmount_fs_type(s, seq);
4858 case STATMOUNT_MNT_ROOT:
4859 sm->mnt_root = seq->count;
4860 ret = statmount_mnt_root(s, seq);
4862 case STATMOUNT_MNT_POINT:
4863 sm->mnt_point = seq->count;
4864 ret = statmount_mnt_point(s, seq);
4871 if (unlikely(check_add_overflow(sizeof(*sm), seq->count, &kbufsize)))
4873 if (kbufsize >= s->bufsize)
4876 /* signal a retry */
4877 if (unlikely(seq_has_overflowed(seq)))
4883 seq->buf[seq->count++] = '\0';
4888 static int copy_statmount_to_user(struct kstatmount *s)
4890 struct statmount *sm = &s->sm;
4891 struct seq_file *seq = &s->seq;
4892 char __user *str = ((char __user *)s->buf) + sizeof(*sm);
4893 size_t copysize = min_t(size_t, s->bufsize, sizeof(*sm));
4895 if (seq->count && copy_to_user(str, seq->buf, seq->count))
4898 /* Return the number of bytes copied to the buffer */
4899 sm->size = copysize + seq->count;
4900 if (copy_to_user(s->buf, sm, copysize))
4906 static int do_statmount(struct kstatmount *s)
4908 struct mount *m = real_mount(s->mnt);
4912 * Don't trigger audit denials. We just want to determine what
4913 * mounts to show users.
4915 if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) &&
4916 !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN))
4919 err = security_sb_statfs(s->mnt->mnt_root);
4923 if (s->mask & STATMOUNT_SB_BASIC)
4924 statmount_sb_basic(s);
4926 if (s->mask & STATMOUNT_MNT_BASIC)
4927 statmount_mnt_basic(s);
4929 if (s->mask & STATMOUNT_PROPAGATE_FROM)
4930 statmount_propagate_from(s);
4932 if (s->mask & STATMOUNT_FS_TYPE)
4933 err = statmount_string(s, STATMOUNT_FS_TYPE);
4935 if (!err && s->mask & STATMOUNT_MNT_ROOT)
4936 err = statmount_string(s, STATMOUNT_MNT_ROOT);
4938 if (!err && s->mask & STATMOUNT_MNT_POINT)
4939 err = statmount_string(s, STATMOUNT_MNT_POINT);
4947 static inline bool retry_statmount(const long ret, size_t *seq_size)
4949 if (likely(ret != -EAGAIN))
4951 if (unlikely(check_mul_overflow(*seq_size, 2, seq_size)))
4953 if (unlikely(*seq_size > MAX_RW_COUNT))
4958 static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
4959 struct statmount __user *buf, size_t bufsize,
4962 if (!access_ok(buf, bufsize))
4965 memset(ks, 0, sizeof(*ks));
4966 ks->mask = kreq->param;
4968 ks->bufsize = bufsize;
4969 ks->seq.size = seq_size;
4970 ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT);
4976 static int copy_mnt_id_req(const struct mnt_id_req __user *req,
4977 struct mnt_id_req *kreq)
4982 BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER0);
4984 ret = get_user(usize, &req->size);
4987 if (unlikely(usize > PAGE_SIZE))
4989 if (unlikely(usize < MNT_ID_REQ_SIZE_VER0))
4991 memset(kreq, 0, sizeof(*kreq));
4992 ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
4995 if (kreq->spare != 0)
5000 SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
5001 struct statmount __user *, buf, size_t, bufsize,
5002 unsigned int, flags)
5004 struct vfsmount *mnt;
5005 struct mnt_id_req kreq;
5006 struct kstatmount ks;
5007 /* We currently support retrieval of 3 strings. */
5008 size_t seq_size = 3 * PATH_MAX;
5014 ret = copy_mnt_id_req(req, &kreq);
5019 ret = prepare_kstatmount(&ks, &kreq, buf, bufsize, seq_size);
5023 down_read(&namespace_sem);
5024 mnt = lookup_mnt_in_ns(kreq.mnt_id, current->nsproxy->mnt_ns);
5026 up_read(&namespace_sem);
5032 get_fs_root(current->fs, &ks.root);
5033 ret = do_statmount(&ks);
5035 up_read(&namespace_sem);
5038 ret = copy_statmount_to_user(&ks);
5040 if (retry_statmount(ret, &seq_size))
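/*
 * Illustrative sketch (hypothetical mount ID; no libc wrapper assumed,
 * hence syscall(2)): query one mount, retrying with a bigger buffer if
 * the kernel reports EOVERFLOW:
 *
 *	struct mnt_id_req req = {
 *		.size	= MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id	= mnt_id,	// 64-bit ID, e.g. from listmount()
 *		.param	= STATMOUNT_SB_BASIC | STATMOUNT_MNT_POINT,
 *	};
 *	char buf[4096];
 *	long ret = syscall(__NR_statmount, &req, buf, sizeof(buf), 0);
 */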

static struct mount *listmnt_next(struct mount *curr)
{
	return node_to_mount(rb_next(&curr->mnt_node));
}

static ssize_t do_listmount(struct mount *first, struct path *orig,
			    u64 mnt_parent_id, u64 __user *mnt_ids,
			    size_t nr_mnt_ids, const struct path *root)
{
	struct mount *r;
	ssize_t ret;

	/*
	 * Don't trigger audit denials. We just want to determine what
	 * mounts to show users.
	 */
	if (!is_path_reachable(real_mount(orig->mnt), orig->dentry, root) &&
	    !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	ret = security_sb_statfs(orig->dentry);
	if (ret)
		return ret;

	for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r)) {
		if (r->mnt_id_unique == mnt_parent_id)
			continue;
		if (!is_path_reachable(r, r->mnt.mnt_root, orig))
			continue;
		if (put_user(r->mnt_id_unique, mnt_ids))
			return -EFAULT;
		mnt_ids++;
		nr_mnt_ids--;
		ret++;
	}
	return ret;
}

SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, u64 __user *,
		mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mnt_id_req kreq;
	struct mount *first;
	struct path root, orig;
	u64 mnt_parent_id, last_mnt_id;
	const size_t maxcount = (size_t)-1 >> 3;
	ssize_t ret;

	if (flags)
		return -EINVAL;
	if (unlikely(nr_mnt_ids > maxcount))
		return -EFAULT;
	if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids)))
		return -EFAULT;

	ret = copy_mnt_id_req(req, &kreq);
	if (ret)
		return ret;
	mnt_parent_id = kreq.mnt_id;
	last_mnt_id = kreq.param;

	down_read(&namespace_sem);
	get_fs_root(current->fs, &root);
	if (mnt_parent_id == LSMT_ROOT) {
		orig = root;
	} else {
		ret = -ENOENT;
		orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
		if (!orig.mnt)
			goto err;
		orig.dentry = orig.mnt->mnt_root;
	}
	if (!last_mnt_id)
		first = node_to_mount(rb_first(&ns->mounts));
	else
		first = mnt_find_id_at(ns, last_mnt_id + 1);

	ret = do_listmount(first, &orig, mnt_parent_id, mnt_ids, nr_mnt_ids, &root);
err:
	path_put(&root);
	up_read(&namespace_sem);
	return ret;
}
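
/*
 * Hedged userspace usage sketch (assumes raw syscall(2) and the uapi
 * constants from <linux/mount.h>; LSMT_ROOT lists from the namespace root,
 * and kreq.param resumes after the last mount ID already seen):
 *
 *	struct mnt_id_req req = { .size = MNT_ID_REQ_SIZE_VER0,
 *				  .mnt_id = LSMT_ROOT, .param = 0 };
 *	uint64_t ids[64];
 *	ssize_t n;
 *
 *	while ((n = syscall(__NR_listmount, &req, ids, 64, 0)) > 0) {
 *		for (ssize_t i = 0; i < n; i++)
 *			printf("%llu\n", (unsigned long long)ids[i]);
 *		req.param = ids[n - 1];		// resume after the last ID
 *	}
 */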

static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mount *m;
	struct mnt_namespace *ns;
	struct path root;

	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = alloc_mnt_ns(&init_user_ns, false);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");
	m = real_mount(mnt);
	ns->root = m;
	ns->nr_mounts = 1;
	mnt_add_to_ns(ns, m);
	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				HASH_ZERO,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				HASH_ZERO,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");
	kernfs_init();
	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	shmem_init();
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!refcount_dec_and_test(&ns->ns.count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}

struct vfsmount *kern_mount(struct file_system_type *type)
{
	struct vfsmount *mnt;

	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
	if (!IS_ERR(mnt))
		/*
		 * This is a long-term mount; don't release it until the
		 * filesystem is about to be unregistered and we unmount it.
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount);

void kern_unmount(struct vfsmount *mnt)
{
	/* release long-term mount so the mount point can be released */
	if (!IS_ERR(mnt)) {
		mnt_make_shortterm(mnt);
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
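
/*
 * Hedged in-kernel usage sketch: a subsystem that needs a private,
 * long-lived internal mount typically pairs these helpers around the
 * lifetime of its filesystem type (names below are illustrative only):
 *
 *	static struct vfsmount *foo_mnt;
 *
 *	foo_mnt = kern_mount(&foo_fs_type);
 *	if (IS_ERR(foo_mnt))
 *		return PTR_ERR(foo_mnt);
 *	...
 *	kern_unmount(foo_mnt);	// before unregister_filesystem()
 */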

void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		mnt_make_shortterm(mnt[i]);
	synchronize_rcu_expedited();
	for (i = 0; i < num; i++)
		mntput(mnt[i]);
}
EXPORT_SYMBOL(kern_unmount_array);

bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root? */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);
	chrooted = !path_equal(&fs_root, &ns_root);
	path_put(&fs_root);
	path_put(&ns_root);
	return chrooted;
}

static bool mnt_already_visible(struct mnt_namespace *ns,
				const struct super_block *sb,
				int *new_mnt_flags)
{
	int new_flags = *new_mnt_flags;
	struct mount *mnt, *n;
	bool visible = false;

	down_read(&namespace_sem);
	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
		struct mount *child;
		int mnt_flags;

		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* A local view of the mount flags */
		mnt_flags = mnt->mnt.mnt_flags;

		/* Don't miss readonly hidden in the superblock flags */
		if (sb_rdonly(mnt->mnt.mnt_sb))
			mnt_flags |= MNT_LOCK_READONLY;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY |
					       MNT_LOCK_ATIME);
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}

static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, sb, new_mnt_flags);
}
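
/*
 * Illustrative scenario (editor's note, not normative): mounting a fresh
 * proc instance inside a new user namespace is only permitted when a fully
 * visible proc mount already exists in the caller's mount namespace; locked
 * flags such as read-only are inherited into *new_mnt_flags by
 * mnt_already_visible() above, so the new mount cannot reveal more than the
 * old one did.
 */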

bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid. This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}

static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);
	return ns;
}

static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}

static int mntns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct fs_struct *fs = nsset->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct user_namespace *user_ns = nsset->cred->user_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(user_ns, CAP_SYS_CHROOT) ||
	    !ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (is_anon_ns(mnt_ns))
		return -EINVAL;
	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
				"/", LOOKUP_DOWN, &root);
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}

static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};
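
/*
 * Hedged userspace usage sketch: these operations back /proc/<pid>/ns/mnt,
 * so a suitably privileged process can join another task's mount namespace
 * via setns(2); the pid below is illustrative and error handling is omitted:
 *
 *	int fd = open("/proc/1234/ns/mnt", O_RDONLY);
 *	setns(fd, CLONE_NEWNS);
 *	close(fd);
 */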

#ifdef CONFIG_SYSCTL
static struct ctl_table fs_namespace_sysctls[] = {
	{
		.procname	= "mount-max",
		.data		= &sysctl_mount_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
	},
};

static int __init init_fs_namespace_sysctls(void)
{
	register_sysctl_init("fs", fs_namespace_sysctls);
	return 0;
}
fs_initcall(init_fs_namespace_sysctls);

#endif /* CONFIG_SYSCTL */
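
/*
 * Illustrative usage note: the table above surfaces the per-namespace mount
 * limit as /proc/sys/fs/mount-max, so an administrator can raise it with,
 * for example:
 *
 *	echo 200000 > /proc/sys/fs/mount-max
 */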