1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_rtalloc.h"
15 #include "xfs_iwalk.h"
16 #include "xfs_itable.h"
17 #include "xfs_error.h"
20 #include "xfs_bmap_util.h"
21 #include "xfs_fsops.h"
22 #include "xfs_discard.h"
23 #include "xfs_quota.h"
24 #include "xfs_export.h"
25 #include "xfs_trace.h"
26 #include "xfs_icache.h"
27 #include "xfs_trans.h"
29 #include "xfs_btree.h"
30 #include <linux/fsmap.h>
31 #include "xfs_fsmap.h"
32 #include "scrub/xfs_scrub.h"
35 #include "xfs_health.h"
36 #include "xfs_reflink.h"
37 #include "xfs_ioctl.h"
38 #include "xfs_da_format.h"
39 #include "xfs_da_btree.h"
41 #include <linux/mount.h>
42 #include <linux/namei.h>
45 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
46 * a file or fs handle.
48 * XFS_IOC_PATH_TO_FSHANDLE
49 * returns fs handle for a mount point or path within that mount point
50 * XFS_IOC_FD_TO_HANDLE
51 * returns full handle for a FD opened in user space
52 * XFS_IOC_PATH_TO_HANDLE
53 * returns full handle for a path
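 *
 * An illustrative userspace sketch (an assumption, not part of this file;
 * it relies on the xfs_fsop_handlereq and XFS_IOC_* definitions from
 * xfs_fs.h and on "fd" being open on the target XFS filesystem):
 *
 *	struct xfs_fsop_handlereq hreq = { 0 };
 *	char fshandle[64];
 *	__u32 fshlen = 0;
 *
 *	hreq.path     = (void *)"/mnt/xfs";
 *	hreq.ohandle  = fshandle;
 *	hreq.ohandlen = &fshlen;
 *	if (ioctl(fd, XFS_IOC_PATH_TO_FSHANDLE, &hreq) < 0)
 *		perror("XFS_IOC_PATH_TO_FSHANDLE");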
58 xfs_fsop_handlereq_t *hreq)
68 if (cmd == XFS_IOC_FD_TO_HANDLE) {
72 inode = file_inode(f.file);
74 error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
77 inode = d_inode(path.dentry);
82 * We can only generate handles for inodes residing on an XFS filesystem,
83 * and only for regular files, directories or symbolic links.
86 if (inode->i_sb->s_magic != XFS_SB_MAGIC)
90 if (!S_ISREG(inode->i_mode) &&
91 !S_ISDIR(inode->i_mode) &&
92 !S_ISLNK(inode->i_mode))
96 memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));
98 if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
100 * This handle only contains an fsid, zero the rest.
102 memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
103 hsize = sizeof(xfs_fsid_t);
105 handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
106 sizeof(handle.ha_fid.fid_len);
107 handle.ha_fid.fid_pad = 0;
108 handle.ha_fid.fid_gen = inode->i_generation;
109 handle.ha_fid.fid_ino = ip->i_ino;
110 hsize = sizeof(xfs_handle_t);
114 if (copy_to_user(hreq->ohandle, &handle, hsize) ||
115 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
121 if (cmd == XFS_IOC_FD_TO_HANDLE)
129 * No need to do permission checks on the various pathname components
130 * as the handle operations are privileged.
133 xfs_handle_acceptable(
135 struct dentry *dentry)
141 * Convert userspace handle data into a dentry.
144 xfs_handle_to_dentry(
145 struct file *parfilp,
146 void __user *uhandle,
150 struct xfs_fid64 fid;
153 * Only allow handle opens under a directory.
155 if (!S_ISDIR(file_inode(parfilp)->i_mode))
156 return ERR_PTR(-ENOTDIR);
158 if (hlen != sizeof(xfs_handle_t))
159 return ERR_PTR(-EINVAL);
160 if (copy_from_user(&handle, uhandle, hlen))
161 return ERR_PTR(-EFAULT);
162 if (handle.ha_fid.fid_len !=
163 sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
164 return ERR_PTR(-EINVAL);
166 memset(&fid, 0, sizeof(struct fid));
167 fid.ino = handle.ha_fid.fid_ino;
168 fid.gen = handle.ha_fid.fid_gen;
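/*
 * Only the ino and gen fields of the xfs_fid64 are populated; they span
 * three 32-bit words, which is the handle length passed to
 * exportfs_decode_fh() below.  The fileid type combines FILEID_INO32_GEN
 * with XFS_FILEID_TYPE_64FLAG so that the XFS export operations treat the
 * inode number as 64 bits wide.
 */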
170 return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
171 FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
172 xfs_handle_acceptable, NULL);
175 STATIC struct dentry *
176 xfs_handlereq_to_dentry(
177 struct file *parfilp,
178 xfs_fsop_handlereq_t *hreq)
180 return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
185 struct file *parfilp,
186 xfs_fsop_handlereq_t *hreq)
188 const struct cred *cred = current_cred();
194 struct dentry *dentry;
198 if (!capable(CAP_SYS_ADMIN))
201 dentry = xfs_handlereq_to_dentry(parfilp, hreq);
203 return PTR_ERR(dentry);
204 inode = d_inode(dentry);
206 /* Restrict xfs_open_by_handle to directories & regular files. */
207 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
212 #if BITS_PER_LONG != 32
213 hreq->oflags |= O_LARGEFILE;
216 permflag = hreq->oflags;
217 fmode = OPEN_FMODE(permflag);
218 if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
219 (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
224 if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
229 /* Can't write directories. */
230 if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
235 fd = get_unused_fd_flags(0);
241 path.mnt = parfilp->f_path.mnt;
242 path.dentry = dentry;
243 filp = dentry_open(&path, hreq->oflags, cred);
247 return PTR_ERR(filp);
250 if (S_ISREG(inode->i_mode)) {
251 filp->f_flags |= O_NOATIME;
252 filp->f_mode |= FMODE_NOCMTIME;
255 fd_install(fd, filp);
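/*
 * An illustrative sketch of the caller's side (assumptions: the
 * xfs_fsop_handlereq definition from xfs_fs.h, CAP_SYS_ADMIN, "dirfd" open
 * on a directory in the target filesystem, and "handle"/"hlen" previously
 * obtained via XFS_IOC_PATH_TO_HANDLE or XFS_IOC_FD_TO_HANDLE):
 *
 *	struct xfs_fsop_handlereq hreq = { 0 };
 *	int newfd;
 *
 *	hreq.ihandle  = handle;
 *	hreq.ihandlen = hlen;
 *	hreq.oflags   = O_RDONLY;
 *	newfd = ioctl(dirfd, XFS_IOC_OPEN_BY_HANDLE, &hreq);
 *
 * On success the ioctl returns the new file descriptor (a positive value),
 * not zero.
 */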
264 xfs_readlink_by_handle(
265 struct file *parfilp,
266 xfs_fsop_handlereq_t *hreq)
268 struct dentry *dentry;
272 if (!capable(CAP_SYS_ADMIN))
275 dentry = xfs_handlereq_to_dentry(parfilp, hreq);
277 return PTR_ERR(dentry);
279 /* Restrict this handle operation to symlinks only. */
280 if (!d_is_symlink(dentry)) {
285 if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
290 error = vfs_readlink(dentry, hreq->ohandle, olen);
298 * Format an attribute and copy it out to the user's buffer.
299 * Take care to check values and protect against them changing later;
300 * we may be reading them directly out of a user buffer.
303 xfs_ioc_attr_put_listent(
304 struct xfs_attr_list_context *context,
310 struct xfs_attrlist *alist = context->buffer;
311 struct xfs_attrlist_ent *aep;
314 ASSERT(!context->seen_enough);
315 ASSERT(context->count >= 0);
316 ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
317 ASSERT(context->firstu >= sizeof(*alist));
318 ASSERT(context->firstu <= context->bufsize);
321 * Only list entries in the right namespace.
323 if (context->attr_filter != (flags & XFS_ATTR_NSP_ONDISK_MASK))
326 arraytop = sizeof(*alist) +
327 context->count * sizeof(alist->al_offset[0]);
329 /* decrement by the actual bytes used by the attr */
330 context->firstu -= round_up(offsetof(struct xfs_attrlist_ent, a_name) +
331 namelen + 1, sizeof(uint32_t));
332 if (context->firstu < arraytop) {
333 trace_xfs_attr_list_full(context);
335 context->seen_enough = 1;
339 aep = context->buffer + context->firstu;
340 aep->a_valuelen = valuelen;
341 memcpy(aep->a_name, name, namelen);
342 aep->a_name[namelen] = 0;
343 alist->al_offset[context->count++] = context->firstu;
344 alist->al_count = context->count;
345 trace_xfs_attr_list_add(context);
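/*
 * The resulting user buffer layout, as built above: the xfs_attrlist
 * header and its al_offset[] array grow up from offset zero, while the
 * variable-size xfs_attrlist_ent records are packed down from the end of
 * the buffer; context->firstu tracks the low edge of the entry area and
 * listing stops when the two regions would meet:
 *
 *	[ header | al_offset[0..count-1] | ...free... | ent(n) | ... | ent(0) ]
 *	0                            arraytop      firstu               bufsize
 */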
352 if (ioc_flags & XFS_IOC_ATTR_ROOT)
353 return XFS_ATTR_ROOT;
354 if (ioc_flags & XFS_IOC_ATTR_SECURE)
355 return XFS_ATTR_SECURE;
363 if (ioc_flags & XFS_IOC_ATTR_CREATE)
365 if (ioc_flags & XFS_IOC_ATTR_REPLACE)
366 return XATTR_REPLACE;
372 struct xfs_inode *dp,
376 struct xfs_attrlist_cursor __user *ucursor)
378 struct xfs_attr_list_context context = { };
379 struct xfs_attrlist *alist;
383 if (bufsize < sizeof(struct xfs_attrlist) ||
384 bufsize > XFS_XATTR_LIST_MAX)
388 * Reject flags, only allow namespaces.
390 if (flags & ~(XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
392 if (flags == (XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
396 * Validate the cursor.
398 if (copy_from_user(&context.cursor, ucursor, sizeof(context.cursor)))
400 if (context.cursor.pad1 || context.cursor.pad2)
402 if (!context.cursor.initted &&
403 (context.cursor.hashval || context.cursor.blkno ||
404 context.cursor.offset))
407 buffer = kmem_zalloc_large(bufsize, 0);
412 * Initialize the output buffer.
416 context.attr_filter = xfs_attr_filter(flags);
417 context.buffer = buffer;
418 context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */
419 context.firstu = context.bufsize;
420 context.put_listent = xfs_ioc_attr_put_listent;
422 alist = context.buffer;
425 alist->al_offset[0] = context.bufsize;
427 error = xfs_attr_list(&context);
431 if (copy_to_user(ubuf, buffer, bufsize) ||
432 copy_to_user(ucursor, &context.cursor, sizeof(context.cursor)))
440 xfs_attrlist_by_handle(
441 struct file *parfilp,
442 struct xfs_fsop_attrlist_handlereq __user *p)
444 struct xfs_fsop_attrlist_handlereq al_hreq;
445 struct dentry *dentry;
448 if (!capable(CAP_SYS_ADMIN))
450 if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
453 dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
455 return PTR_ERR(dentry);
457 error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)), al_hreq.buffer,
458 al_hreq.buflen, al_hreq.flags, &p->pos);
464 xfs_attrmulti_attr_get(
467 unsigned char __user *ubuf,
471 struct xfs_da_args args = {
473 .attr_filter = xfs_attr_filter(flags),
474 .attr_flags = xfs_attr_flags(flags),
476 .namelen = strlen(name),
481 if (*len > XFS_XATTR_SIZE_MAX)
484 args.value = kmem_zalloc_large(*len, 0);
488 error = xfs_attr_get(&args);
492 *len = args.valuelen;
493 if (copy_to_user(ubuf, args.value, args.valuelen))
497 kmem_free(args.value);
502 xfs_attrmulti_attr_set(
505 const unsigned char __user *ubuf,
509 struct xfs_da_args args = {
511 .attr_filter = xfs_attr_filter(flags),
512 .attr_flags = xfs_attr_flags(flags),
514 .namelen = strlen(name),
518 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
522 if (len > XFS_XATTR_SIZE_MAX)
524 args.value = memdup_user(ubuf, len);
525 if (IS_ERR(args.value))
526 return PTR_ERR(args.value);
530 error = xfs_attr_set(&args);
531 if (!error && (flags & XFS_IOC_ATTR_ROOT))
532 xfs_forget_acl(inode, name);
538 xfs_ioc_attrmulti_one(
539 struct file *parfilp,
550 if ((flags & XFS_IOC_ATTR_ROOT) && (flags & XFS_IOC_ATTR_SECURE))
553 name = strndup_user(uname, MAXNAMELEN);
555 return PTR_ERR(name);
559 error = xfs_attrmulti_attr_get(inode, name, value, len, flags);
566 error = mnt_want_write_file(parfilp);
569 error = xfs_attrmulti_attr_set(inode, name, value, *len, flags);
570 mnt_drop_write_file(parfilp);
582 xfs_attrmulti_by_handle(
583 struct file *parfilp,
587 xfs_attr_multiop_t *ops;
588 xfs_fsop_attrmulti_handlereq_t am_hreq;
589 struct dentry *dentry;
590 unsigned int i, size;
592 if (!capable(CAP_SYS_ADMIN))
594 if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
598 if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
601 dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
603 return PTR_ERR(dentry);
606 size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
607 if (!size || size > 16 * PAGE_SIZE)
610 ops = memdup_user(am_hreq.ops, size);
612 error = PTR_ERR(ops);
617 for (i = 0; i < am_hreq.opcount; i++) {
618 ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
619 d_inode(dentry), ops[i].am_opcode,
620 ops[i].am_attrname, ops[i].am_attrvalue,
621 &ops[i].am_length, ops[i].am_flags);
624 if (copy_to_user(am_hreq.ops, ops, size))
638 struct inode *inode = file_inode(filp);
639 struct xfs_inode *ip = XFS_I(inode);
641 enum xfs_prealloc_flags flags = XFS_PREALLOC_CLEAR;
642 uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
645 if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
648 if (!(filp->f_mode & FMODE_WRITE))
651 if (!S_ISREG(inode->i_mode))
654 if (xfs_is_always_cow_inode(ip))
657 if (filp->f_flags & O_DSYNC)
658 flags |= XFS_PREALLOC_SYNC;
659 if (filp->f_mode & FMODE_NOCMTIME)
660 flags |= XFS_PREALLOC_INVISIBLE;
662 error = mnt_want_write_file(filp);
666 xfs_ilock(ip, iolock);
667 error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
670 inode_dio_wait(inode);
672 switch (bf->l_whence) {
676 bf->l_start += filp->f_pos;
679 bf->l_start += XFS_ISIZE(ip);
686 if (bf->l_start < 0 || bf->l_start > inode->i_sb->s_maxbytes) {
691 if (bf->l_start > XFS_ISIZE(ip)) {
692 error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
693 bf->l_start - XFS_ISIZE(ip), 0);
698 iattr.ia_valid = ATTR_SIZE;
699 iattr.ia_size = bf->l_start;
700 error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
704 error = xfs_update_prealloc_flags(ip, flags);
707 xfs_iunlock(ip, iolock);
708 mnt_drop_write_file(filp);
712 /* Return 0 on success or negative error */
714 xfs_fsbulkstat_one_fmt(
715 struct xfs_ibulk *breq,
716 const struct xfs_bulkstat *bstat)
718 struct xfs_bstat bs1;
720 xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
721 if (copy_to_user(breq->ubuffer, &bs1, sizeof(bs1)))
723 return xfs_ibulk_advance(breq, sizeof(struct xfs_bstat));
728 struct xfs_ibulk *breq,
729 const struct xfs_inumbers *igrp)
731 struct xfs_inogrp ig1;
733 xfs_inumbers_to_inogrp(&ig1, igrp);
734 if (copy_to_user(breq->ubuffer, &ig1, sizeof(struct xfs_inogrp)))
736 return xfs_ibulk_advance(breq, sizeof(struct xfs_inogrp));
745 struct xfs_fsop_bulkreq bulkreq;
746 struct xfs_ibulk breq = {
753 /* done = 1 if there are more stats to get and if bulkstat */
754 /* should be called again (unused here, but used in dmapi) */
756 if (!capable(CAP_SYS_ADMIN))
759 if (XFS_FORCED_SHUTDOWN(mp))
762 if (copy_from_user(&bulkreq, arg, sizeof(struct xfs_fsop_bulkreq)))
765 if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
768 if (bulkreq.icount <= 0)
771 if (bulkreq.ubuffer == NULL)
774 breq.ubuffer = bulkreq.ubuffer;
775 breq.icount = bulkreq.icount;
778 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
779 * that we want to stat. However, FSINUMBERS and FSBULKSTAT expect
780 * that *lastip contains either zero or the number of the last inode to
781 * be examined by the previous call and return results starting with
782 * the next inode after that. The new bulk request back end functions
783 * take the inode to start with, so we have to compute the startino
784 * parameter from lastino to maintain correct function. lastino == 0
785 * is a special case because it has traditionally meant "first inode
788 if (cmd == XFS_IOC_FSINUMBERS) {
789 breq.startino = lastino ? lastino + 1 : 0;
790 error = xfs_inumbers(&breq, xfs_fsinumbers_fmt);
791 lastino = breq.startino - 1;
792 } else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) {
793 breq.startino = lastino;
795 error = xfs_bulkstat_one(&breq, xfs_fsbulkstat_one_fmt);
796 } else { /* XFS_IOC_FSBULKSTAT */
797 breq.startino = lastino ? lastino + 1 : 0;
798 error = xfs_bulkstat(&breq, xfs_fsbulkstat_one_fmt);
799 lastino = breq.startino - 1;
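/*
 * Example of the cursor conversion above: a FSBULKSTAT caller whose
 * previous call ended at inode 131 passes *lastip == 131, so the walk
 * resumes at startino 132; afterwards breq.startino points at the next
 * inode to examine, and startino - 1 is written back so the caller's
 * "last inode seen" convention still holds.  A first call passes
 * *lastip == 0 and starts from the beginning of the filesystem.
 */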
805 if (bulkreq.lastip != NULL &&
806 copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
809 if (bulkreq.ocount != NULL &&
810 copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
816 /* Return 0 on success or negative error */
819 struct xfs_ibulk *breq,
820 const struct xfs_bulkstat *bstat)
822 if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
824 return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
828 * Check the incoming bulk request @hdr from userspace and initialize the
829 * internal @breq bulk request appropriately. Returns 0 if the bulk request
830 * should proceed; -ECANCELED if there's nothing to do; or the usual
831 * negative error code.
835 struct xfs_mount *mp,
836 struct xfs_bulk_ireq *hdr,
837 struct xfs_ibulk *breq,
838 void __user *ubuffer)
840 if (hdr->icount == 0 ||
841 (hdr->flags & ~XFS_BULK_IREQ_FLAGS_ALL) ||
842 memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
845 breq->startino = hdr->ino;
846 breq->ubuffer = ubuffer;
847 breq->icount = hdr->icount;
852 * The @ino parameter is a special value, so we must look it up here.
853 * We're not allowed to have IREQ_AGNO, and we only return one inode
856 if (hdr->flags & XFS_BULK_IREQ_SPECIAL) {
857 if (hdr->flags & XFS_BULK_IREQ_AGNO)
861 case XFS_BULK_IREQ_SPECIAL_ROOT:
862 hdr->ino = mp->m_sb.sb_rootino;
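/*
 * e.g. a caller that sets XFS_BULK_IREQ_SPECIAL in hdr.flags and
 * XFS_BULK_IREQ_SPECIAL_ROOT in hdr.ino gets back a single record for the
 * root directory without having to know the root inode number.
 */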
871 * The IREQ_AGNO flag means that we only want results from a given AG.
872 * If @hdr->ino is zero, we start iterating in that AG. If @hdr->ino is
873 * beyond the specified AG then we return no results.
875 if (hdr->flags & XFS_BULK_IREQ_AGNO) {
876 if (hdr->agno >= mp->m_sb.sb_agcount)
879 if (breq->startino == 0)
880 breq->startino = XFS_AGINO_TO_INO(mp, hdr->agno, 0);
881 else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)
884 breq->flags |= XFS_IBULK_SAME_AG;
886 /* Asking for an inode past the end of the AG? We're done! */
887 if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
889 } else if (hdr->agno)
892 /* Asking for an inode past the end of the FS? We're done! */
893 if (XFS_INO_TO_AGNO(mp, breq->startino) >= mp->m_sb.sb_agcount)
900 * Update the userspace bulk request @hdr to reflect the end state of the
901 * internal bulk request @breq.
904 xfs_bulk_ireq_teardown(
905 struct xfs_bulk_ireq *hdr,
906 struct xfs_ibulk *breq)
908 hdr->ino = breq->startino;
909 hdr->ocount = breq->ocount;
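/*
 * From the caller's side, the hdr round trip handled by the setup and
 * teardown helpers above looks roughly like this (an illustrative sketch,
 * assuming the struct xfs_bulkstat_req and XFS_IOC_BULKSTAT definitions
 * from xfs_fs.h and "fd" open on a file in the target filesystem):
 *
 *	unsigned int nr = 64;
 *	struct xfs_bulkstat_req *req;
 *
 *	req = calloc(1, sizeof(*req) + nr * sizeof(struct xfs_bulkstat));
 *	req->hdr.icount = nr;
 *	req->hdr.ino = 0;			// start of the filesystem
 *	while (!ioctl(fd, XFS_IOC_BULKSTAT, req) && req->hdr.ocount) {
 *		// consume req->bulkstat[0 .. hdr.ocount - 1]
 *		// hdr.ino already points at the next inode to examine
 *	}
 */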
912 /* Handle the v5 bulkstat ioctl. */
915 struct xfs_mount *mp,
917 struct xfs_bulkstat_req __user *arg)
919 struct xfs_bulk_ireq hdr;
920 struct xfs_ibulk breq = {
925 if (!capable(CAP_SYS_ADMIN))
928 if (XFS_FORCED_SHUTDOWN(mp))
931 if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
934 error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->bulkstat);
935 if (error == -ECANCELED)
940 error = xfs_bulkstat(&breq, xfs_bulkstat_fmt);
945 xfs_bulk_ireq_teardown(&hdr, &breq);
946 if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
954 struct xfs_ibulk *breq,
955 const struct xfs_inumbers *igrp)
957 if (copy_to_user(breq->ubuffer, igrp, sizeof(struct xfs_inumbers)))
959 return xfs_ibulk_advance(breq, sizeof(struct xfs_inumbers));
962 /* Handle the v5 inumbers ioctl. */
965 struct xfs_mount *mp,
967 struct xfs_inumbers_req __user *arg)
969 struct xfs_bulk_ireq hdr;
970 struct xfs_ibulk breq = {
975 if (!capable(CAP_SYS_ADMIN))
978 if (XFS_FORCED_SHUTDOWN(mp))
981 if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
984 error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->inumbers);
985 if (error == -ECANCELED)
990 error = xfs_inumbers(&breq, xfs_inumbers_fmt);
995 xfs_bulk_ireq_teardown(&hdr, &breq);
996 if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
1004 struct xfs_mount *mp,
1008 struct xfs_fsop_geom fsgeo;
1011 xfs_fs_geometry(&mp->m_sb, &fsgeo, struct_version);
1013 if (struct_version <= 3)
1014 len = sizeof(struct xfs_fsop_geom_v1);
1015 else if (struct_version == 4)
1016 len = sizeof(struct xfs_fsop_geom_v4);
1018 xfs_fsop_geom_health(mp, &fsgeo);
1019 len = sizeof(fsgeo);
1022 if (copy_to_user(arg, &fsgeo, len))
1028 xfs_ioc_ag_geometry(
1029 struct xfs_mount *mp,
1032 struct xfs_ag_geometry ageo;
1035 if (copy_from_user(&ageo, arg, sizeof(ageo)))
1039 if (memchr_inv(&ageo.ag_reserved, 0, sizeof(ageo.ag_reserved)))
1042 error = xfs_ag_get_geometry(mp, ageo.ag_number, &ageo);
1046 if (copy_to_user(arg, &ageo, sizeof(ageo)))
1052 * Linux extended inode flags interface.
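 *
 * Three flag namespaces meet in the helpers below: the FS_*_FL bits used
 * by the GETXFLAGS/SETXFLAGS ioctls, the FS_XFLAG_* bits used by
 * FSGETXATTR/FSSETXATTR, and the on-disk XFS_DIFLAG_*/XFS_DIFLAG2_* bits;
 * the routines that follow translate between these representations.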
1056 xfs_merge_ioc_xflags(
1060 unsigned int xflags = start;
1062 if (flags & FS_IMMUTABLE_FL)
1063 xflags |= FS_XFLAG_IMMUTABLE;
1065 xflags &= ~FS_XFLAG_IMMUTABLE;
1066 if (flags & FS_APPEND_FL)
1067 xflags |= FS_XFLAG_APPEND;
1069 xflags &= ~FS_XFLAG_APPEND;
1070 if (flags & FS_SYNC_FL)
1071 xflags |= FS_XFLAG_SYNC;
1073 xflags &= ~FS_XFLAG_SYNC;
1074 if (flags & FS_NOATIME_FL)
1075 xflags |= FS_XFLAG_NOATIME;
1077 xflags &= ~FS_XFLAG_NOATIME;
1078 if (flags & FS_NODUMP_FL)
1079 xflags |= FS_XFLAG_NODUMP;
1081 xflags &= ~FS_XFLAG_NODUMP;
1090 unsigned int flags = 0;
1092 if (di_flags & XFS_DIFLAG_IMMUTABLE)
1093 flags |= FS_IMMUTABLE_FL;
1094 if (di_flags & XFS_DIFLAG_APPEND)
1095 flags |= FS_APPEND_FL;
1096 if (di_flags & XFS_DIFLAG_SYNC)
1097 flags |= FS_SYNC_FL;
1098 if (di_flags & XFS_DIFLAG_NOATIME)
1099 flags |= FS_NOATIME_FL;
1100 if (di_flags & XFS_DIFLAG_NODUMP)
1101 flags |= FS_NODUMP_FL;
1107 struct xfs_inode *ip,
1111 simple_fill_fsxattr(fa, xfs_ip2xflags(ip));
1112 fa->fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
1113 fa->fsx_cowextsize = ip->i_d.di_cowextsize <<
1114 ip->i_mount->m_sb.sb_blocklog;
1115 fa->fsx_projid = ip->i_d.di_projid;
1119 if (ip->i_afp->if_flags & XFS_IFEXTENTS)
1120 fa->fsx_nextents = xfs_iext_count(ip->i_afp);
1122 fa->fsx_nextents = ip->i_d.di_anextents;
1124 fa->fsx_nextents = 0;
1126 if (ip->i_df.if_flags & XFS_IFEXTENTS)
1127 fa->fsx_nextents = xfs_iext_count(&ip->i_df);
1129 fa->fsx_nextents = ip->i_d.di_nextents;
1141 xfs_ilock(ip, XFS_ILOCK_SHARED);
1142 xfs_fill_fsxattr(ip, attr, &fa);
1143 xfs_iunlock(ip, XFS_ILOCK_SHARED);
1145 if (copy_to_user(arg, &fa, sizeof(fa)))
1152 struct xfs_inode *ip,
1153 unsigned int xflags)
1155 /* can't set PREALLOC this way, just preserve it */
1157 (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
1159 if (xflags & FS_XFLAG_IMMUTABLE)
1160 di_flags |= XFS_DIFLAG_IMMUTABLE;
1161 if (xflags & FS_XFLAG_APPEND)
1162 di_flags |= XFS_DIFLAG_APPEND;
1163 if (xflags & FS_XFLAG_SYNC)
1164 di_flags |= XFS_DIFLAG_SYNC;
1165 if (xflags & FS_XFLAG_NOATIME)
1166 di_flags |= XFS_DIFLAG_NOATIME;
1167 if (xflags & FS_XFLAG_NODUMP)
1168 di_flags |= XFS_DIFLAG_NODUMP;
1169 if (xflags & FS_XFLAG_NODEFRAG)
1170 di_flags |= XFS_DIFLAG_NODEFRAG;
1171 if (xflags & FS_XFLAG_FILESTREAM)
1172 di_flags |= XFS_DIFLAG_FILESTREAM;
1173 if (S_ISDIR(VFS_I(ip)->i_mode)) {
1174 if (xflags & FS_XFLAG_RTINHERIT)
1175 di_flags |= XFS_DIFLAG_RTINHERIT;
1176 if (xflags & FS_XFLAG_NOSYMLINKS)
1177 di_flags |= XFS_DIFLAG_NOSYMLINKS;
1178 if (xflags & FS_XFLAG_EXTSZINHERIT)
1179 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1180 if (xflags & FS_XFLAG_PROJINHERIT)
1181 di_flags |= XFS_DIFLAG_PROJINHERIT;
1182 } else if (S_ISREG(VFS_I(ip)->i_mode)) {
1183 if (xflags & FS_XFLAG_REALTIME)
1184 di_flags |= XFS_DIFLAG_REALTIME;
1185 if (xflags & FS_XFLAG_EXTSIZE)
1186 di_flags |= XFS_DIFLAG_EXTSIZE;
1194 struct xfs_inode *ip,
1195 unsigned int xflags)
1197 uint64_t di_flags2 =
1198 (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK);
1200 if (xflags & FS_XFLAG_DAX)
1201 di_flags2 |= XFS_DIFLAG2_DAX;
1202 if (xflags & FS_XFLAG_COWEXTSIZE)
1203 di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
1209 xfs_diflags_to_linux(
1210 struct xfs_inode *ip)
1212 struct inode *inode = VFS_I(ip);
1213 unsigned int xflags = xfs_ip2xflags(ip);
1215 if (xflags & FS_XFLAG_IMMUTABLE)
1216 inode->i_flags |= S_IMMUTABLE;
1218 inode->i_flags &= ~S_IMMUTABLE;
1219 if (xflags & FS_XFLAG_APPEND)
1220 inode->i_flags |= S_APPEND;
1222 inode->i_flags &= ~S_APPEND;
1223 if (xflags & FS_XFLAG_SYNC)
1224 inode->i_flags |= S_SYNC;
1226 inode->i_flags &= ~S_SYNC;
1227 if (xflags & FS_XFLAG_NOATIME)
1228 inode->i_flags |= S_NOATIME;
1230 inode->i_flags &= ~S_NOATIME;
1231 #if 0 /* disabled until the flag switching races are sorted out */
1232 if (xflags & FS_XFLAG_DAX)
1233 inode->i_flags |= S_DAX;
1235 inode->i_flags &= ~S_DAX;
1240 xfs_ioctl_setattr_xflags(
1241 struct xfs_trans *tp,
1242 struct xfs_inode *ip,
1245 struct xfs_mount *mp = ip->i_mount;
1248 /* Can't change realtime flag if any extents are allocated. */
1249 if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
1250 XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
1253 /* If realtime flag is set then must have realtime device */
1254 if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
1255 if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
1256 (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
1260 /* Clear reflink if we are actually able to set the rt flag. */
1261 if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
1262 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1264 /* Don't allow us to set DAX mode for a reflinked file for now. */
1265 if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip))
1268 /* diflags2 only valid for v3 inodes. */
1269 di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
1270 if (di_flags2 && ip->i_d.di_version < 3)
1273 ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
1274 ip->i_d.di_flags2 = di_flags2;
1276 xfs_diflags_to_linux(ip);
1277 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1278 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1279 XFS_STATS_INC(mp, xs_ig_attrchg);
1284 * If we are changing DAX flags, we have to ensure the file is clean and any
1285 * cached objects in the address space are invalidated and removed. This
1286 * requires us to lock out other IO and page faults similar to a truncate
1287 * operation. The locks need to be held until the transaction has been committed
1288 * so that the cache invalidation is atomic with respect to the DAX flag
1292 xfs_ioctl_setattr_dax_invalidate(
1293 struct xfs_inode *ip,
1297 struct inode *inode = VFS_I(ip);
1298 struct super_block *sb = inode->i_sb;
1304 * It is only valid to set the DAX flag on regular files and
1305 * directories on filesystems where the block size is equal to the page
1306 * size. On directories it serves as an inherited hint so we don't
1307 * have to check the device for dax support or flush pagecache.
1309 if (fa->fsx_xflags & FS_XFLAG_DAX) {
1310 struct xfs_buftarg *target = xfs_inode_buftarg(ip);
1312 if (!bdev_dax_supported(target->bt_bdev, sb->s_blocksize))
1316 /* If the DAX state is not changing, we have nothing to do here. */
1317 if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode))
1319 if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode))
1322 if (S_ISDIR(inode->i_mode))
1325 /* lock, flush and invalidate mapping in preparation for flag change */
1326 xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
1327 error = filemap_write_and_wait(inode->i_mapping);
1330 error = invalidate_inode_pages2(inode->i_mapping);
1334 *join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL;
1338 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
1344 * Set up the transaction structure for the setattr operation, checking that we
1345 * have permission to do so. On success, return a clean transaction and the
1346 * inode locked exclusively ready for further operation specific checks. On
1347 * failure, return an error without modifying or locking the inode.
1349 * The inode might already be IO locked on call. If this is the case, it is
1350 * indicated in @join_flags and we take full responsibility for ensuring those
1351 * locks are dropped from now on. Hence if we have an error here, we still have to
1352 * unlock them. Otherwise, once they are joined to the transaction, they will
1353 * be unlocked on commit/cancel.
1355 static struct xfs_trans *
1356 xfs_ioctl_setattr_get_trans(
1357 struct xfs_inode *ip,
1360 struct xfs_mount *mp = ip->i_mount;
1361 struct xfs_trans *tp;
1364 if (mp->m_flags & XFS_MOUNT_RDONLY)
1367 if (XFS_FORCED_SHUTDOWN(mp))
1370 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
1374 xfs_ilock(ip, XFS_ILOCK_EXCL);
1375 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
1379 * CAP_FOWNER overrides the following restrictions:
1381 * The user ID of the calling process must be equal to the file owner
1382 * ID, except in cases where the CAP_FSETID capability is applicable.
1384 if (!inode_owner_or_capable(VFS_I(ip))) {
1389 if (mp->m_flags & XFS_MOUNT_WSYNC)
1390 xfs_trans_set_sync(tp);
1395 xfs_trans_cancel(tp);
1398 xfs_iunlock(ip, join_flags);
1399 return ERR_PTR(error);
1403 * extent size hint validation is somewhat cumbersome. Rules are:
1405 * 1. extent size hint is only valid for directories and regular files
1406 * 2. FS_XFLAG_EXTSIZE is only valid for regular files
1407 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
1408 * 4. can only be changed on regular files if no extents are allocated
1409 * 5. can be changed on directories at any time
1410 * 6. extsize hint of 0 turns off hints, clears inode flags.
1411 * 7. Extent size must be a multiple of the appropriate block size.
1412 * 8. for non-realtime files, the extent size hint must be limited
1413 * to half the AG size to avoid alignment extending the extent beyond the
1416 * Please keep this function in sync with xfs_scrub_inode_extsize.
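 *
 * Worked example for rules 7 and 8 (illustrative numbers only): on a
 * filesystem with 4096-byte blocks and 1048576-block AGs, a non-realtime
 * file may ask for fsx_extsize = 1048576 bytes (256 blocks, a multiple of
 * the block size), while 1050000 bytes fails rule 7 and anything larger
 * than 2147483648 bytes (524288 blocks, half the AG) fails rule 8.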
1419 xfs_ioctl_setattr_check_extsize(
1420 struct xfs_inode *ip,
1423 struct xfs_mount *mp = ip->i_mount;
1425 xfs_fsblock_t extsize_fsb;
1427 if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents &&
1428 ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
1431 if (fa->fsx_extsize == 0)
1434 extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
1435 if (extsize_fsb > MAXEXTLEN)
1438 if (XFS_IS_REALTIME_INODE(ip) ||
1439 (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
1440 size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
1442 size = mp->m_sb.sb_blocksize;
1443 if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
1447 if (fa->fsx_extsize % size)
1454 * CoW extent size hint validation rules are:
1456 * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
1457 * The inode does not have to have any shared blocks, but it must be a v3.
1458 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
1459 * for a directory, the hint is propagated to new files.
1460 * 3. Can be changed on files & directories at any time.
1461 * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
1462 * 5. Extent size must be a multiple of the appropriate block size.
1463 * 6. The extent size hint must be limited to half the AG size to avoid
1464 * alignment extending the extent beyond the limits of the AG.
1466 * Please keep this function in sync with xfs_scrub_inode_cowextsize.
1469 xfs_ioctl_setattr_check_cowextsize(
1470 struct xfs_inode *ip,
1473 struct xfs_mount *mp = ip->i_mount;
1475 xfs_fsblock_t cowextsize_fsb;
1477 if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
1480 if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) ||
1481 ip->i_d.di_version != 3)
1484 if (fa->fsx_cowextsize == 0)
1487 cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
1488 if (cowextsize_fsb > MAXEXTLEN)
1491 size = mp->m_sb.sb_blocksize;
1492 if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
1495 if (fa->fsx_cowextsize % size)
1502 xfs_ioctl_setattr_check_projid(
1503 struct xfs_inode *ip,
1506 /* Disallow 32bit project ids if projid32bit feature is not enabled. */
1507 if (fa->fsx_projid > (uint16_t)-1 &&
1508 !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
1518 struct fsxattr old_fa;
1519 struct xfs_mount *mp = ip->i_mount;
1520 struct xfs_trans *tp;
1521 struct xfs_dquot *udqp = NULL;
1522 struct xfs_dquot *pdqp = NULL;
1523 struct xfs_dquot *olddquot = NULL;
1527 trace_xfs_ioctl_setattr(ip);
1529 code = xfs_ioctl_setattr_check_projid(ip, fa);
1534 * If disk quotas are on, we make sure that the dquots do exist on disk,
1535 * before we start any other transactions. Trying to do this later
1536 * is messy. We don't care to take a readlock to look at the ids
1537 * in inode here, because we can't hold it across the trans_reserve.
1538 * If the IDs do change before we take the ilock, we're covered
1539 * because the i_*dquot fields will get updated anyway.
1541 if (XFS_IS_QUOTA_ON(mp)) {
1542 code = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
1543 VFS_I(ip)->i_gid, fa->fsx_projid,
1544 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
1550 * Changing DAX config may require inode locking for mapping
1551 * invalidation. These need to be held all the way to transaction commit
1552 * or cancel time, so need to be passed through to
1553 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
1556 code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
1558 goto error_free_dquots;
1560 tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
1563 goto error_free_dquots;
1566 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
1567 ip->i_d.di_projid != fa->fsx_projid) {
1568 code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
1569 capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
1570 if (code) /* out of quota */
1571 goto error_trans_cancel;
1574 xfs_fill_fsxattr(ip, false, &old_fa);
1575 code = vfs_ioc_fssetxattr_check(VFS_I(ip), &old_fa, fa);
1577 goto error_trans_cancel;
1579 code = xfs_ioctl_setattr_check_extsize(ip, fa);
1581 goto error_trans_cancel;
1583 code = xfs_ioctl_setattr_check_cowextsize(ip, fa);
1585 goto error_trans_cancel;
1587 code = xfs_ioctl_setattr_xflags(tp, ip, fa);
1589 goto error_trans_cancel;
1592 * Change file ownership. Must be the owner or privileged. CAP_FSETID
1593 * overrides the following restrictions:
1595 * The set-user-ID and set-group-ID bits of a file will be cleared upon
1596 * successful return from chown()
1599 if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
1600 !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
1601 VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
1603 /* Change the ownerships and register project quota modifications */
1604 if (ip->i_d.di_projid != fa->fsx_projid) {
1605 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
1606 olddquot = xfs_qm_vop_chown(tp, ip,
1607 &ip->i_pdquot, pdqp);
1609 ASSERT(ip->i_d.di_version > 1);
1610 ip->i_d.di_projid = fa->fsx_projid;
1614 * Only set the extent size hint if we've already determined that the
1615 * extent size hint should be set on the inode. If no extent size flags
1616 * are set on the inode then unconditionally clear the extent size hint.
1618 if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
1619 ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
1621 ip->i_d.di_extsize = 0;
1622 if (ip->i_d.di_version == 3 &&
1623 (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
1624 ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
1625 mp->m_sb.sb_blocklog;
1627 ip->i_d.di_cowextsize = 0;
1629 code = xfs_trans_commit(tp);
1632 * Release any dquot(s) the inode had kept before chown.
1634 xfs_qm_dqrele(olddquot);
1635 xfs_qm_dqrele(udqp);
1636 xfs_qm_dqrele(pdqp);
1641 xfs_trans_cancel(tp);
1643 xfs_qm_dqrele(udqp);
1644 xfs_qm_dqrele(pdqp);
1657 if (copy_from_user(&fa, arg, sizeof(fa)))
1660 error = mnt_want_write_file(filp);
1663 error = xfs_ioctl_setattr(ip, &fa);
1664 mnt_drop_write_file(filp);
1675 flags = xfs_di2lxflags(ip->i_d.di_flags);
1676 if (copy_to_user(arg, &flags, sizeof(flags)))
1683 struct xfs_inode *ip,
1687 struct xfs_trans *tp;
1689 struct fsxattr old_fa;
1694 if (copy_from_user(&flags, arg, sizeof(flags)))
1697 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
1698 FS_NOATIME_FL | FS_NODUMP_FL | \
1702 fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
1704 error = mnt_want_write_file(filp);
1709 * Changing DAX config may require inode locking for mapping
1710 * invalidation. These need to be held all the way to transaction commit
1711 * or cancel time, so need to be passed through to
1712 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
1715 error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
1717 goto out_drop_write;
1719 tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
1721 error = PTR_ERR(tp);
1722 goto out_drop_write;
1725 xfs_fill_fsxattr(ip, false, &old_fa);
1726 error = vfs_ioc_fssetxattr_check(VFS_I(ip), &old_fa, &fa);
1728 xfs_trans_cancel(tp);
1729 goto out_drop_write;
1732 error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
1734 xfs_trans_cancel(tp);
1735 goto out_drop_write;
1738 error = xfs_trans_commit(tp);
1740 mnt_drop_write_file(filp);
1747 struct getbmapx __user *u,
1750 if (put_user(p->bmv_offset, &u->bmv_offset) ||
1751 put_user(p->bmv_block, &u->bmv_block) ||
1752 put_user(p->bmv_length, &u->bmv_length) ||
1753 put_user(0, &u->bmv_count) ||
1754 put_user(0, &u->bmv_entries))
1756 if (recsize < sizeof(struct getbmapx))
1758 if (put_user(0, &u->bmv_iflags) ||
1759 put_user(p->bmv_oflags, &u->bmv_oflags) ||
1760 put_user(0, &u->bmv_unused1) ||
1761 put_user(0, &u->bmv_unused2))
1772 struct getbmapx bmx = { 0 };
1773 struct kgetbmap *buf;
1778 case XFS_IOC_GETBMAPA:
1779 bmx.bmv_iflags = BMV_IF_ATTRFORK;
1781 case XFS_IOC_GETBMAP:
1782 if (file->f_mode & FMODE_NOCMTIME)
1783 bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
1784 /* struct getbmap is a strict subset of struct getbmapx. */
1785 recsize = sizeof(struct getbmap);
1787 case XFS_IOC_GETBMAPX:
1788 recsize = sizeof(struct getbmapx);
1794 if (copy_from_user(&bmx, arg, recsize))
1797 if (bmx.bmv_count < 2)
1799 if (bmx.bmv_count > ULONG_MAX / recsize)
1802 buf = kmem_zalloc_large(bmx.bmv_count * sizeof(*buf), 0);
1806 error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
1811 if (copy_to_user(arg, &bmx, recsize))
1815 for (i = 0; i < bmx.bmv_entries; i++) {
1816 if (!xfs_getbmap_format(buf + i, arg, recsize))
1827 struct getfsmap_info {
1828 struct xfs_mount *mp;
1829 struct fsmap_head __user *data;
1835 xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
1837 struct getfsmap_info *info = priv;
1840 trace_xfs_getfsmap_mapping(info->mp, xfm);
1842 info->last_flags = xfm->fmr_flags;
1843 xfs_fsmap_from_internal(&fm, xfm);
1844 if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
1845 sizeof(struct fsmap)))
1853 struct xfs_inode *ip,
1854 struct fsmap_head __user *arg)
1856 struct getfsmap_info info = { NULL };
1857 struct xfs_fsmap_head xhead = {0};
1858 struct fsmap_head head;
1859 bool aborted = false;
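/*
 * An illustrative userspace sketch of this interface (assumptions: the
 * fsmap definitions and fsmap_sizeof() helper from linux/fsmap.h and "fd"
 * open on a file in the target filesystem).  The low key is left at zero
 * and the high key fields are set to all ones, while the reserved fields
 * stay zero as required by the checks below:
 *
 *	struct fsmap_head *head = calloc(1, fsmap_sizeof(128));
 *
 *	head->fmh_count = 128;
 *	head->fmh_keys[1].fmr_device   = UINT_MAX;
 *	head->fmh_keys[1].fmr_flags    = UINT_MAX;
 *	head->fmh_keys[1].fmr_physical = ULLONG_MAX;
 *	head->fmh_keys[1].fmr_owner    = ULLONG_MAX;
 *	head->fmh_keys[1].fmr_offset   = ULLONG_MAX;
 *	if (!ioctl(fd, FS_IOC_GETFSMAP, head))
 *		// head->fmh_entries records are now in head->fmh_recs[]
 */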
1862 if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
1864 if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
1865 memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
1866 sizeof(head.fmh_keys[0].fmr_reserved)) ||
1867 memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
1868 sizeof(head.fmh_keys[1].fmr_reserved)))
1871 xhead.fmh_iflags = head.fmh_iflags;
1872 xhead.fmh_count = head.fmh_count;
1873 xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
1874 xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
1876 trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
1877 trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
1879 info.mp = ip->i_mount;
1881 error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
1882 if (error == -ECANCELED) {
1888 /* If we didn't abort, set the "last" flag in the last fmx */
1889 if (!aborted && info.idx) {
1890 info.last_flags |= FMR_OF_LAST;
1891 if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
1892 &info.last_flags, sizeof(info.last_flags)))
1896 /* copy back header */
1897 head.fmh_entries = xhead.fmh_entries;
1898 head.fmh_oflags = xhead.fmh_oflags;
1899 if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
1906 xfs_ioc_scrub_metadata(
1907 struct xfs_inode *ip,
1910 struct xfs_scrub_metadata scrub;
1913 if (!capable(CAP_SYS_ADMIN))
1916 if (copy_from_user(&scrub, arg, sizeof(scrub)))
1919 error = xfs_scrub_metadata(ip, &scrub);
1923 if (copy_to_user(arg, &scrub, sizeof(scrub)))
1933 xfs_inode_t *ip, *tip;
1937 /* Pull information for the target fd */
1938 f = fdget((int)sxp->sx_fdtarget);
1944 if (!(f.file->f_mode & FMODE_WRITE) ||
1945 !(f.file->f_mode & FMODE_READ) ||
1946 (f.file->f_flags & O_APPEND)) {
1951 tmp = fdget((int)sxp->sx_fdtmp);
1957 if (!(tmp.file->f_mode & FMODE_WRITE) ||
1958 !(tmp.file->f_mode & FMODE_READ) ||
1959 (tmp.file->f_flags & O_APPEND)) {
1961 goto out_put_tmp_file;
1964 if (IS_SWAPFILE(file_inode(f.file)) ||
1965 IS_SWAPFILE(file_inode(tmp.file))) {
1967 goto out_put_tmp_file;
1971 * We need to ensure that the fds passed in point to XFS inodes
1972 * before we cast and access them as XFS structures as we have no
1973 * control over what the user passes us here.
1975 if (f.file->f_op != &xfs_file_operations ||
1976 tmp.file->f_op != &xfs_file_operations) {
1978 goto out_put_tmp_file;
1981 ip = XFS_I(file_inode(f.file));
1982 tip = XFS_I(file_inode(tmp.file));
1984 if (ip->i_mount != tip->i_mount) {
1986 goto out_put_tmp_file;
1989 if (ip->i_ino == tip->i_ino) {
1991 goto out_put_tmp_file;
1994 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1996 goto out_put_tmp_file;
1999 error = xfs_swap_extents(ip, tip, sxp);
2011 struct xfs_mount *mp,
2012 char __user *user_label)
2014 struct xfs_sb *sbp = &mp->m_sb;
2015 char label[XFSLABEL_MAX + 1];
2018 BUILD_BUG_ON(sizeof(sbp->sb_fname) > FSLABEL_MAX);
2020 /* 1 larger than sb_fname, so this ensures a trailing NUL char */
2021 memset(label, 0, sizeof(label));
2022 spin_lock(&mp->m_sb_lock);
2023 strncpy(label, sbp->sb_fname, XFSLABEL_MAX);
2024 spin_unlock(&mp->m_sb_lock);
2026 if (copy_to_user(user_label, label, sizeof(label)))
2034 struct xfs_mount *mp,
2035 char __user *newlabel)
2037 struct xfs_sb *sbp = &mp->m_sb;
2038 char label[XFSLABEL_MAX + 1];
2042 if (!capable(CAP_SYS_ADMIN))
2045 * The generic ioctl allows up to FSLABEL_MAX chars, but the XFS label is
2046 * much smaller, at 12 bytes. We copy one more to be sure we find the
2047 * (required) NULL character to test the incoming label length.
2048 * NB: The on disk label doesn't need to be null terminated.
2050 if (copy_from_user(label, newlabel, XFSLABEL_MAX + 1))
2052 len = strnlen(label, XFSLABEL_MAX + 1);
2053 if (len > sizeof(sbp->sb_fname))
2056 error = mnt_want_write_file(filp);
2060 spin_lock(&mp->m_sb_lock);
2061 memset(sbp->sb_fname, 0, sizeof(sbp->sb_fname));
2062 memcpy(sbp->sb_fname, label, len);
2063 spin_unlock(&mp->m_sb_lock);
2066 * Now we do several things to satisfy userspace.
2067 * In addition to normal logging of the primary superblock, we also
2068 * immediately write these changes to sector zero for the primary, then
2069 * update all backup supers (as xfs_db does for a label change), then
2070 * invalidate the block device page cache. This is so that any prior
2071 * buffered reads from userspace (i.e. from blkid) are invalidated,
2072 * and userspace will see the newly-written label.
2074 error = xfs_sync_sb_buf(mp);
2078 * growfs also updates backup supers, so lock against that.
2080 mutex_lock(&mp->m_growlock);
2081 error = xfs_update_secondary_sbs(mp);
2082 mutex_unlock(&mp->m_growlock);
2084 invalidate_bdev(mp->m_ddev_targp->bt_bdev);
2087 mnt_drop_write_file(filp);
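/*
 * An illustrative userspace sketch of the two label ioctls (assumptions:
 * the FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL and FSLABEL_MAX definitions from
 * linux/fs.h and "fd" open on a file in the filesystem; setting the label
 * requires CAP_SYS_ADMIN):
 *
 *	char label[FSLABEL_MAX] = { 0 };
 *	char newlabel[FSLABEL_MAX] = "scratch";
 *
 *	if (!ioctl(fd, FS_IOC_GETFSLABEL, label))
 *		printf("label: %s\n", label);
 *	if (ioctl(fd, FS_IOC_SETFSLABEL, newlabel))
 *		perror("FS_IOC_SETFSLABEL");
 */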
2092 * Note: some of the ioctls return positive numbers as a
2093 * byte count indicating success, such as readlink_by_handle.
2094 * So we don't "sign flip" like most other routines. This means
2095 * true errors need to be returned as a negative value.
2103 struct inode *inode = file_inode(filp);
2104 struct xfs_inode *ip = XFS_I(inode);
2105 struct xfs_mount *mp = ip->i_mount;
2106 void __user *arg = (void __user *)p;
2109 trace_xfs_file_ioctl(ip);
2113 return xfs_ioc_trim(mp, arg);
2114 case FS_IOC_GETFSLABEL:
2115 return xfs_ioc_getlabel(mp, arg);
2116 case FS_IOC_SETFSLABEL:
2117 return xfs_ioc_setlabel(filp, mp, arg);
2118 case XFS_IOC_ALLOCSP:
2119 case XFS_IOC_FREESP:
2120 case XFS_IOC_ALLOCSP64:
2121 case XFS_IOC_FREESP64: {
2124 if (copy_from_user(&bf, arg, sizeof(bf)))
2126 return xfs_ioc_space(filp, &bf);
2128 case XFS_IOC_DIOINFO: {
2129 struct xfs_buftarg *target = xfs_inode_buftarg(ip);
2132 da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
2133 da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
2135 if (copy_to_user(arg, &da, sizeof(da)))
2140 case XFS_IOC_FSBULKSTAT_SINGLE:
2141 case XFS_IOC_FSBULKSTAT:
2142 case XFS_IOC_FSINUMBERS:
2143 return xfs_ioc_fsbulkstat(mp, cmd, arg);
2145 case XFS_IOC_BULKSTAT:
2146 return xfs_ioc_bulkstat(mp, cmd, arg);
2147 case XFS_IOC_INUMBERS:
2148 return xfs_ioc_inumbers(mp, cmd, arg);
2150 case XFS_IOC_FSGEOMETRY_V1:
2151 return xfs_ioc_fsgeometry(mp, arg, 3);
2152 case XFS_IOC_FSGEOMETRY_V4:
2153 return xfs_ioc_fsgeometry(mp, arg, 4);
2154 case XFS_IOC_FSGEOMETRY:
2155 return xfs_ioc_fsgeometry(mp, arg, 5);
2157 case XFS_IOC_AG_GEOMETRY:
2158 return xfs_ioc_ag_geometry(mp, arg);
2160 case XFS_IOC_GETVERSION:
2161 return put_user(inode->i_generation, (int __user *)arg);
2163 case XFS_IOC_FSGETXATTR:
2164 return xfs_ioc_fsgetxattr(ip, 0, arg);
2165 case XFS_IOC_FSGETXATTRA:
2166 return xfs_ioc_fsgetxattr(ip, 1, arg);
2167 case XFS_IOC_FSSETXATTR:
2168 return xfs_ioc_fssetxattr(ip, filp, arg);
2169 case XFS_IOC_GETXFLAGS:
2170 return xfs_ioc_getxflags(ip, arg);
2171 case XFS_IOC_SETXFLAGS:
2172 return xfs_ioc_setxflags(ip, filp, arg);
2174 case XFS_IOC_GETBMAP:
2175 case XFS_IOC_GETBMAPA:
2176 case XFS_IOC_GETBMAPX:
2177 return xfs_ioc_getbmap(filp, cmd, arg);
2179 case FS_IOC_GETFSMAP:
2180 return xfs_ioc_getfsmap(ip, arg);
2182 case XFS_IOC_SCRUB_METADATA:
2183 return xfs_ioc_scrub_metadata(ip, arg);
2185 case XFS_IOC_FD_TO_HANDLE:
2186 case XFS_IOC_PATH_TO_HANDLE:
2187 case XFS_IOC_PATH_TO_FSHANDLE: {
2188 xfs_fsop_handlereq_t hreq;
2190 if (copy_from_user(&hreq, arg, sizeof(hreq)))
2192 return xfs_find_handle(cmd, &hreq);
2194 case XFS_IOC_OPEN_BY_HANDLE: {
2195 xfs_fsop_handlereq_t hreq;
2197 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
2199 return xfs_open_by_handle(filp, &hreq);
2202 case XFS_IOC_READLINK_BY_HANDLE: {
2203 xfs_fsop_handlereq_t hreq;
2205 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
2207 return xfs_readlink_by_handle(filp, &hreq);
2209 case XFS_IOC_ATTRLIST_BY_HANDLE:
2210 return xfs_attrlist_by_handle(filp, arg);
2212 case XFS_IOC_ATTRMULTI_BY_HANDLE:
2213 return xfs_attrmulti_by_handle(filp, arg);
2215 case XFS_IOC_SWAPEXT: {
2216 struct xfs_swapext sxp;
2218 if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
2220 error = mnt_want_write_file(filp);
2223 error = xfs_ioc_swapext(&sxp);
2224 mnt_drop_write_file(filp);
2228 case XFS_IOC_FSCOUNTS: {
2229 xfs_fsop_counts_t out;
2231 xfs_fs_counts(mp, &out);
2233 if (copy_to_user(arg, &out, sizeof(out)))
2238 case XFS_IOC_SET_RESBLKS: {
2239 xfs_fsop_resblks_t inout;
2242 if (!capable(CAP_SYS_ADMIN))
2245 if (mp->m_flags & XFS_MOUNT_RDONLY)
2248 if (copy_from_user(&inout, arg, sizeof(inout)))
2251 error = mnt_want_write_file(filp);
2255 /* input parameter is passed in resblks field of structure */
2257 error = xfs_reserve_blocks(mp, &in, &inout);
2258 mnt_drop_write_file(filp);
2262 if (copy_to_user(arg, &inout, sizeof(inout)))
2267 case XFS_IOC_GET_RESBLKS: {
2268 xfs_fsop_resblks_t out;
2270 if (!capable(CAP_SYS_ADMIN))
2273 error = xfs_reserve_blocks(mp, NULL, &out);
2277 if (copy_to_user(arg, &out, sizeof(out)))
2283 case XFS_IOC_FSGROWFSDATA: {
2284 xfs_growfs_data_t in;
2286 if (copy_from_user(&in, arg, sizeof(in)))
2289 error = mnt_want_write_file(filp);
2292 error = xfs_growfs_data(mp, &in);
2293 mnt_drop_write_file(filp);
2297 case XFS_IOC_FSGROWFSLOG: {
2298 xfs_growfs_log_t in;
2300 if (copy_from_user(&in, arg, sizeof(in)))
2303 error = mnt_want_write_file(filp);
2306 error = xfs_growfs_log(mp, &in);
2307 mnt_drop_write_file(filp);
2311 case XFS_IOC_FSGROWFSRT: {
2314 if (copy_from_user(&in, arg, sizeof(in)))
2317 error = mnt_want_write_file(filp);
2320 error = xfs_growfs_rt(mp, &in);
2321 mnt_drop_write_file(filp);
2325 case XFS_IOC_GOINGDOWN: {
2328 if (!capable(CAP_SYS_ADMIN))
2331 if (get_user(in, (uint32_t __user *)arg))
2334 return xfs_fs_goingdown(mp, in);
2337 case XFS_IOC_ERROR_INJECTION: {
2338 xfs_error_injection_t in;
2340 if (!capable(CAP_SYS_ADMIN))
2343 if (copy_from_user(&in, arg, sizeof(in)))
2346 return xfs_errortag_add(mp, in.errtag);
2349 case XFS_IOC_ERROR_CLEARALL:
2350 if (!capable(CAP_SYS_ADMIN))
2353 return xfs_errortag_clearall(mp);
2355 case XFS_IOC_FREE_EOFBLOCKS: {
2356 struct xfs_fs_eofblocks eofb;
2357 struct xfs_eofblocks keofb;
2359 if (!capable(CAP_SYS_ADMIN))
2362 if (mp->m_flags & XFS_MOUNT_RDONLY)
2365 if (copy_from_user(&eofb, arg, sizeof(eofb)))
2368 error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
2372 return xfs_icache_free_eofblocks(mp, &keofb);