1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 1991, 1992 Linus Torvalds
8 #include <linux/export.h>
10 #include <linux/errno.h>
11 #include <linux/file.h>
12 #include <linux/highuid.h>
14 #include <linux/namei.h>
15 #include <linux/security.h>
16 #include <linux/cred.h>
17 #include <linux/syscalls.h>
18 #include <linux/pagemap.h>
19 #include <linux/compat.h>
21 #include <linux/uaccess.h>
22 #include <asm/unistd.h>
28 * generic_fillattr - Fill in the basic attributes from the inode struct
29 * @mnt_userns: user namespace of the mount the inode was found from
30 * @inode: Inode to use as the source
31 * @stat: Where to fill in the attributes
33 * Fill in the basic attributes in the kstat structure from data that's to be
34 * found on the VFS inode structure. This is the default if no getattr inode
35 * operation is supplied.
37 * If the inode has been found through an idmapped mount the user namespace of
38 * the vfsmount must be passed through @mnt_userns. This function will then
39 * take care to map the inode according to @mnt_userns before filling in the
40 * uid and gid fields. On non-idmapped mounts or if permission checking is to be
41 * performed on the raw inode simply pass init_user_ns.
43 void generic_fillattr(struct user_namespace *mnt_userns, struct inode *inode,
46 stat->dev = inode->i_sb->s_dev;
47 stat->ino = inode->i_ino;
48 stat->mode = inode->i_mode;
49 stat->nlink = inode->i_nlink;
/* uid/gid are mapped through the mount's user namespace (idmapped mounts). */
50 stat->uid = i_uid_into_mnt(mnt_userns, inode);
51 stat->gid = i_gid_into_mnt(mnt_userns, inode);
52 stat->rdev = inode->i_rdev;
/* i_size_read() reads i_size via the VFS's consistent-snapshot helper. */
53 stat->size = i_size_read(inode);
54 stat->atime = inode->i_atime;
55 stat->mtime = inode->i_mtime;
56 stat->ctime = inode->i_ctime;
57 stat->blksize = i_blocksize(inode);
58 stat->blocks = inode->i_blocks;
60 EXPORT_SYMBOL(generic_fillattr);
63 * vfs_getattr_nosec - getattr without security checks
64 * @path: file to get attributes from
65 * @stat: structure to return attributes in
66 * @request_mask: STATX_xxx flags indicating what the caller wants
67 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
69 * Get attributes without calling security_inode_getattr.
71 * Currently the only caller other than vfs_getattr is internal to the
72 * filehandle lookup code, which uses only the inode number and returns no
73 * attributes to any user. Any other code probably wants vfs_getattr.
75 int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
76 u32 request_mask, unsigned int query_flags)
78 struct inode *inode = d_backing_inode(path->dentry);
/* Start from a clean slate; the basic stat fields are always reported. */
80 memset(stat, 0, sizeof(*stat));
81 stat->result_mask |= STATX_BASIC_STATS;
/* Only the sync-type bits are forwarded to the filesystem callback. */
82 query_flags &= AT_STATX_SYNC_TYPE;
84 /* allow the fs to override these if it really wants to */
85 /* SB_NOATIME means filesystem supplies dummy atime value */
86 if (inode->i_sb->s_flags & SB_NOATIME)
87 stat->result_mask &= ~STATX_ATIME;
88 if (IS_AUTOMOUNT(inode))
89 stat->attributes |= STATX_ATTR_AUTOMOUNT;
/* NOTE(review): the guard condition for the DAX flag is on an elided line. */
92 stat->attributes |= STATX_ATTR_DAX;
/* Let the filesystem supply the attributes if it implements ->getattr. */
94 if (inode->i_op->getattr)
95 return inode->i_op->getattr(path, stat, request_mask,
/* Default path: fill the kstat straight from the inode. */
98 generic_fillattr(mnt_user_ns(path->mnt), inode, stat);
101 EXPORT_SYMBOL(vfs_getattr_nosec);
104 * vfs_getattr - Get the enhanced basic attributes of a file
105 * @path: The file of interest
106 * @stat: Where to return the statistics
107 * @request_mask: STATX_xxx flags indicating what the caller wants
108 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
110 * Ask the filesystem for a file's attributes. The caller must indicate in
111 * request_mask and query_flags what they want.
113 * If the file is remote, the filesystem can be forced to update the attributes
114 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
115 * suppress the update by passing AT_STATX_DONT_SYNC.
117 * Bits must have been set in request_mask to indicate which attributes the
118 * caller wants retrieving. Any such attribute not requested may be returned
119 * anyway, but the value may be approximate, and, if remote, may not have been
120 * synchronised with the server.
122 * 0 will be returned on success, and a -ve error code if unsuccessful.
124 int vfs_getattr(const struct path *path, struct kstat *stat,
125 u32 request_mask, unsigned int query_flags)
/* Run the LSM permission hook first; a failure is returned to the caller. */
129 retval = security_inode_getattr(path);
/* Security check passed - do the real work without rechecking. */
132 return vfs_getattr_nosec(path, stat, request_mask, query_flags);
134 EXPORT_SYMBOL(vfs_getattr);
137 * vfs_fstat - Get the basic attributes by file descriptor
138 * @fd: The file descriptor referring to the file of interest
139 * @stat: The result structure to fill in.
141 * This function is a wrapper around vfs_getattr(). The main difference is
142 * that it uses a file descriptor to determine the file location.
144 * 0 will be returned on success, and a -ve error code if unsuccessful.
146 int vfs_fstat(int fd, struct kstat *stat)
/* Resolve @fd to a struct file and stat its path (fd lookup lines elided). */
154 error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
160 * vfs_statx - Get basic and extra attributes by filename
161 * @dfd: A file descriptor representing the base dir for a relative filename
162 * @filename: The name of the file of interest
163 * @flags: Flags to control the query
164 * @stat: The result structure to fill in.
165 * @request_mask: STATX_xxx flags indicating what the caller wants
167 * This function is a wrapper around vfs_getattr(). The main difference is
168 * that it uses a filename and base directory to determine the file location.
169 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
170 * at the given name from being referenced.
172 * 0 will be returned on success, and a -ve error code if unsuccessful.
174 static int vfs_statx(int dfd, const char __user *filename, int flags,
175 struct kstat *stat, u32 request_mask)
178 unsigned lookup_flags = 0;
/* Reject any flag outside the set this interface understands. */
181 if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
/* Translate the caller's AT_* flags into pathwalk LOOKUP_* flags. */
185 if (!(flags & AT_SYMLINK_NOFOLLOW))
186 lookup_flags |= LOOKUP_FOLLOW;
187 if (!(flags & AT_NO_AUTOMOUNT))
188 lookup_flags |= LOOKUP_AUTOMOUNT;
189 if (flags & AT_EMPTY_PATH)
190 lookup_flags |= LOOKUP_EMPTY;
193 error = user_path_at(dfd, filename, lookup_flags, &path);
197 error = vfs_getattr(&path, stat, request_mask, flags);
/* Also report which mount the object was found on. */
198 stat->mnt_id = real_mount(path.mnt)->mnt_id;
199 stat->result_mask |= STATX_MNT_ID;
200 if (path.mnt->mnt_root == path.dentry)
201 stat->attributes |= STATX_ATTR_MOUNT_ROOT;
202 stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
/* On ESTALE, retry the whole lookup with revalidation forced. */
204 if (retry_estale(error, lookup_flags)) {
205 lookup_flags |= LOOKUP_REVAL;
/*
 * vfs_fstatat - fstatat(2) semantics on top of vfs_statx(): request only
 * the basic stat fields and suppress automount traversal.
 */
212 int vfs_fstatat(int dfd, const char __user *filename,
213 struct kstat *stat, int flags)
215 return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
216 stat, STATX_BASIC_STATS);
219 #ifdef __ARCH_WANT_OLD_STAT
222 * For backward compatibility? Maybe this should be moved
223 * into arch/i386 instead?
225 static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
/* Rate-limited nag: warn the first few callers still using old stat(). */
227 static int warncount = 5;
228 struct __old_kernel_stat tmp;
232 printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
234 } else if (warncount < 0) {
235 /* it's laughable, but... */
/* Zero first so padding in the legacy struct doesn't leak stack data. */
239 memset(&tmp, 0, sizeof(struct __old_kernel_stat))
240 tmp.st_dev = old_encode_dev(stat->dev);
241 tmp.st_ino = stat->ino;
/* Refuse to silently truncate an inode number that does not fit the ABI. */
242 if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
244 tmp.st_mode = stat->mode;
245 tmp.st_nlink = stat->nlink;
246 if (tmp.st_nlink != stat->nlink)
/* Map kuid/kgid into the caller's user namespace. */
248 SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
249 SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
250 tmp.st_rdev = old_encode_dev(stat->rdev);
251 #if BITS_PER_LONG == 32
/* The old 32-bit stat ABI cannot represent files larger than MAX_NON_LFS. */
252 if (stat->size > MAX_NON_LFS)
255 tmp.st_size = stat->size;
256 tmp.st_atime = stat->atime.tv_sec;
257 tmp.st_mtime = stat->mtime.tv_sec;
258 tmp.st_ctime = stat->ctime.tv_sec;
259 return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
/* Legacy stat(2)/lstat(2)/fstat(2) entry points using __old_kernel_stat. */
262 SYSCALL_DEFINE2(stat, const char __user *, filename,
263 struct __old_kernel_stat __user *, statbuf)
268 error = vfs_stat(filename, &stat);
272 return cp_old_stat(&stat, statbuf);
275 SYSCALL_DEFINE2(lstat, const char __user *, filename,
276 struct __old_kernel_stat __user *, statbuf)
281 error = vfs_lstat(filename, &stat);
285 return cp_old_stat(&stat, statbuf);
288 SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
291 int error = vfs_fstat(fd, &stat);
294 error = cp_old_stat(&stat, statbuf);
299 #endif /* __ARCH_WANT_OLD_STAT */
301 #ifdef __ARCH_WANT_NEW_STAT
/* choose_32_64(a, b): expands to @a on 32-bit kernels and @b on 64-bit. */
303 #if BITS_PER_LONG == 32
304 # define choose_32_64(a,b) a
306 # define choose_32_64(a,b) b
/* 64-bit dev_t always fits; 32-bit must validate with old_valid_dev(). */
309 #define valid_dev(x) choose_32_64(old_valid_dev(x),true)
310 #define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
/* Arches may supply a cheaper padding initializer; default is full memset. */
312 #ifndef INIT_STRUCT_STAT_PADDING
313 # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
/*
 * cp_new_stat - copy a kstat into the "new" struct stat and hand it to
 * userspace. Returns 0 or -EFAULT on copy failure; the error returns for
 * the overflow checks below are on elided lines.
 */
316 static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
/* dev_t values must be representable in the (possibly old-style) ABI. */
320 if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
322 #if BITS_PER_LONG == 32
/* 32-bit struct stat cannot represent sizes beyond MAX_NON_LFS. */
323 if (stat->size > MAX_NON_LFS)
/* Clear ABI padding so no kernel stack bytes leak to userspace. */
327 INIT_STRUCT_STAT_PADDING(tmp);
328 tmp.st_dev = encode_dev(stat->dev);
329 tmp.st_ino = stat->ino;
/* Refuse to silently truncate an inode number that does not fit. */
330 if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
332 tmp.st_mode = stat->mode;
333 tmp.st_nlink = stat->nlink;
334 if (tmp.st_nlink != stat->nlink)
/* Map kuid/kgid into the caller's user namespace. */
336 SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
337 SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
338 tmp.st_rdev = encode_dev(stat->rdev);
339 tmp.st_size = stat->size;
340 tmp.st_atime = stat->atime.tv_sec;
341 tmp.st_mtime = stat->mtime.tv_sec;
342 tmp.st_ctime = stat->ctime.tv_sec;
343 #ifdef STAT_HAVE_NSEC
344 tmp.st_atime_nsec = stat->atime.tv_nsec;
345 tmp.st_mtime_nsec = stat->mtime.tv_nsec;
346 tmp.st_ctime_nsec = stat->ctime.tv_nsec;
348 tmp.st_blocks = stat->blocks;
349 tmp.st_blksize = stat->blksize;
350 return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
/* "New" stat family: same syscalls but filling the modern struct stat. */
353 SYSCALL_DEFINE2(newstat, const char __user *, filename,
354 struct stat __user *, statbuf)
357 int error = vfs_stat(filename, &stat);
361 return cp_new_stat(&stat, statbuf);
364 SYSCALL_DEFINE2(newlstat, const char __user *, filename,
365 struct stat __user *, statbuf)
370 error = vfs_lstat(filename, &stat);
374 return cp_new_stat(&stat, statbuf);
377 #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
378 SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
379 struct stat __user *, statbuf, int, flag)
384 error = vfs_fstatat(dfd, filename, &stat, flag);
387 return cp_new_stat(&stat, statbuf);
391 SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
394 int error = vfs_fstat(fd, &stat);
397 error = cp_new_stat(&stat, statbuf);
/*
 * do_readlinkat - shared implementation of readlink(2)/readlinkat(2).
 * Resolves @pathname relative to @dfd (the final symlink itself is never
 * followed) and copies the link target into @buf, at most @bufsiz bytes.
 */
403 static int do_readlinkat(int dfd, const char __user *pathname,
404 char __user *buf, int bufsiz)
/* NOTE(review): empty paths pass the lookup and are rejected below. */
409 unsigned int lookup_flags = LOOKUP_EMPTY;
415 error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
417 struct inode *inode = d_backing_inode(path.dentry);
/* Empty path -> ENOENT; a non-symlink without ->readlink -> EINVAL. */
419 error = empty ? -ENOENT : -EINVAL;
421 * AFS mountpoints allow readlink(2) but are not symlinks
423 if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
/* LSM hook must approve before the link target is revealed. */
424 error = security_inode_readlink(path.dentry);
427 error = vfs_readlink(path.dentry, buf, bufsiz);
/* Retry the lookup with revalidation forced if the dentry went stale. */
431 if (retry_estale(error, lookup_flags)) {
432 lookup_flags |= LOOKUP_REVAL;
/* readlinkat(2): thin wrapper around the shared do_readlinkat() helper. */
439 SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
440 char __user *, buf, int, bufsiz)
442 return do_readlinkat(dfd, pathname, buf, bufsiz);
/* readlink(2): same, resolved relative to the current working directory. */
445 SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
448 return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
452 /* ---------- LFS-64 ----------- */
453 #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
/* Arches may supply a cheaper padding initializer; default is full memset. */
455 #ifndef INIT_STRUCT_STAT64_PADDING
456 # define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
/*
 * cp_new_stat64 - copy a kstat into the LFS struct stat64 and hand it to
 * userspace. Returns 0 or -EFAULT on copy failure.
 */
459 static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
/* Clear ABI padding so no kernel stack bytes leak to userspace. */
463 INIT_STRUCT_STAT64_PADDING(tmp);
465 /* mips has weird padding, so we don't get 64 bits there */
466 tmp.st_dev = new_encode_dev(stat->dev);
467 tmp.st_rdev = new_encode_dev(stat->rdev);
469 tmp.st_dev = huge_encode_dev(stat->dev);
470 tmp.st_rdev = huge_encode_dev(stat->rdev);
472 tmp.st_ino = stat->ino;
/* Refuse to silently truncate an inode number that does not fit. */
473 if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
475 #ifdef STAT64_HAS_BROKEN_ST_INO
476 tmp.__st_ino = stat->ino;
478 tmp.st_mode = stat->mode;
479 tmp.st_nlink = stat->nlink;
/* Map kuid/kgid into the caller's user namespace. */
480 tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
481 tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
482 tmp.st_atime = stat->atime.tv_sec;
483 tmp.st_atime_nsec = stat->atime.tv_nsec;
484 tmp.st_mtime = stat->mtime.tv_sec;
485 tmp.st_mtime_nsec = stat->mtime.tv_nsec;
486 tmp.st_ctime = stat->ctime.tv_sec;
487 tmp.st_ctime_nsec = stat->ctime.tv_nsec;
488 tmp.st_size = stat->size;
489 tmp.st_blocks = stat->blocks;
490 tmp.st_blksize = stat->blksize;
491 return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
/* LFS-64 stat family entry points, filling struct stat64. */
494 SYSCALL_DEFINE2(stat64, const char __user *, filename,
495 struct stat64 __user *, statbuf)
498 int error = vfs_stat(filename, &stat);
501 error = cp_new_stat64(&stat, statbuf);
506 SYSCALL_DEFINE2(lstat64, const char __user *, filename,
507 struct stat64 __user *, statbuf)
510 int error = vfs_lstat(filename, &stat);
513 error = cp_new_stat64(&stat, statbuf);
518 SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
521 int error = vfs_fstat(fd, &stat);
524 error = cp_new_stat64(&stat, statbuf);
529 SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
530 struct stat64 __user *, statbuf, int, flag)
535 error = vfs_fstatat(dfd, filename, &stat, flag);
538 return cp_new_stat64(&stat, statbuf);
540 #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
/*
 * cp_statx - translate a kernel kstat into the userspace struct statx and
 * copy it out. Returns 0 or -EFAULT.
 * noinline_for_stack keeps the large on-stack tmp out of callers' frames.
 */
542 static noinline_for_stack int
543 cp_statx(const struct kstat *stat, struct statx __user *buffer)
/* Zero everything, including reserved/padding fields, before copy-out. */
547 memset(&tmp, 0, sizeof(tmp));
549 tmp.stx_mask = stat->result_mask;
550 tmp.stx_blksize = stat->blksize;
551 tmp.stx_attributes = stat->attributes;
552 tmp.stx_nlink = stat->nlink;
/* uid/gid are reported in the caller's user namespace. */
553 tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
554 tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
555 tmp.stx_mode = stat->mode;
556 tmp.stx_ino = stat->ino;
557 tmp.stx_size = stat->size;
558 tmp.stx_blocks = stat->blocks;
559 tmp.stx_attributes_mask = stat->attributes_mask;
560 tmp.stx_atime.tv_sec = stat->atime.tv_sec;
561 tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
562 tmp.stx_btime.tv_sec = stat->btime.tv_sec;
563 tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
564 tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
565 tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
566 tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
567 tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
/* statx splits dev_t into separate major/minor fields. */
568 tmp.stx_rdev_major = MAJOR(stat->rdev);
569 tmp.stx_rdev_minor = MINOR(stat->rdev);
570 tmp.stx_dev_major = MAJOR(stat->dev);
571 tmp.stx_dev_minor = MINOR(stat->dev);
572 tmp.stx_mnt_id = stat->mnt_id;
574 return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
/*
 * do_statx - core of statx(2): validate @mask and @flags, perform the
 * lookup+getattr via vfs_statx(), then copy the result to @buffer.
 */
577 int do_statx(int dfd, const char __user *filename, unsigned flags,
578 unsigned int mask, struct statx __user *buffer)
/* Reserved mask bits must be zero for forward compatibility. */
583 if (mask & STATX__RESERVED)
/* Both sync-type bits set (FORCE_SYNC and DONT_SYNC) is contradictory. */
585 if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
588 error = vfs_statx(dfd, filename, flags, &stat, mask);
592 return cp_statx(&stat, buffer);
596 * sys_statx - System call to get enhanced stats
597 * @dfd: Base directory to pathwalk from *or* fd to stat.
598 * @filename: File to stat or "" with AT_EMPTY_PATH
599 * @flags: AT_* flags to control pathwalk.
600 * @mask: Parts of statx struct actually required.
601 * @buffer: Result buffer.
603 * Note that fstat() can be emulated by setting dfd to the fd of interest,
604 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
606 SYSCALL_DEFINE5(statx,
607 int, dfd, const char __user *, filename, unsigned, flags,
609 struct statx __user *, buffer)
/* All work is done by do_statx(); this is just the syscall shim. */
611 return do_statx(dfd, filename, flags, mask, buffer);
/*
 * cp_compat_stat - copy a kstat into the 32-bit compat struct stat and
 * hand it to userspace. Returns 0 or -EFAULT on copy failure; the error
 * returns for the overflow checks below are on elided lines.
 */
615 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
617 struct compat_stat tmp;
/* Compat ABI uses the old narrow dev_t encoding; reject wider numbers. */
619 if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
/* Zero padding so no kernel stack bytes leak to userspace. */
622 memset(&tmp, 0, sizeof(tmp));
623 tmp.st_dev = old_encode_dev(stat->dev);
624 tmp.st_ino = stat->ino;
/* Refuse to silently truncate an inode number that does not fit. */
625 if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
627 tmp.st_mode = stat->mode;
628 tmp.st_nlink = stat->nlink;
629 if (tmp.st_nlink != stat->nlink)
/* Map kuid/kgid into the caller's user namespace. */
631 SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
632 SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
633 tmp.st_rdev = old_encode_dev(stat->rdev);
/* 32-bit compat stat cannot represent sizes beyond MAX_NON_LFS. */
634 if ((u64) stat->size > MAX_NON_LFS)
636 tmp.st_size = stat->size;
637 tmp.st_atime = stat->atime.tv_sec;
638 tmp.st_atime_nsec = stat->atime.tv_nsec;
639 tmp.st_mtime = stat->mtime.tv_sec;
640 tmp.st_mtime_nsec = stat->mtime.tv_nsec;
641 tmp.st_ctime = stat->ctime.tv_sec;
642 tmp.st_ctime_nsec = stat->ctime.tv_nsec;
643 tmp.st_blocks = stat->blocks;
644 tmp.st_blksize = stat->blksize;
645 return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
/* Compat (32-bit-on-64-bit) entry points for the "new" stat family. */
648 COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
649 struct compat_stat __user *, statbuf)
654 error = vfs_stat(filename, &stat);
657 return cp_compat_stat(&stat, statbuf);
660 COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
661 struct compat_stat __user *, statbuf)
666 error = vfs_lstat(filename, &stat);
669 return cp_compat_stat(&stat, statbuf);
672 #ifndef __ARCH_WANT_STAT64
673 COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
674 const char __user *, filename,
675 struct compat_stat __user *, statbuf, int, flag)
680 error = vfs_fstatat(dfd, filename, &stat, flag);
683 return cp_compat_stat(&stat, statbuf);
687 COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
688 struct compat_stat __user *, statbuf)
691 int error = vfs_fstat(fd, &stat);
694 error = cp_compat_stat(&stat, statbuf);
699 /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
700 void __inode_add_bytes(struct inode *inode, loff_t bytes)
/* Whole 512-byte sectors go into i_blocks... */
702 inode->i_blocks += bytes >> 9;
704 inode->i_bytes += bytes;
/* ...and the sub-sector remainder is carried in i_bytes (kept < 512). */
705 if (inode->i_bytes >= 512) {
707 inode->i_bytes -= 512;
710 EXPORT_SYMBOL(__inode_add_bytes);
/* Locked wrapper: takes i_lock around __inode_add_bytes(). */
712 void inode_add_bytes(struct inode *inode, loff_t bytes)
714 spin_lock(&inode->i_lock);
715 __inode_add_bytes(inode, bytes);
716 spin_unlock(&inode->i_lock);
719 EXPORT_SYMBOL(inode_add_bytes);
/* Inverse of __inode_add_bytes(); caller must hold inode->i_lock. */
721 void __inode_sub_bytes(struct inode *inode, loff_t bytes)
723 inode->i_blocks -= bytes >> 9;
/* Borrow a sector from i_blocks when i_bytes would go negative. */
725 if (inode->i_bytes < bytes) {
727 inode->i_bytes += 512;
729 inode->i_bytes -= bytes;
732 EXPORT_SYMBOL(__inode_sub_bytes);
/* Locked wrapper: takes i_lock around __inode_sub_bytes(). */
734 void inode_sub_bytes(struct inode *inode, loff_t bytes)
736 spin_lock(&inode->i_lock);
737 __inode_sub_bytes(inode, bytes);
738 spin_unlock(&inode->i_lock);
741 EXPORT_SYMBOL(inode_sub_bytes);
/* Snapshot the byte count under i_lock (computed by __inode_get_bytes()). */
743 loff_t inode_get_bytes(struct inode *inode)
747 spin_lock(&inode->i_lock);
748 ret = __inode_get_bytes(inode);
749 spin_unlock(&inode->i_lock);
753 EXPORT_SYMBOL(inode_get_bytes);
755 void inode_set_bytes(struct inode *inode, loff_t bytes)
757 /* Caller is here responsible for sufficient locking
758 * (ie. inode->i_lock) */
/* Split @bytes into whole 512-byte blocks plus a sub-block remainder. */
759 inode->i_blocks = bytes >> 9;
760 inode->i_bytes = bytes & 511;
763 EXPORT_SYMBOL(inode_set_bytes);