// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/nospec.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "uid16.h"
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)		(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a,b)	(-EINVAL)
#endif
#ifndef SVE_SET_VL
# define SVE_SET_VL(a)		(-EINVAL)
#endif
#ifndef SVE_GET_VL
# define SVE_GET_VL()		(-EINVAL)
#endif
#ifndef PAC_RESET_KEYS
# define PAC_RESET_KEYS(a, b)	(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * Returns true if current's euid is same as p's uid or euid,
 * or has CAP_SYS_NICE to p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid,  cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
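/*
 * Editor's sketch (hypothetical userspace code, not part of this file):
 * undoing the offset encoding above. The raw syscall returns 40..1,
 * i.e. 20 - nice, so the conventional value is 20 - ret; note that
 * glibc's getpriority() wrapper performs this subtraction itself.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *
 *		if (raw < 0)
 *			return 1;
 *		printf("raw=%ld nice=%ld\n", raw, 20 - raw);
 *		return 0;
 *	}
 */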
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
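/*
 * Editor's sketch (hypothetical userspace code, not part of this file):
 * a setgid program permanently dropping its group privilege per the
 * rule above -- setting the real gid rewrites the saved gid too, so
 * the elevated group cannot be regained afterwards.
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	void drop_group_privs(void)
 *	{
 *		gid_t rgid = getgid();
 *
 *		if (setregid(rgid, rgid) != 0)
 *			abort();
 *	}
 */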
#ifdef CONFIG_MULTIUSER

long __sys_setregid(gid_t rgid, gid_t egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	return __sys_setregid(rgid, egid);
}
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
long __sys_setgid(gid_t gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	return __sys_setgid(gid);
}
/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root. We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
long __sys_setreuid(uid_t ruid, uid_t euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	return __sys_setreuid(ruid, euid);
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
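/*
 * Editor's sketch (hypothetical userspace code, not part of this file):
 * the BSD-style drop-and-regain pattern mentioned above. Swapping the
 * real and effective uid with setreuid() parks the privileged uid in
 * the real (and saved) slot, so a second swap can restore it.
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	void run_unprivileged(void (*fn)(void))
 *	{
 *		uid_t ruid = getuid();
 *		uid_t euid = geteuid();
 *
 *		if (setreuid(euid, ruid) != 0)
 *			abort();
 *		fn();
 *		if (setreuid(ruid, euid) != 0)
 *			abort();
 *	}
 */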
long __sys_setuid(uid_t uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	return __sys_setuid(uid);
}
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	return __sys_setresuid(ruid, euid, suid);
}

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	return __sys_setresgid(rgid, egid, sgid);
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}

	return retval;
}
786 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
787 * is used for "access()" and for the NFS daemon (letting nfsd stay at
788 * whatever uid it wants to). It normally shadows "euid", except when
789 * explicitly set by setfsuid() or for access..
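/*
 * Editor's sketch (hypothetical userspace server, not part of this
 * file): the nfsd-style pattern the comment above alludes to -- a
 * privileged daemon temporarily adopting a client's uid for filesystem
 * permission checks only, without touching its real or effective uid.
 * Note that setfsuid() returns the previous fsuid and reports no error.
 *
 *	#include <sys/fsuid.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int open_as(uid_t client_uid, const char *path)
 *	{
 *		uid_t old = setfsuid(client_uid);
 *		int fd = open(path, O_RDONLY);
 *
 *		setfsuid(old);
 *		return fd;
 *	}
 */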
long __sys_setfsuid(uid_t uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid) ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}

SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	return __sys_setfsuid(uid);
}

/*
 * Samma på svenska.. ("the same in Swedish" - the setfsgid counterpart
 * of the above)
 */
long __sys_setfsgid(gid_t gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	return __sys_setfsgid(gid);
}
#endif /* CONFIG_MULTIUSER */
/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
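/*
 * Editor's sketch (hypothetical userspace code, not part of this file):
 * observing the distinction above -- every thread shares getpid() (the
 * tgid) while gettid() differs per thread; for the thread group leader
 * the two values coincide.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static void report_ids(void)
 *	{
 *		printf("pid=%ld tid=%ld\n",
 *		       (long)getpid(), (long)syscall(SYS_gettid));
 *	}
 */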
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}
static void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
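/*
 * Editor's sketch (hypothetical userspace code, not part of this file):
 * times() returns elapsed time in clock ticks, so wall-clock seconds
 * are recovered by dividing by sysconf(_SC_CLK_TCK).
 *
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *
 *	double elapsed_seconds(clock_t start)
 *	{
 *		struct tms unused;
 *		clock_t now = times(&unused);
 *
 *		return (double)(now - start) / sysconf(_SC_CLK_TCK);
 *	}
 */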
#ifdef CONFIG_COMPAT
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}
#endif
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * !PF_FORKNOEXEC check to conform completely to POSIX.
 */
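/*
 * Editor's sketch (hypothetical userspace job-control fragment, not
 * part of this file): the classic shell pattern this syscall exists
 * for. Both parent and child call setpgid() so the new process group
 * exists before either side depends on it, whichever runs first.
 *
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	pid_t spawn_job(void)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid == 0) {
 *			setpgid(0, 0);
 *			execlp("sleep", "sleep", "60", (char *)NULL);
 *			_exit(127);
 *		}
 *		if (pid > 0)
 *			setpgid(pid, pid);
 *		return pid;
 *	}
 */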
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}
static int do_getpgid(pid_t pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	return do_getpgid(pid);
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return do_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}
static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

int ksys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}

SYSCALL_DEFINE0(setsid)
{
	return ksys_setsid();
}
DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
 * 2.6.60.
 */
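/*
 * Editor's note: a worked example of the mapping implemented below,
 * where v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60. On a kernel whose
 * UTS_RELEASE is "5.4.0-42-generic" the patchlevel byte is 4, so a
 * task running with the UNAME26 personality sees the release string
 * "2.6.64-42-generic" (everything from the first non-digit onward is
 * appended unchanged).
 */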
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	struct new_utsname tmp;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	struct old_utsname tmp;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	struct oldold_utsname tmp = {};

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_architecture(name))
		return -EFAULT;
	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	return 0;
}
#endif
SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		down_write(&uts_sem);
		u = utsname();
		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
		up_write(&uts_sem);
	}
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	memcpy(tmp, u->nodename, i);
	up_read(&uts_sem);
	if (copy_to_user(name, tmp, i))
		errno = -EFAULT;
	return errno;
}

#endif
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		down_write(&uts_sem);
		u = utsname();
		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
		up_write(&uts_sem);
	}
	return errno;
}
SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	struct compat_rlimit r32;

	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
		return -EFAULT;

	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	else
		r.rlim_cur = r32.rlim_cur;
	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	else
		r.rlim_max = r32.rlim_max;
	return do_prlimit(current, resource, &r, NULL);
}

COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		struct compat_rlimit r32;
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r32.rlim_cur = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_cur = r.rlim_cur;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r32.rlim_max = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_max = r.rlim_max;

		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
			return -EFAULT;
	}
	return ret;
}

#endif
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	r = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (r.rlim_cur > 0x7FFFFFFF)
		r.rlim_cur = 0x7FFFFFFF;
	if (r.rlim_max > 0x7FFFFFFF)
		r.rlim_max = 0x7FFFFFFF;

	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
	    put_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	return 0;
}
#endif

#endif
static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}
/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
				new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/* Keep the capable check against init_user_ns until
		   cgroups can contain all limits */
		if (new_rlim->rlim_max > rlim->rlim_max &&
				!capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk, resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry.  But we use the zero value to mean "it was
			 * never set".  So let's cheat and make it one second
			 * instead
			 */
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling. Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	 if (!retval && new_rlim && resource == RLIMIT_CPU &&
	     new_rlim->rlim_cur != RLIM_INFINITY &&
	     IS_ENABLED(CONFIG_POSIX_TIMERS))
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}
/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task,
				    unsigned int flags)
{
	const struct cred *cred = current_cred(), *tcred;
	bool id_match;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	id_match = (uid_eq(cred->uid, tcred->euid) &&
		    uid_eq(cred->uid, tcred->suid) &&
		    uid_eq(cred->uid, tcred->uid)  &&
		    gid_eq(cred->gid, tcred->egid) &&
		    gid_eq(cred->gid, tcred->sgid) &&
		    gid_eq(cred->gid, tcred->gid));
	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return -EPERM;

	return security_task_prlimit(cred, tcred, flags);
}
SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	unsigned int checkflags = 0;
	int ret;

	if (old_rlim)
		checkflags |= LSM_PRLIMIT_READ;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
		checkflags |= LSM_PRLIMIT_WRITE;
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk, checkflags);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}
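/*
 * Editor's sketch (hypothetical userspace code, not part of this file):
 * glibc's prlimit() wraps the prlimit64 syscall above and can read and
 * write another process's limit in a single call.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *	#include <sys/resource.h>
 *
 *	int bump_nofile(pid_t pid, rlim_t n)
 *	{
 *		struct rlimit new_lim = { .rlim_cur = n, .rlim_max = n };
 *		struct rlimit old_lim;
 *
 *		if (prlimit(pid, RLIMIT_NOFILE, &new_lim, &old_lim) != 0)
 *			return -1;
 *		printf("previous: %llu/%llu\n",
 *		       (unsigned long long)old_lim.rlim_cur,
 *		       (unsigned long long)old_lim.rlim_max);
 *		return 0;
 *	}
 */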
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */
static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

void getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	u64 tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *)r, 0, sizeof (*r));
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;
		/* fall through */

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
		} while_each_thread(p, t);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	r->ru_utime = ns_to_timeval(utime);
	r->ru_stime = ns_to_timeval(stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
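/*
 * Editor's sketch (hypothetical userspace code, not part of this file):
 * reading the peak RSS that getrusage() reports in kilobytes (see the
 * pages-to-KB conversion at the end of getrusage() above).
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	void print_peak_rss(void)
 *	{
 *		struct rusage ru;
 *
 *		if (getrusage(RUSAGE_SELF, &ru) == 0)
 *			printf("peak RSS: %ld KB\n", ru.ru_maxrss);
 *	}
 */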
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct file *old_exe, *exe_file;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to executable file, make
	 * sure that this one is executable as well, to avoid breaking an
	 * overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = inode_permission(inode, MAY_EXEC);
	if (err)
		goto exit;

	/*
	 * Forbid mm->exe_file change if old file still mapped.
	 */
	exe_file = get_mm_exe_file(mm);
	err = -EBUSY;
	if (exe_file) {
		struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &exe_file->f_path))
				goto exit_err;
		}

		up_read(&mm->mmap_sem);
		fput(exe_file);
	}

	err = 0;
	/* set the new file, lockless */
	old_exe = xchg(&mm->exe_file, exe.file);
	if (old_exe)
		fput(old_exe);
exit:
	fdput(exe);
	return err;
exit_err:
	up_read(&mm->mmap_sem);
	fput(exe_file);
	goto exit;
}
/*
 * Check arithmetic relations of passed addresses.
 *
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data,<=, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * @brk should be after @end_data in traditional maps.
	 */
	if (prctl_map->start_brk <= prctl_map->end_data ||
	    prctl_map->brk <= prctl_map->end_data)
		goto out;

	/*
	 * Nor should we allow overriding limits if they are set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	error = 0;
out:
	return error;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		/*
		 * Someone is trying to cheat the auxv vector.
		 */
		if (!prctl_map.auxv ||
		    prctl_map.auxv_size > sizeof(mm->saved_auxv))
			return -EINVAL;

		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		/*
		 * Make sure the caller has the rights to
		 * change /proc/pid/exe link: only local sys admin should
		 * be allowed to.
		 */
		if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
			return -EINVAL;

		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	/*
	 * arg_lock protects concurrent updates but we still need mmap_sem for
	 * read to exclude races with sys_brk.
	 */
	down_read(&mm->mmap_sem);

	/*
	 * We don't validate if these members are pointing to
	 * real present VMAs because the application may have corresponding
	 * VMAs already unmapped and the kernel uses these members for
	 * statistics output in procfs mostly, except
	 *
	 * - @start_brk/@brk which are used in do_brk, but the kernel looks up
	 *   VMAs when updating these members, so anything wrong written
	 *   here causes the kernel to swear at the userspace program but
	 *   won't lead to any problem in the kernel itself
	 */

	spin_lock(&mm->arg_lock);
	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;
	spin_unlock(&mm->arg_lock);

	/*
	 * Note this update of @saved_auxv is lockless thus
	 * if someone reads this member in procfs while we're
	 * updating -- it may get partly updated results. It's
	 * known and acceptable trade off: we leave it as is to
	 * not introduce additional locks here making the kernel
	 * more complex.
	 */
	if (prctl_map.auxv_size)
		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));

	up_read(&mm->mmap_sem);
	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values.  It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
	 */
	unsigned long user_auxv[AT_VECTOR_SIZE];

	if (len > sizeof(user_auxv))
		return -EINVAL;

	if (copy_from_user(user_auxv, (const void __user *)addr, len))
		return -EFAULT;

	/* Make sure the last entry is always AT_NULL */
	user_auxv[AT_VECTOR_SIZE - 2] = 0;
	user_auxv[AT_VECTOR_SIZE - 1] = 0;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

	task_lock(current);
	memcpy(mm->saved_auxv, user_auxv, len);
	task_unlock(current);
	return 0;
}
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	struct mm_struct *mm = current->mm;
	struct prctl_mm_map prctl_map = {
		.auxv = NULL,
		.auxv_size = 0,
		.exe_fd = -1,
	};
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
			      opt != PR_SET_MM_MAP &&
			      opt != PR_SET_MM_MAP_SIZE)))
		return -EINVAL;

#ifdef CONFIG_CHECKPOINT_RESTORE
	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
#endif

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (opt == PR_SET_MM_AUXV)
		return prctl_set_auxv(mm, addr, arg4);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	/*
	 * arg_lock protects concurrent updates of arg boundaries, we need
	 * mmap_sem for a) concurrent sys_brk, b) finding VMA for addr
	 * validation.
	 */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);

	spin_lock(&mm->arg_lock);
	prctl_map.start_code	= mm->start_code;
	prctl_map.end_code	= mm->end_code;
	prctl_map.start_data	= mm->start_data;
	prctl_map.end_data	= mm->end_data;
	prctl_map.start_brk	= mm->start_brk;
	prctl_map.brk		= mm->brk;
	prctl_map.start_stack	= mm->start_stack;
	prctl_map.arg_start	= mm->arg_start;
	prctl_map.arg_end	= mm->arg_end;
	prctl_map.env_start	= mm->env_start;
	prctl_map.env_end	= mm->env_end;

	switch (opt) {
	case PR_SET_MM_START_CODE:
		prctl_map.start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		prctl_map.end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		prctl_map.start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		prctl_map.end_data = addr;
		break;
	case PR_SET_MM_START_STACK:
		prctl_map.start_stack = addr;
		break;
	case PR_SET_MM_START_BRK:
		prctl_map.start_brk = addr;
		break;
	case PR_SET_MM_BRK:
		prctl_map.brk = addr;
		break;
	case PR_SET_MM_ARG_START:
		prctl_map.arg_start = addr;
		break;
	case PR_SET_MM_ARG_END:
		prctl_map.arg_end = addr;
		break;
	case PR_SET_MM_ENV_START:
		prctl_map.env_start = addr;
		break;
	case PR_SET_MM_ENV_END:
		prctl_map.env_end = addr;
		break;
	default:
		goto out;
	}

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		goto out;

	switch (opt) {
	/*
	 * If command line arguments and environment
	 * are placed somewhere else on stack, we can
	 * set them up here, ARG_START/END to setup
	 * command line arguments and ENV_START/END
	 * for environment.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
	}

	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;

	error = 0;
out:
	spin_unlock(&mm->arg_lock);
	up_read(&mm->mmap_sem);
	return error;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
#endif

static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If task has has_child_subreaper - all its descendants
	 * already have this flag too and new descendants will
	 * inherit it on fork, skip them.
	 *
	 * If we've found child_reaper - skip descendants in
	 * its subtree as they will never get out of the pidns.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}
int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
{
	return -EINVAL;
}

int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
				    unsigned long ctrl)
{
	return -EINVAL;
}

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user **)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (down_write_killable(&me->mm->mmap_sem))
			return -EINTR;
		if (arg2)
			set_bit(MMF_DISABLE_THP, &me->mm->flags);
		else
			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
		up_write(&me->mm->mmap_sem);
		break;
	case PR_MPX_ENABLE_MANAGEMENT:
	case PR_MPX_DISABLE_MANAGEMENT:
		/* No longer implemented: */
		return -EINVAL;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	case PR_SVE_SET_VL:
		error = SVE_SET_VL(arg2);
		break;
	case PR_SVE_GET_VL:
		error = SVE_GET_VL();
		break;
	case PR_GET_SPECULATION_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_get(me, arg2);
		break;
	case PR_SET_SPECULATION_CTRL:
		if (arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
		break;
	case PR_PAC_RESET_KEYS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_RESET_KEYS(me, arg2);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec64 tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_boottime_ts64(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */
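	/*
	 * Editor's note: a worked example of the scaling below.
	 * si_meminfo() reports values in units of mem_unit == PAGE_SIZE.
	 * With 1 GiB of RAM and 4 KiB pages that is totalram == 262144,
	 * and 262144 * 4096 fits in 32 bits, so the shift loop proves no
	 * overflow and the values are rescaled to plain bytes with
	 * mem_unit = 1 (the 2.2.x-compatible encoding). On machines where
	 * the byte count would overflow an unsigned long, the values are
	 * left in mem_unit-sized units instead.
	 */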
	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and scale
	 *  down if needed
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */