// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/syscall_user_dispatch.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/time_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/nospec.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)		(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a, b)	(-EINVAL)
#endif
#ifndef SVE_SET_VL
# define SVE_SET_VL(a)		(-EINVAL)
#endif
#ifndef SVE_GET_VL
# define SVE_GET_VL()		(-EINVAL)
#endif
#ifndef PAC_RESET_KEYS
# define PAC_RESET_KEYS(a, b)	(-EINVAL)
#endif
#ifndef PAC_SET_ENABLED_KEYS
# define PAC_SET_ENABLED_KEYS(a, b, c)	(-EINVAL)
#endif
#ifndef PAC_GET_ENABLED_KEYS
# define PAC_GET_ENABLED_KEYS(a)	(-EINVAL)
#endif
#ifndef SET_TAGGED_ADDR_CTRL
# define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
#endif
#ifndef GET_TAGGED_ADDR_CTRL
# define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * Returns true if current's euid is same as p's uid or euid,
 * or has CAP_SYS_NICE to p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid,  cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}
/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		read_lock(&tasklist_lock);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		read_unlock(&tasklist_lock);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		for_each_process_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		}
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	rcu_read_unlock();
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
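/*
 * Illustrative userspace sketch (not part of this file; assumes a raw
 * syscall wrapper like glibc's): the conventional nice value is
 * recovered by subtracting the kernel return value from 20, e.g.
 *
 *	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	int nice = 20 - ret;	// ret 40 -> nice -20, ret 1 -> nice 19
 */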
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		read_lock(&tasklist_lock);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		read_unlock(&tasklist_lock);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		for_each_process_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		}
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	rcu_read_unlock();

	return retval;
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
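/*
 * Illustrative userspace sketch (not part of this file): a setgid
 * program can drop its group privilege for good by overwriting the
 * saved gid as described above:
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) == -1)	// also resets sgid to the new egid
 *		abort();
 */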
#ifdef CONFIG_MULTIUSER
long __sys_setregid(gid_t rgid, gid_t egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	return __sys_setregid(rgid, egid);
}
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
long __sys_setgid(gid_t gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	retval = security_task_fix_setgid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	return __sys_setgid(gid);
}
/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}
static void flag_nproc_exceeded(struct cred *new)
{
	if (new->ucounts == current_ucounts())
		return;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root.  We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
			new->user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
long __sys_setreuid(uid_t ruid, uid_t euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	return __sys_setreuid(ruid, euid);
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
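/*
 * Illustrative userspace sketch (not part of this file): the BSD-style
 * swap mentioned above, for a setuid-root binary run by uid 1000
 * (ruid=1000, euid=0):
 *
 *	setreuid(geteuid(), getuid());	// drop: ruid=0, euid=1000
 *	...do unprivileged work...
 *	setreuid(geteuid(), getuid());	// swap back: ruid=1000, euid=0
 */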
long __sys_setuid(uid_t uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	return __sys_setuid(uid);
}
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
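/*
 * Illustrative sketch (not part of this file): a 4.4BSD-compatible
 * seteuid() can be built on this primitive by leaving the real and
 * saved ids untouched:
 *
 *	int seteuid(uid_t euid) { return setresuid(-1, euid, -1); }
 */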
long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	return __sys_setresuid(ruid, euid, suid);
}
SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	return __sys_setresgid(rgid, egid, sgid);
}
SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}

	return retval;
}
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
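/*
 * Illustrative userspace sketch (not part of this file): a file server
 * can impersonate a client for filesystem permission checks only,
 * without touching its real/effective/saved uids:
 *
 *	setfsuid(client_uid);	// fsuid only; client_uid is hypothetical
 *	...do filesystem work on the client's behalf...
 *	setfsuid(server_uid);	// restore
 */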
long __sys_setfsuid(uid_t uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	return __sys_setfsuid(uid);
}
/*
 * Samma på svenska.. ("the same in Swedish") - the gid counterpart
 * of setfsuid() above.
 */
long __sys_setfsgid(gid_t gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable_setid(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			if (security_task_fix_setgid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	return __sys_setfsgid(gid);
}
#endif /* CONFIG_MULTIUSER */
/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}
/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}
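/*
 * Illustrative sketch (not part of this file): for the main thread of a
 * process, gettid() equals getpid(); for any other thread created with
 * CLONE_THREAD the two differ while getpid() stays constant:
 *
 *	pid_t tgid = getpid();	// same in every thread of the group
 *	pid_t tid  = gettid();	// unique per thread
 */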
/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}
SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}
static void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}
SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
#ifdef CONFIG_COMPAT
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}
#endif
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * !PF_FORKNOEXEC check to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}
static int do_getpgid(pid_t pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}
SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	return do_getpgid(pid);
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return do_getpgid(0);
}

#endif
SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}
static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}
int ksys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}
SYSCALL_DEFINE0(setsid)
{
	return ksys_setsid();
}
DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif
/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
 * 2.6.60.
 */
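/*
 * Illustrative example of the mapping (not part of the original
 * comment): since v below is LINUX_VERSION_PATCHLEVEL + 60, a v6.1
 * kernel running with the UNAME26 personality reports its release as
 * "2.6.61" (with any non-numeric suffix such as "-rc2" appended as-is).
 */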
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = LINUX_VERSION_PATCHLEVEL + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	struct new_utsname tmp;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}
#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	struct old_utsname tmp;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	struct oldold_utsname tmp;

	if (!name)
		return -EFAULT;

	memset(&tmp, 0, sizeof(tmp));

	down_read(&uts_sem);
	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_architecture(name))
		return -EFAULT;
	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	return 0;
}
#endif
SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		down_write(&uts_sem);
		u = utsname();
		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
		up_write(&uts_sem);
	}
	return errno;
}
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i;
	struct new_utsname *u;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	memcpy(tmp, u->nodename, i);
	up_read(&uts_sem);
	if (copy_to_user(name, tmp, i))
		return -EFAULT;
	return 0;
}

#endif
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		down_write(&uts_sem);
		u = utsname();
		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
		up_write(&uts_sem);
	}
	return errno;
}
SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}
#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	struct compat_rlimit r32;

	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
		return -EFAULT;

	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	else
		r.rlim_cur = r32.rlim_cur;
	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	else
		r.rlim_max = r32.rlim_max;
	return do_prlimit(current, resource, &r, NULL);
}

COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		struct compat_rlimit r32;
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r32.rlim_cur = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_cur = r.rlim_cur;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r32.rlim_max = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_max = r.rlim_max;

		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
			return -EFAULT;
	}
	return ret;
}

#endif
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	r = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (r.rlim_cur > 0x7FFFFFFF)
		r.rlim_cur = 0x7FFFFFFF;
	if (r.rlim_max > 0x7FFFFFFF)
		r.rlim_max = 0x7FFFFFFF;

	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
	    put_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	return 0;
}
#endif

#endif
static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}
/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
				new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/* Keep the capable check against init_user_ns until
		   cgroups can contain all limits */
		if (new_rlim->rlim_max > rlim->rlim_max &&
				!capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk, resource, new_rlim);
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
	 * ignores the rlimit.
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY &&
	    IS_ENABLED(CONFIG_POSIX_TIMERS))
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}
/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task,
				    unsigned int flags)
{
	const struct cred *cred = current_cred(), *tcred;
	bool id_match;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	id_match = (uid_eq(cred->uid, tcred->euid) &&
		    uid_eq(cred->uid, tcred->suid) &&
		    uid_eq(cred->uid, tcred->uid)  &&
		    gid_eq(cred->gid, tcred->egid) &&
		    gid_eq(cred->gid, tcred->sgid) &&
		    gid_eq(cred->gid, tcred->gid));
	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return -EPERM;

	return security_task_prlimit(cred, tcred, flags);
}
SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	unsigned int checkflags = 0;
	int ret;

	if (old_rlim)
		checkflags |= LSM_PRLIMIT_READ;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
		checkflags |= LSM_PRLIMIT_WRITE;
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk, checkflags);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}
SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}
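/*
 * Illustrative userspace sketch (not part of this file): the prlimit64
 * path above subsumes both getrlimit and setrlimit, e.g. raising the
 * RLIMIT_NOFILE of another process via the glibc wrapper (target_pid
 * is a hypothetical variable):
 *
 *	struct rlimit new = { .rlim_cur = 4096, .rlim_max = 4096 };
 *	struct rlimit old;
 *	if (prlimit(target_pid, RLIMIT_NOFILE, &new, &old) == -1)
 *		perror("prlimit");
 */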
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 *
 */
static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}
void getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	u64 tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *)r, 0, sizeof (*r));
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;
		fallthrough;

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
		} while_each_thread(p, t);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	r->ru_utime = ns_to_kernel_old_timeval(utime);
	r->ru_stime = ns_to_kernel_old_timeval(stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif
SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
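/*
 * Illustrative sketch (not part of this file): umask() always succeeds
 * and returns the previous mask, so reading the current mask without
 * changing it takes two calls:
 *
 *	mode_t old = umask(0);	// read (and clobber)
 *	umask(old);		// restore
 */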
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to an executable file, make
	 * sure that this one is executable as well, to avoid breaking an
	 * overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = file_permission(exe.file, MAY_EXEC);
	if (err)
		goto exit;

	err = replace_mm_exe_file(mm, exe.file);
exit:
	fdput(exe);
	return err;
}
/*
 * Check arithmetic relations of passed addresses.
 *
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data, <=, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * Don't allow the rlimits to be overridden if they are set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	error = 0;
out:
	return error;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		/*
		 * Someone is trying to cheat the auxv vector.
		 */
		if (!prctl_map.auxv ||
				prctl_map.auxv_size > sizeof(mm->saved_auxv))
			return -EINVAL;

		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		/*
		 * Check if the current user is checkpoint/restore capable.
		 * At the time of this writing, it checks for CAP_SYS_ADMIN
		 * or CAP_CHECKPOINT_RESTORE.
		 * Note that a user with access to ptrace can masquerade an
		 * arbitrary program as any executable, even setuid ones.
		 * This may have implications in the tomoyo subsystem.
		 */
		if (!checkpoint_restore_ns_capable(current_user_ns()))
			return -EPERM;

		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	/*
	 * arg_lock protects concurrent updates but we still need mmap_lock for
	 * read to exclude races with sys_brk.
	 */
	mmap_read_lock(mm);

	/*
	 * We don't validate that these members point to real, present VMAs
	 * because the application may have the corresponding VMAs already
	 * unmapped, and the kernel uses these members mostly for statistics
	 * output in procfs, except
	 *
	 *  - @start_brk/@brk which are used in do_brk_flags, but the kernel
	 *    looks up VMAs when updating these members, so anything wrong
	 *    written here causes the kernel to swear at the userspace program
	 *    but won't lead to any problem in the kernel itself
	 */

	spin_lock(&mm->arg_lock);
	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;
	spin_unlock(&mm->arg_lock);

	/*
	 * Note this update of @saved_auxv is lockless thus
	 * if someone reads this member in procfs while we're
	 * updating -- it may get partly updated results. It's
	 * known and acceptable trade off: we leave it as is to
	 * not introduce additional locks here making the kernel
	 * more complex.
	 */
	if (prctl_map.auxv_size)
		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));

	mmap_read_unlock(mm);
	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values.  It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
	 */
	unsigned long user_auxv[AT_VECTOR_SIZE] = {};

	if (len > sizeof(user_auxv))
		return -EINVAL;

	if (copy_from_user(user_auxv, (const void __user *)addr, len))
		return -EFAULT;

	/* Make sure the last entry is always AT_NULL */
	user_auxv[AT_VECTOR_SIZE - 2] = 0;
	user_auxv[AT_VECTOR_SIZE - 1] = 0;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

	task_lock(current);
	memcpy(mm->saved_auxv, user_auxv, len);
	task_unlock(current);

	return 0;
}
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	struct mm_struct *mm = current->mm;
	struct prctl_mm_map prctl_map = {
		.auxv = NULL,
		.auxv_size = 0,
		.exe_fd = -1,
	};
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
			      opt != PR_SET_MM_MAP &&
			      opt != PR_SET_MM_MAP_SIZE)))
		return -EINVAL;

#ifdef CONFIG_CHECKPOINT_RESTORE
	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
#endif

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (opt == PR_SET_MM_AUXV)
		return prctl_set_auxv(mm, addr, arg4);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	/*
	 * arg_lock protects concurrent updates of arg boundaries, we need
	 * mmap_lock for a) concurrent sys_brk, b) finding VMA for addr
	 * validation.
	 */
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);

	spin_lock(&mm->arg_lock);
	prctl_map.start_code	= mm->start_code;
	prctl_map.end_code	= mm->end_code;
	prctl_map.start_data	= mm->start_data;
	prctl_map.end_data	= mm->end_data;
	prctl_map.start_brk	= mm->start_brk;
	prctl_map.brk		= mm->brk;
	prctl_map.start_stack	= mm->start_stack;
	prctl_map.arg_start	= mm->arg_start;
	prctl_map.arg_end	= mm->arg_end;
	prctl_map.env_start	= mm->env_start;
	prctl_map.env_end	= mm->env_end;

	switch (opt) {
	case PR_SET_MM_START_CODE:
		prctl_map.start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		prctl_map.end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		prctl_map.start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		prctl_map.end_data = addr;
		break;
	case PR_SET_MM_START_STACK:
		prctl_map.start_stack = addr;
		break;
	case PR_SET_MM_START_BRK:
		prctl_map.start_brk = addr;
		break;
	case PR_SET_MM_BRK:
		prctl_map.brk = addr;
		break;
	case PR_SET_MM_ARG_START:
		prctl_map.arg_start = addr;
		break;
	case PR_SET_MM_ARG_END:
		prctl_map.arg_end = addr;
		break;
	case PR_SET_MM_ENV_START:
		prctl_map.env_start = addr;
		break;
	case PR_SET_MM_ENV_END:
		prctl_map.env_end = addr;
		break;
	default:
		goto out;
	}

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		goto out;

	switch (opt) {
	/*
	 * If command line arguments and environment
	 * are placed somewhere else on stack, we can
	 * set them up here, ARG_START/END to setup
	 * command line arguments and ENV_START/END
	 * for environment variables.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
	}

	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;

	error = 0;
out:
	spin_unlock(&mm->arg_lock);
	mmap_read_unlock(mm);
	return error;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return -EINVAL;
}
#endif
static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If task has has_child_subreaper set - all its descendants
	 * already have this flag too and new descendants will
	 * inherit it on fork, skip them.
	 *
	 * If we've found child_reaper - skip descendants in
	 * its subtree as they will never get out of the pidns.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}
int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
{
	return -EINVAL;
}

int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
				    unsigned long ctrl)
{
	return -EINVAL;
}
#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
#ifdef CONFIG_ANON_VMA_NAME

#define ANON_VMA_NAME_MAX_LEN		80
#define ANON_VMA_NAME_INVALID_CHARS	"\\`$[]"

static inline bool is_valid_name_char(char ch)
{
	/* printable ascii characters, excluding ANON_VMA_NAME_INVALID_CHARS */
	return ch > 0x1f && ch < 0x7f &&
		!strchr(ANON_VMA_NAME_INVALID_CHARS, ch);
}
static int prctl_set_vma(unsigned long opt, unsigned long addr,
			 unsigned long size, unsigned long arg)
{
	struct mm_struct *mm = current->mm;
	const char __user *uname;
	struct anon_vma_name *anon_name = NULL;
	int error;

	switch (opt) {
	case PR_SET_VMA_ANON_NAME:
		uname = (const char __user *)arg;
		if (uname) {
			char *name, *pch;

			name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
			if (IS_ERR(name))
				return PTR_ERR(name);

			for (pch = name; *pch != '\0'; pch++) {
				if (!is_valid_name_char(*pch)) {
					kfree(name);
					return -EINVAL;
				}
			}
			/* anon_vma has its own copy */
			anon_name = anon_vma_name_alloc(name);
			kfree(name);
			if (!anon_name)
				return -ENOMEM;
		}

		mmap_write_lock(mm);
		error = madvise_set_anon_name(mm, addr, size, anon_name);
		mmap_write_unlock(mm);
		anon_vma_name_put(anon_name);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

#else /* CONFIG_ANON_VMA_NAME */
static int prctl_set_vma(unsigned long opt, unsigned long start,
			 unsigned long size, unsigned long arg)
{
	return -EINVAL;
}
#endif /* CONFIG_ANON_VMA_NAME */
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user * __user *)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (mmap_write_lock_killable(me->mm))
			return -EINTR;
		if (arg2)
			set_bit(MMF_DISABLE_THP, &me->mm->flags);
		else
			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
		mmap_write_unlock(me->mm);
		break;
	case PR_MPX_ENABLE_MANAGEMENT:
	case PR_MPX_DISABLE_MANAGEMENT:
		/* No longer implemented: */
		return -EINVAL;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	case PR_SVE_SET_VL:
		error = SVE_SET_VL(arg2);
		break;
	case PR_SVE_GET_VL:
		error = SVE_GET_VL();
		break;
	case PR_GET_SPECULATION_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_get(me, arg2);
		break;
	case PR_SET_SPECULATION_CTRL:
		if (arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
		break;
	case PR_PAC_RESET_KEYS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_RESET_KEYS(me, arg2);
		break;
	case PR_PAC_SET_ENABLED_KEYS:
		if (arg4 || arg5)
			return -EINVAL;
		error = PAC_SET_ENABLED_KEYS(me, arg2, arg3);
		break;
	case PR_PAC_GET_ENABLED_KEYS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_GET_ENABLED_KEYS(me);
		break;
	case PR_SET_TAGGED_ADDR_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = SET_TAGGED_ADDR_CTRL(arg2);
		break;
	case PR_GET_TAGGED_ADDR_CTRL:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = GET_TAGGED_ADDR_CTRL();
		break;
	case PR_SET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg3 || arg4 || arg5)
			return -EINVAL;

		if (arg2 == 1)
			current->flags |= PR_IO_FLUSHER;
		else if (!arg2)
			current->flags &= ~PR_IO_FLUSHER;
		else
			return -EINVAL;
		break;
	case PR_GET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;

		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
		break;
	case PR_SET_SYSCALL_USER_DISPATCH:
		error = set_syscall_user_dispatch(arg2, arg3, arg4,
						  (char __user *) arg5);
		break;
#ifdef CONFIG_SCHED_CORE
	case PR_SCHED_CORE:
		error = sched_core_share_pid(arg2, arg3, arg4, arg5);
		break;
#endif
	case PR_SET_VMA:
		error = prctl_set_vma(arg2, arg3, arg4, arg5);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}
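/*
 * Illustrative userspace sketch (not part of this file): both out
 * pointers are optional, so a caller interested only in the CPU can
 * pass NULL for the node (the third argument is historical and unused):
 *
 *	unsigned cpu;
 *	if (getcpu(&cpu, NULL) == 0)	// glibc wrapper
 *		printf("running on cpu %u\n", cpu);
 */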
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec64 tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_boottime_ts64(&tp);
	timens_add_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);
	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */
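	/*
	 * Worked example (illustrative, not part of the original comment):
	 * with mem_unit = 4096 the loop below shifts mem_total left 12
	 * times; if no shift overflows, all counters are converted to
	 * bytes (mem_unit = 1), matching what 2.2.x userspace expected.
	 */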
	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}
SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20 - 2 * sizeof(u32) - sizeof(int)];
};
COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;
	struct compat_sysinfo s_32;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and scale
	 *  down if needed
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	memset(&s_32, 0, sizeof(s_32));
	s_32.uptime = s.uptime;
	s_32.loads[0] = s.loads[0];
	s_32.loads[1] = s.loads[1];
	s_32.loads[2] = s.loads[2];
	s_32.totalram = s.totalram;
	s_32.freeram = s.freeram;
	s_32.sharedram = s.sharedram;
	s_32.bufferram = s.bufferram;
	s_32.totalswap = s.totalswap;
	s_32.freeswap = s.freeswap;
	s_32.procs = s.procs;
	s_32.totalhigh = s.totalhigh;
	s_32.freehigh = s.freehigh;
	s_32.mem_unit = s.mem_unit;
	if (copy_to_user(info, &s_32, sizeof(s_32)))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */