/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	spinlock_t		siglock;
	refcount_t		count;
	wait_queue_head_t	signalfd_wqh;
	struct k_sigaction	action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
	u64			ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t		utime;
	atomic64_t		stime;
	atomic64_t		sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

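/*
 * Illustrative sketch, not part of the original header ("example_" names
 * are made up): a counter set is zero-initialized at its definition with
 * INIT_CPUTIME_ATOMIC and then updated locklessly with the atomic64_*()
 * helpers, which is the whole point of the atomic variant.
 */
static inline void example_account_runtime(struct task_cputime_atomic *ct,
					   u64 delta_ns)
{
	/* no siglock or other lock is needed for this update */
	atomic64_add(delta_ns, &ct->sum_exec_runtime);
}
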
/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
	sigset_t signal;
	struct hlist_node node;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	refcount_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* For collecting multiprocess signals during fork */
	struct hlist_head	multiprocess;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit. (See the prctl() usage
	 * sketch below this struct.)
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT to index this array, as these
	 * values are defined to 0 and 1 respectively.
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

#endif
	/* Empty if CONFIG_POSIX_TIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* PID/PID hash table linkage. */
	struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom.
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations.
					 * Deprecated, do not use in new code.
					 * Use exec_update_lock instead.
					 */
	struct rw_semaphore exec_update_lock;	/* Held while task_struct is
						 * being updated during exec,
						 * and may have inconsistent
						 * permissions.
						 */
} __randomize_layout;

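/*
 * Illustrative userspace sketch of the child-subreaper mechanism noted
 * above (not part of this header): a service manager opts in via prctl(2):
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_CHILD_SUBREAPER, 1);
 *	// orphaned descendants now re-parent to this process instead of
 *	// init, so it receives their SIGCHLD and can wait() on them.
 */
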
/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */

/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task,
			  sigset_t *mask, kernel_siginfo_t *info);

static inline int kernel_dequeue_signal(void)
{
	struct task_struct *task = current;
	kernel_siginfo_t __info;
	int ret;

	spin_lock_irq(&task->sighand->siglock);
	ret = dequeue_signal(task, &task->blocked, &__info);
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

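/*
 * Illustrative sketch, not part of the original header: a kernel thread
 * that opted in to a signal with allow_signal() can drain it like this
 * (kernel_dequeue_signal() returns the dequeued signal number, or 0):
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		if (kernel_dequeue_signal() == SIGTERM)
 *			break;		// asked to shut down
 *		...
 *	}
 */
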
static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		set_special_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}

#ifdef __ARCH_SI_TRAPNO
# define ___ARCH_SI_TRAPNO(_a1) , _a1
#else
# define ___ARCH_SI_TRAPNO(_a1)
#endif
#ifdef __ia64__
# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
#else
# define ___ARCH_SI_IA64(_a1, _a2, _a3)
#endif

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
				const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

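/*
 * Illustrative sketch, not part of the original header: a syscall that
 * hits a transient condition (e.g. a race with the freezer) can arrange
 * to be re-executed transparently. "example_raced" is hypothetical:
 *
 *	if (example_raced)
 *		return restart_syscall();
 */
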
static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

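/*
 * Illustrative sketch, not part of the original header ("example_" name
 * is made up): long-running kernel loops commonly poll
 * fatal_signal_pending() so a SIGKILL can abort the work promptly.
 */
static inline int example_process_chunks(unsigned int nr_chunks)
{
	unsigned int i;

	for (i = 0; i < nr_chunks; i++) {
		if (fatal_signal_pending(current))
			return -EINTR;	/* killed: stop immediately */
		cond_resched();		/* and don't hog the CPU */
	}
	return 0;
}
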
static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else	/* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	task->restore_sigmask = false;
}
static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return task->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
	if (interrupted)
		WARN_ON(!test_thread_flag(TIF_SIGPENDING));
	else
		restore_saved_sigmask();
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;

	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

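/*
 * Illustrative sketch, not part of the original header: arch
 * signal-delivery code typically does
 *
 *	sigset_t *oldset = sigmask_to_save();
 *	ret = setup_rt_frame(ksig, oldset, regs);	// arch-specific
 *	signal_setup_done(ret, ksig, stepping);
 *
 * so the signal frame records the sigset that sigreturn must restore.
 */
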
static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO	((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)

static inline int __on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

	return __on_sig_stack(sp);
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

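/*
 * Illustrative sketch, not part of the original header: arch code picks
 * the stack for a new signal frame by filtering the user stack pointer
 * through sigsp(), which diverts to the alternate stack when SA_ONSTACK
 * applies and that stack is usable (alignment is arch-specific):
 *
 *	sp = sigsp(user_stack_pointer(regs), ksig);
 *	frame = (struct rt_sigframe __user *)((sp - frame_size) & ~15UL);
 */
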
extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
	for_each_process(p) for_each_thread(p, t)

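/*
 * Illustrative sketch, not part of the original header ("example_" name
 * is made up): the iterators above walk RCU-protected lists, so a
 * lockless walk must run under rcu_read_lock() (holding tasklist_lock
 * also works).
 */
static inline unsigned int example_count_all_threads(void)
{
	struct task_struct *p, *t;
	unsigned int nr = 0;

	rcu_read_lock();
	for_each_process_thread(p, t)
		nr++;
	rcu_read_unlock();

	return nr;
}
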
typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	if (type == PIDTYPE_PID)
		pid = task_pid(task);
	else
		pid = task->signal->pids[type];
	return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_SID];
}

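/*
 * Illustrative sketch, not part of the original header ("example_" name
 * is made up): per the note above, pin the result under rcu_read_lock()
 * before using it outside the critical section.
 */
static inline struct pid *example_get_pgrp(struct task_struct *task)
{
	struct pid *pgrp;

	rcu_read_lock();
	pgrp = get_pid(task_pgrp(task));	/* take a reference */
	rcu_read_unlock();

	return pgrp;		/* caller must drop it with put_pid() */
}
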
static inline int get_nr_threads(struct task_struct *task)
{
	return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/*
 * Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader. For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == task_tgid(p);
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
						  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(task, flags);
	(void)__cond_lock(&task->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}

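/*
 * Illustrative sketch, not part of the original header ("example_" name
 * is made up): __lock_task_sighand() returns NULL once the task's
 * sighand is gone, so the lock/unlock pair is used like this:
 */
static inline bool example_with_siglock(struct task_struct *task)
{
	unsigned long flags;

	if (!lock_task_sighand(task, &flags))
		return false;		/* task is exiting or dead */

	/* task->signal and task->sighand are stable here */

	unlock_task_sighand(task, &flags);
	return true;
}
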
static inline unsigned long task_rlimit(const struct task_struct *task,
					unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
					    unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}

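/*
 * Illustrative sketch, not part of the original header ("example_" name
 * is made up): a typical soft-limit check against the current task;
 * RLIM_INFINITY means no limit is enforced.
 */
static inline int example_check_stack_growth(unsigned long new_size)
{
	unsigned long limit = rlimit(RLIMIT_STACK);

	if (limit != RLIM_INFINITY && new_size > limit)
		return -ENOMEM;
	return 0;
}
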
#endif /* _LINUX_SCHED_SIGNAL_H */