/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 * Changes to use preallocated sigqueue structures
 * to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>

#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}
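
/*
 * For example, SIGCHLD or SIGWINCH sent to a task with SIG_DFL installed
 * is treated as ignored here, because those signals are covered by
 * sig_kernel_ignore(); a SIG_DFL SIGTERM is not, since its default
 * action is to terminate.
 */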
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
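
/*
 * Illustration: with _NSIG_WORDS == 1 (64-bit, _NSIG == 64),
 * PENDING(&t->pending, &t->blocked) boils down to
 *
 *	(t->pending.signal.sig[0] & ~t->blocked.sig[0]) != 0
 *
 * i.e. "is at least one pending signal not blocked?".
 */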
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they
	 * should clear it do so themselves.
	 */
	return false;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
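
/*
 * Typical use under siglock, mirroring do_signal_stop() below: fold the
 * stop signal number into the low JOBCTL_STOP_SIGMASK bits and set the
 * stop bits in one call:
 *
 *	if (task_set_jobctl_pending(t, signr | JOBCTL_STOP_PENDING |
 *					JOBCTL_STOP_CONSUME))
 *		sig->group_stop_count++;
 */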
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
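
/*
 * Note the asymmetry this creates: kernel-internal senders pass
 * override_rlimit (see __send_signal() below), so e.g. a synchronous
 * SIGSEGV is never dropped for accounting reasons, while user-queued
 * rt signals are bounded by RLIMIT_SIGPENDING.
 */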
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif /* CONFIG_POSIX_TIMERS */
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
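
/*
 * Dequeue-order illustration: if SIGSEGV (synchronous) and SIGUSR1 are
 * both pending and unblocked, next_signal() reports SIGSEGV first,
 * because the first sigset word is narrowed to SYNCHRONOUS_MASK before
 * the lowest set bit is picked.
 */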
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
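
/*
 * Caller sketch (siglock held, as required):
 *
 *	siginfo_t info;
 *	int signr = dequeue_signal(current, &current->blocked, &info);
 *
 *	if (signr)
 *		... act on info ...
 *
 * get_signal() below is the main consumer of this interface.
 */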
static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
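
/*
 * The signal_wake_up() and ptrace_signal_wake_up() wrappers (declared
 * in <linux/sched/signal.h>) select @state: a fatal wakeup passes
 * TASK_WAKEKILL so that even killable sleeps are interrupted, a plain
 * notification passes 0.
 */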
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
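
/*
 * This is the classic kill(2) permission matrix: the sender's euid/uid
 * must match the target's uid or saved uid, or the sender needs
 * CAP_KILL in the target's user namespace.
 */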
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
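
/*
 * Example: two back-to-back kill(pid, SIGUSR1) calls collapse into a
 * single pending instance (SIGUSR1 < SIGRTMIN), while two sigqueue()
 * calls with SIGRTMIN queue two separate entries, each with its own
 * siginfo.
 */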
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
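
/*
 * Outcome summary for the paths above: an already-pending legacy signal
 * is coalesced; SEND_SIG_FORCED skips queue allocation entirely; an rt
 * signal that cannot get a queue entry fails with -EAGAIN; a legacy
 * signal in the same situation is still delivered, only its siginfo is
 * lost.
 */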
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, type, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, PIDTYPE_PID);
}
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
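
/*
 * Callers normally go through the lock_task_sighand()/
 * unlock_task_sighand() wrappers from <linux/sched/signal.h>:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand and p->signal are stable here ...
 *		unlock_task_sighand(p, &flags);
 *	}
 *
 * as do_send_sig_info() above does.
 */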
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
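
/*
 * So send_sig(SIGHUP, p, 0) looks to the receiver like a user-sent
 * signal (SEND_SIG_NOINFO, si_code SI_USER), while send_sig(SIGHUP, p, 1)
 * marks it kernel-generated (SEND_SIG_PRIV, si_code SI_KERNEL) and, being
 * si_special, exempt from RLIMIT_SIGPENDING accounting.
 */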
void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
}
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}
int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(info.si_signo, &info, current);
}
#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(info.si_signo, &info, current);
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
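
/*
 * Lifecycle sketch for the preallocated case (POSIX timers):
 *
 *	q = sigqueue_alloc();		// at timer_create() time
 *	...
 *	send_sigqueue(q, pid, type);	// at each timer expiry
 *	...
 *	sigqueue_free(q);		// at timer deletion
 *
 * so an expiry can never fail with -EAGAIN on queue allocation.
 */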
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}
/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
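
/*
 * Seen from the tracer, the handshake above is roughly: the tracee
 * enters TASK_TRACED here, the tracer observes the stop via waitpid(2),
 * inspects or modifies it with ptrace(2) requests, and PTRACE_CONT (or
 * signal injection) lets freezable_schedule() return so the tracee
 * re-takes the siglock and resumes.
 */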
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
2209 static bool do_signal_stop(int signr)
2210 __releases(¤t->sighand->siglock)
2212 struct signal_struct *sig = current->signal;
2214 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2215 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2216 struct task_struct *t;
2218 /* signr will be recorded in task->jobctl for retries */
2219 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2221 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2222 unlikely(signal_group_exit(sig)))
2225 * There is no group stop already in progress. We must
2228 * While ptraced, a task may be resumed while group stop is
2229 * still in effect and then receive a stop signal and
2230 * initiate another group stop. This deviates from the
2231 * usual behavior as two consecutive stop signals can't
2232 * cause two group stops when !ptraced. That is why we
2233 * also check !task_is_stopped(t) below.
2235 * The condition can be distinguished by testing whether
2236 * SIGNAL_STOP_STOPPED is already set. Don't generate
2237 * group_exit_code in such case.
2239 * This is not necessary for SIGNAL_STOP_CONTINUED because
2240 * an intervening stop signal is required to cause two
2241 * continued events regardless of ptrace.
2243 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2244 sig->group_exit_code = signr;
2246 sig->group_stop_count = 0;
2248 if (task_set_jobctl_pending(current, signr | gstop))
2249 sig->group_stop_count++;
2252 while_each_thread(current, t) {
2254 * Setting state to TASK_STOPPED for a group
2255 * stop is always done with the siglock held,
2256 * so this check has no races.
2258 if (!task_is_stopped(t) &&
2259 task_set_jobctl_pending(t, signr | gstop)) {
2260 sig->group_stop_count++;
2261 if (likely(!(t->ptrace & PT_SEIZED)))
2262 signal_wake_up(t, 0);
2264 ptrace_trap_notify(t);
2269 if (likely(!current->ptrace)) {
2273 * If there are no other threads in the group, or if there
2274 * is a group stop in progress and we are the last to stop,
2275 * report to the parent.
2277 if (task_participate_group_stop(current))
2278 notify = CLD_STOPPED;
2280 set_special_state(TASK_STOPPED);
2281 spin_unlock_irq(&current->sighand->siglock);
2284 * Notify the parent of the group stop completion. Because
2285 * we're not holding either the siglock or tasklist_lock
2286 * here, a ptracer may attach in between; however, this is for
2287 * group stop and should always be delivered to the real
2288 * parent of the group leader. The new ptracer will get
2289 * its notification when this task transitions into
2290 * TASK_TRACED.
2292 if (notify) {
2293 read_lock(&tasklist_lock);
2294 do_notify_parent_cldstop(current, false, notify);
2295 read_unlock(&tasklist_lock);
2296 }
2298 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2299 freezable_schedule();
2303 * While ptraced, group stop is handled by STOP trap.
2304 * Schedule it and let the caller deal with it.
2306 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2307 return false;
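/*
 * Illustrative user-space sketch (not part of this file): the parent's
 * view of the group stop implemented above. waitpid() with WUNTRACED
 * collects the CLD_STOPPED notification; WCONTINUED later reports the
 * matching SIGCONT. Error handling elided.
 *
 *    int status;
 *
 *    kill(child, SIGSTOP);
 *    waitpid(child, &status, WUNTRACED);
 *    if (WIFSTOPPED(status))
 *        printf("stopped by signal %d\n", WSTOPSIG(status));
 *    kill(child, SIGCONT);
 *    waitpid(child, &status, WCONTINUED);
 *    if (WIFCONTINUED(status))
 *        printf("continued\n");
 */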
2312 * do_jobctl_trap - take care of ptrace jobctl traps
2314 * When PT_SEIZED, it's used for both group stop and explicit
2315 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2316 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2317 * the stop signal; otherwise, %SIGTRAP.
2319 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2320 * number as exit_code and no siginfo.
2323 * Must be called with @current->sighand->siglock held, which may be
2324 * released and re-acquired before returning with intervening sleep.
2326 static void do_jobctl_trap(void)
2328 struct signal_struct *signal = current->signal;
2329 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2331 if (current->ptrace & PT_SEIZED) {
2332 if (!signal->group_stop_count &&
2333 !(signal->flags & SIGNAL_STOP_STOPPED))
2334 signr = SIGTRAP;
2335 WARN_ON_ONCE(!signr);
2336 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2337 CLD_STOPPED);
2338 } else {
2339 WARN_ON_ONCE(!signr);
2340 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2341 current->exit_code = 0;
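/*
 * Illustrative user-space sketch (not part of this file): how a
 * PTRACE_SEIZE-based tracer reaches the PT_SEIZED branch above.
 * PTRACE_INTERRUPT schedules the trap, and the tracee reports
 * PTRACE_EVENT_STOP in the wait status. Error handling elided.
 *
 *    int status;
 *
 *    ptrace(PTRACE_SEIZE, pid, 0L, 0L);
 *    ptrace(PTRACE_INTERRUPT, pid, 0L, 0L);
 *    waitpid(pid, &status, 0);
 *    if (WIFSTOPPED(status) &&
 *        status >> 8 == (SIGTRAP | (PTRACE_EVENT_STOP << 8)))
 *        ptrace(PTRACE_CONT, pid, 0L, 0L);
 */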
2345 static int ptrace_signal(int signr, siginfo_t *info)
2348 * We do not check sig_kernel_stop(signr) but set this marker
2349 * unconditionally because we do not know whether debugger will
2350 * change signr. This flag has no meaning unless we are going
2351 * to stop after return from ptrace_stop(). In this case it will
2352 * be checked in do_signal_stop(), we should only stop if it was
2353 * not cleared by SIGCONT while we were sleeping. See also the
2354 * comment in dequeue_signal().
2356 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2357 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2359 /* We're back. Did the debugger cancel the sig? */
2360 signr = current->exit_code;
2361 if (signr == 0)
2362 return signr;
2364 current->exit_code = 0;
2367 * Update the siginfo structure if the signal has
2368 * changed. If the debugger wanted something
2369 * specific in the siginfo structure then it should
2370 * have updated *info via PTRACE_SETSIGINFO.
2372 if (signr != info->si_signo) {
2373 clear_siginfo(info);
2374 info->si_signo = signr;
2375 info->si_errno = 0;
2376 info->si_code = SI_USER;
2378 info->si_pid = task_pid_vnr(current->parent);
2379 info->si_uid = from_kuid_munged(current_user_ns(),
2380 task_uid(current->parent));
2384 /* If the (new) signal is now blocked, requeue it. */
2385 if (sigismember(&current->blocked, signr)) {
2386 specific_send_sig_info(signr, info, current);
2387 signr = 0;
2388 }
2390 return signr;
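/*
 * Illustrative user-space sketch (not part of this file): the debugger
 * side of the exchange above. The fourth ptrace() argument is what
 * makes signr differ from info->si_signo here; passing 0 cancels
 * delivery entirely. Error handling elided.
 *
 *    int status;
 *
 *    waitpid(pid, &status, 0);
 *    if (WIFSTOPPED(status)) {
 *        int sig = WSTOPSIG(status);
 *        ptrace(PTRACE_CONT, pid, 0L,
 *               sig == SIGUSR1 ? 0L : (long)sig);
 *    }
 */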
2393 bool get_signal(struct ksignal *ksig)
2395 struct sighand_struct *sighand = current->sighand;
2396 struct signal_struct *signal = current->signal;
2399 if (unlikely(current->task_works))
2400 task_work_run();
2402 if (unlikely(uprobe_deny_signal()))
2403 return false;
2406 * Do this once, we can't return to user-mode if freezing() == T.
2407 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2408 * thus do not need another check after return.
2410 try_to_freeze();
2412 relock:
2413 spin_lock_irq(&sighand->siglock);
2415 * Every stopped thread goes here after wakeup. Check to see if
2416 * we should notify the parent, prepare_signal(SIGCONT) encodes
2417 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2419 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2422 if (signal->flags & SIGNAL_CLD_CONTINUED)
2423 why = CLD_CONTINUED;
2427 signal->flags &= ~SIGNAL_CLD_MASK;
2429 spin_unlock_irq(&sighand->siglock);
2432 * Notify the parent that we're continuing. This event is
2433 * always per-process and doesn't make a whole lot of sense
2434 * for ptracers, who shouldn't consume the state via
2435 * wait(2) either, but, for backward compatibility, notify
2436 * the ptracer of the group leader too unless it's going to be
2437 * a duplicate.
2439 read_lock(&tasklist_lock);
2440 do_notify_parent_cldstop(current, false, why);
2442 if (ptrace_reparented(current->group_leader))
2443 do_notify_parent_cldstop(current->group_leader,
2445 read_unlock(&tasklist_lock);
2450 /* Has this task already been marked for death? */
2451 if (signal_group_exit(signal)) {
2452 ksig->info.si_signo = signr = SIGKILL;
2453 sigdelset(&current->pending.signal, SIGKILL);
2454 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2455 &sighand->action[SIGKILL - 1]);
2456 recalc_sigpending();
2461 struct k_sigaction *ka;
2463 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2464 do_signal_stop(0))
2465 goto relock;
2467 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2468 do_jobctl_trap();
2469 spin_unlock_irq(&sighand->siglock);
2470 goto relock;
2471 }
2474 * Signals generated by the execution of an instruction
2475 * need to be delivered before any other pending signals
2476 * so that the instruction pointer in the signal stack
2477 * frame points to the faulting instruction.
2479 signr = dequeue_synchronous_signal(&ksig->info);
2480 if (!signr)
2481 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2483 if (!signr)
2484 break; /* will return 0 */
2486 if (unlikely(current->ptrace) && signr != SIGKILL) {
2487 signr = ptrace_signal(signr, &ksig->info);
2488 if (!signr)
2489 continue;
2490 }
2492 ka = &sighand->action[signr-1];
2494 /* Trace actually delivered signals. */
2495 trace_signal_deliver(signr, &ksig->info, ka);
2497 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2498 continue;
2499 if (ka->sa.sa_handler != SIG_DFL) {
2500 /* Run the handler. */
2501 ksig->ka = *ka;
2503 if (ka->sa.sa_flags & SA_ONESHOT)
2504 ka->sa.sa_handler = SIG_DFL;
2506 break; /* will return non-zero "signr" value */
2510 * Now we are doing the default action for this signal.
2512 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2513 continue;
2516 * Global init gets no signals it doesn't want.
2517 * Container-init gets no signals it doesn't want from the same
2518 * container.
2520 * Note that if global/container-init sees a sig_kernel_only()
2521 * signal here, the signal must have been generated internally
2522 * or must have come from an ancestor namespace. In either
2523 * case, the signal cannot be dropped.
2525 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2526 !sig_kernel_only(signr))
2527 continue;
2529 if (sig_kernel_stop(signr)) {
2531 * The default action is to stop all threads in
2532 * the thread group. The job control signals
2533 * do nothing in an orphaned pgrp, but SIGSTOP
2534 * always works. Note that siglock needs to be
2535 * dropped during the call to is_orphaned_pgrp()
2536 * because of lock ordering with tasklist_lock.
2537 * This allows an intervening SIGCONT to be posted.
2538 * We need to check for that and bail out if necessary.
2540 if (signr != SIGSTOP) {
2541 spin_unlock_irq(&sighand->siglock);
2543 /* signals can be posted during this window */
2545 if (is_current_pgrp_orphaned())
2546 goto relock;
2548 spin_lock_irq(&sighand->siglock);
2549 }
2551 if (likely(do_signal_stop(ksig->info.si_signo))) {
2552 /* It released the siglock. */
2553 goto relock;
2554 }
2557 * We didn't actually stop, due to a race
2558 * with SIGCONT or something like that.
2560 continue;
2561 }
2564 spin_unlock_irq(&sighand->siglock);
2567 * Anything else is fatal, maybe with a core dump.
2569 current->flags |= PF_SIGNALED;
2571 if (sig_kernel_coredump(signr)) {
2572 if (print_fatal_signals)
2573 print_fatal_signal(ksig->info.si_signo);
2574 proc_coredump_connector(current);
2576 * If it was able to dump core, this kills all
2577 * other threads in the group and synchronizes with
2578 * their demise. If we lost the race with another
2579 * thread getting here, it set group_exit_code
2580 * first and our do_group_exit call below will use
2581 * that value and ignore the one we pass it.
2583 do_coredump(&ksig->info);
2587 * Death signals, no core dump.
2589 do_group_exit(ksig->info.si_signo);
2592 spin_unlock_irq(&sighand->siglock);
2594 ksig->sig = signr;
2595 return ksig->sig > 0;
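/*
 * A minimal sketch of the architecture-side caller (assumed shape; the
 * real loop lives in each architecture's signal code, and
 * handle_signal() stands in for the arch-specific frame setup):
 *
 *    struct ksignal ksig;
 *
 *    if (get_signal(&ksig))
 *        handle_signal(&ksig, regs);
 *    else
 *        restore_saved_sigmask();
 */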
2599 * signal_delivered - finish bookkeeping after a signal has been delivered
2600 * @ksig: kernel signal struct
2601 * @stepping: nonzero if debugger single-step or block-step in use
2603 * This function should be called when a signal has successfully been
2604 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2605 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2606 is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2608 static void signal_delivered(struct ksignal *ksig, int stepping)
2612 /* A signal was successfully delivered, and the
2613 saved sigmask was stored on the signal frame,
2614 and will be restored by sigreturn. So we can
2615 simply clear the restore sigmask flag. */
2616 clear_restore_sigmask();
2618 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2619 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2620 sigaddset(&blocked, ksig->sig);
2621 set_current_blocked(&blocked);
2622 tracehook_signal_handler(stepping);
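/*
 * Illustrative user-space sketch (not part of this file; my_handler is
 * a hypothetical handler): the blocking behaviour implemented above,
 * as configured through sigaction(2). sa_mask is always added to the
 * blocked set while the handler runs; the handled signal itself is
 * added too unless SA_NODEFER is set.
 *
 *    struct sigaction sa = { 0 };
 *
 *    sa.sa_handler = my_handler;
 *    sigemptyset(&sa.sa_mask);
 *    sigaddset(&sa.sa_mask, SIGUSR2);
 *    sa.sa_flags = SA_NODEFER;
 *    sigaction(SIGUSR1, &sa, NULL);
 */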
2625 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2627 if (failed)
2628 force_sigsegv(ksig->sig, current);
2629 else
2630 signal_delivered(ksig, stepping);
2634 * It could be that complete_signal() picked us to notify about the
2635 * group-wide signal. Other threads should be notified now to take
2636 * the shared signals in @which since we will not.
2638 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2641 struct task_struct *t;
2643 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2644 if (sigisemptyset(&retarget))
2645 return;
2647 t = tsk;
2648 while_each_thread(tsk, t) {
2649 if (t->flags & PF_EXITING)
2650 continue;
2652 if (!has_pending_signals(&retarget, &t->blocked))
2653 continue;
2654 /* Remove the signals this thread can handle. */
2655 sigandsets(&retarget, &retarget, &t->blocked);
2657 if (!signal_pending(t))
2658 signal_wake_up(t, 0);
2660 if (sigisemptyset(&retarget))
2661 break;
2665 void exit_signals(struct task_struct *tsk)
2667 int group_stop = 0;
2668 sigset_t unblocked;
2671 * @tsk is about to have PF_EXITING set - lock out users which
2672 * expect stable threadgroup.
2674 cgroup_threadgroup_change_begin(tsk);
2676 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2677 tsk->flags |= PF_EXITING;
2678 cgroup_threadgroup_change_end(tsk);
2679 return;
2680 }
2682 spin_lock_irq(&tsk->sighand->siglock);
2684 * From now this task is not visible for group-wide signals,
2685 * see wants_signal(), do_signal_stop().
2687 tsk->flags |= PF_EXITING;
2689 cgroup_threadgroup_change_end(tsk);
2691 if (!signal_pending(tsk))
2692 goto out;
2694 unblocked = tsk->blocked;
2695 signotset(&unblocked);
2696 retarget_shared_pending(tsk, &unblocked);
2698 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2699 task_participate_group_stop(tsk))
2700 group_stop = CLD_STOPPED;
2701 out:
2702 spin_unlock_irq(&tsk->sighand->siglock);
2705 * If group stop has completed, deliver the notification. This
2706 * should always go to the real parent of the group leader.
2708 if (unlikely(group_stop)) {
2709 read_lock(&tasklist_lock);
2710 do_notify_parent_cldstop(tsk, false, group_stop);
2711 read_unlock(&tasklist_lock);
2715 EXPORT_SYMBOL(recalc_sigpending);
2716 EXPORT_SYMBOL_GPL(dequeue_signal);
2717 EXPORT_SYMBOL(flush_signals);
2718 EXPORT_SYMBOL(force_sig);
2719 EXPORT_SYMBOL(send_sig);
2720 EXPORT_SYMBOL(send_sig_info);
2721 EXPORT_SYMBOL(sigprocmask);
2724 * System call entry points.
2728 * sys_restart_syscall - restart a system call
2730 SYSCALL_DEFINE0(restart_syscall)
2732 struct restart_block *restart = &current->restart_block;
2733 return restart->fn(restart);
2736 long do_no_restart_syscall(struct restart_block *param)
2738 return -EINTR;
2741 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2743 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2744 sigset_t newblocked;
2745 /* A set of now blocked but previously unblocked signals. */
2746 sigandnsets(&newblocked, newset, &current->blocked);
2747 retarget_shared_pending(tsk, &newblocked);
2749 tsk->blocked = *newset;
2750 recalc_sigpending();
2754 * set_current_blocked - change current->blocked mask
2755 * @newset: new mask
2757 * It is wrong to change ->blocked directly, this helper should be used
2758 * to ensure the process can't miss a shared signal we are going to block.
2760 void set_current_blocked(sigset_t *newset)
2762 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2763 __set_current_blocked(newset);
2766 void __set_current_blocked(const sigset_t *newset)
2768 struct task_struct *tsk = current;
2771 * In case the signal mask hasn't changed, there is nothing we need
2772 * to do. The current->blocked shouldn't be modified by other task.
2774 if (sigequalsets(&tsk->blocked, newset))
2777 spin_lock_irq(&tsk->sighand->siglock);
2778 __set_task_blocked(tsk, newset);
2779 spin_unlock_irq(&tsk->sighand->siglock);
2783 * This is also useful for kernel threads that want to temporarily
2784 * (or permanently) block certain signals.
2786 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2787 interface happily blocks "unblockable" signals like SIGKILL
2788 and friends.
2790 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2792 struct task_struct *tsk = current;
2793 sigset_t newset;
2795 /* Lockless, only current can change ->blocked, never from irq */
2796 if (oldset)
2797 *oldset = tsk->blocked;
2799 switch (how) {
2800 case SIG_BLOCK:
2801 sigorsets(&newset, &tsk->blocked, set);
2802 break;
2803 case SIG_UNBLOCK:
2804 sigandnsets(&newset, &tsk->blocked, set);
2805 break;
2806 case SIG_SETMASK:
2807 newset = *set;
2808 break;
2809 default:
2810 return -EINVAL;
2811 }
2813 __set_current_blocked(&newset);
2814 return 0;
2818 * sys_rt_sigprocmask - change the list of currently blocked signals
2819 * @how: whether to add, remove, or set signals
2820 * @nset: signals to add, remove, or set (if non-null)
2821 * @oset: previous value of signal mask if non-null
2822 * @sigsetsize: size of sigset_t type
2824 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2825 sigset_t __user *, oset, size_t, sigsetsize)
2827 sigset_t old_set, new_set;
2830 /* XXX: Don't preclude handling different sized sigset_t's. */
2831 if (sigsetsize != sizeof(sigset_t))
2834 old_set = current->blocked;
2836 if (nset) {
2837 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2838 return -EFAULT;
2839 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2841 error = sigprocmask(how, &new_set, NULL);
2842 if (error)
2843 return error;
2844 }
2846 if (oset) {
2847 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2848 return -EFAULT;
2849 }
2851 return 0;
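/*
 * Illustrative user-space sketch (not part of this file): glibc's
 * sigprocmask(2) wrapper ends up in this syscall. Blocking SIGINT
 * around a critical section and then restoring the old mask:
 *
 *    sigset_t set, old;
 *
 *    sigemptyset(&set);
 *    sigaddset(&set, SIGINT);
 *    sigprocmask(SIG_BLOCK, &set, &old);
 *    ... critical section ...
 *    sigprocmask(SIG_SETMASK, &old, NULL);
 */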
2854 #ifdef CONFIG_COMPAT
2855 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2856 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2858 sigset_t old_set = current->blocked;
2860 /* XXX: Don't preclude handling different sized sigset_t's. */
2861 if (sigsetsize != sizeof(sigset_t))
2867 if (get_compat_sigset(&new_set, nset))
2869 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2871 error = sigprocmask(how, &new_set, NULL);
2875 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2879 static void do_sigpending(sigset_t *set)
2881 spin_lock_irq(&current->sighand->siglock);
2882 sigorsets(set, &current->pending.signal,
2883 &current->signal->shared_pending.signal);
2884 spin_unlock_irq(&current->sighand->siglock);
2886 /* Outside the lock because only this thread touches it. */
2887 sigandsets(set, &current->blocked, set);
2891 * sys_rt_sigpending - examine a pending signal that has been raised
2892 * while blocked
2893 * @uset: stores pending signals
2894 * @sigsetsize: size of sigset_t type or larger
2896 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2900 if (sigsetsize > sizeof(*uset))
2903 do_sigpending(&set);
2905 if (copy_to_user(uset, &set, sigsetsize))
2906 return -EFAULT;
2908 return 0;
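/*
 * Illustrative user-space sketch (not part of this file): checking for
 * a signal that arrived while blocked, via sigpending(2):
 *
 *    sigset_t pending;
 *
 *    sigpending(&pending);
 *    if (sigismember(&pending, SIGTERM))
 *        printf("SIGTERM is pending\n");
 */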
2911 #ifdef CONFIG_COMPAT
2912 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2913 compat_size_t, sigsetsize)
2917 if (sigsetsize > sizeof(*uset))
2920 do_sigpending(&set);
2922 return put_compat_sigset(uset, &set, sigsetsize);
2926 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
2928 enum siginfo_layout layout = SIL_KILL;
2929 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
2930 static const struct {
2931 unsigned char limit, layout;
2933 [SIGILL] = { NSIGILL, SIL_FAULT },
2934 [SIGFPE] = { NSIGFPE, SIL_FAULT },
2935 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
2936 [SIGBUS] = { NSIGBUS, SIL_FAULT },
2937 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
2938 #if defined(SIGEMT) && defined(NSIGEMT)
2939 [SIGEMT] = { NSIGEMT, SIL_FAULT },
2941 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
2942 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
2943 [SIGSYS] = { NSIGSYS, SIL_SYS },
2945 if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
2946 layout = filter[sig].layout;
2947 /* Handle the exceptions */
2948 if ((sig == SIGBUS) &&
2949 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
2950 layout = SIL_FAULT_MCEERR;
2951 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
2952 layout = SIL_FAULT_BNDERR;
2954 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
2955 layout = SIL_FAULT_PKUERR;
2958 else if (si_code <= NSIGPOLL)
2959 layout = SIL_POLL;
2960 } else {
2961 if (si_code == SI_TIMER)
2962 layout = SIL_TIMER;
2963 else if (si_code == SI_SIGIO)
2964 layout = SIL_POLL;
2965 else if (si_code < 0)
2966 layout = SIL_RT;
2971 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2973 if (copy_to_user(to, from, sizeof(struct siginfo)))
2978 #ifdef CONFIG_COMPAT
2979 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
2980 const struct siginfo *from)
2981 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
2983 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
2985 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
2986 const struct siginfo *from, bool x32_ABI)
2989 struct compat_siginfo new;
2990 memset(&new, 0, sizeof(new));
2992 new.si_signo = from->si_signo;
2993 new.si_errno = from->si_errno;
2994 new.si_code = from->si_code;
2995 switch(siginfo_layout(from->si_signo, from->si_code)) {
2997 new.si_pid = from->si_pid;
2998 new.si_uid = from->si_uid;
3001 new.si_tid = from->si_tid;
3002 new.si_overrun = from->si_overrun;
3003 new.si_int = from->si_int;
3006 new.si_band = from->si_band;
3007 new.si_fd = from->si_fd;
3010 new.si_addr = ptr_to_compat(from->si_addr);
3011 #ifdef __ARCH_SI_TRAPNO
3012 new.si_trapno = from->si_trapno;
3015 case SIL_FAULT_MCEERR:
3016 new.si_addr = ptr_to_compat(from->si_addr);
3017 #ifdef __ARCH_SI_TRAPNO
3018 new.si_trapno = from->si_trapno;
3020 new.si_addr_lsb = from->si_addr_lsb;
3022 case SIL_FAULT_BNDERR:
3023 new.si_addr = ptr_to_compat(from->si_addr);
3024 #ifdef __ARCH_SI_TRAPNO
3025 new.si_trapno = from->si_trapno;
3027 new.si_lower = ptr_to_compat(from->si_lower);
3028 new.si_upper = ptr_to_compat(from->si_upper);
3030 case SIL_FAULT_PKUERR:
3031 new.si_addr = ptr_to_compat(from->si_addr);
3032 #ifdef __ARCH_SI_TRAPNO
3033 new.si_trapno = from->si_trapno;
3035 new.si_pkey = from->si_pkey;
3038 new.si_pid = from->si_pid;
3039 new.si_uid = from->si_uid;
3040 new.si_status = from->si_status;
3041 #ifdef CONFIG_X86_X32_ABI
3043 new._sifields._sigchld_x32._utime = from->si_utime;
3044 new._sifields._sigchld_x32._stime = from->si_stime;
3048 new.si_utime = from->si_utime;
3049 new.si_stime = from->si_stime;
3053 new.si_pid = from->si_pid;
3054 new.si_uid = from->si_uid;
3055 new.si_int = from->si_int;
3058 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3059 new.si_syscall = from->si_syscall;
3060 new.si_arch = from->si_arch;
3064 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3070 int copy_siginfo_from_user32(struct siginfo *to,
3071 const struct compat_siginfo __user *ufrom)
3073 struct compat_siginfo from;
3075 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3079 to->si_signo = from.si_signo;
3080 to->si_errno = from.si_errno;
3081 to->si_code = from.si_code;
3082 switch(siginfo_layout(from.si_signo, from.si_code)) {
3084 to->si_pid = from.si_pid;
3085 to->si_uid = from.si_uid;
3088 to->si_tid = from.si_tid;
3089 to->si_overrun = from.si_overrun;
3090 to->si_int = from.si_int;
3093 to->si_band = from.si_band;
3094 to->si_fd = from.si_fd;
3097 to->si_addr = compat_ptr(from.si_addr);
3098 #ifdef __ARCH_SI_TRAPNO
3099 to->si_trapno = from.si_trapno;
3102 case SIL_FAULT_MCEERR:
3103 to->si_addr = compat_ptr(from.si_addr);
3104 #ifdef __ARCH_SI_TRAPNO
3105 to->si_trapno = from.si_trapno;
3107 to->si_addr_lsb = from.si_addr_lsb;
3109 case SIL_FAULT_BNDERR:
3110 to->si_addr = compat_ptr(from.si_addr);
3111 #ifdef __ARCH_SI_TRAPNO
3112 to->si_trapno = from.si_trapno;
3114 to->si_lower = compat_ptr(from.si_lower);
3115 to->si_upper = compat_ptr(from.si_upper);
3117 case SIL_FAULT_PKUERR:
3118 to->si_addr = compat_ptr(from.si_addr);
3119 #ifdef __ARCH_SI_TRAPNO
3120 to->si_trapno = from.si_trapno;
3122 to->si_pkey = from.si_pkey;
3125 to->si_pid = from.si_pid;
3126 to->si_uid = from.si_uid;
3127 to->si_status = from.si_status;
3128 #ifdef CONFIG_X86_X32_ABI
3129 if (in_x32_syscall()) {
3130 to->si_utime = from._sifields._sigchld_x32._utime;
3131 to->si_stime = from._sifields._sigchld_x32._stime;
3135 to->si_utime = from.si_utime;
3136 to->si_stime = from.si_stime;
3140 to->si_pid = from.si_pid;
3141 to->si_uid = from.si_uid;
3142 to->si_int = from.si_int;
3145 to->si_call_addr = compat_ptr(from.si_call_addr);
3146 to->si_syscall = from.si_syscall;
3147 to->si_arch = from.si_arch;
3152 #endif /* CONFIG_COMPAT */
3155 * do_sigtimedwait - wait for queued signals specified in @which
3156 * @which: queued signals to wait for
3157 * @info: if non-null, the signal's siginfo is returned here
3158 * @ts: upper bound on process time suspension
3160 static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
3161 const struct timespec *ts)
3163 ktime_t *to = NULL, timeout = KTIME_MAX;
3164 struct task_struct *tsk = current;
3165 sigset_t mask = *which;
3166 int sig, ret = 0;
3168 if (ts) {
3169 if (!timespec_valid(ts))
3170 return -EINVAL;
3171 timeout = timespec_to_ktime(*ts);
3172 to = &timeout;
3173 }
3176 * Invert the set of allowed signals to get those we want to block.
3178 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3181 spin_lock_irq(&tsk->sighand->siglock);
3182 sig = dequeue_signal(tsk, &mask, info);
3183 if (!sig && timeout) {
3185 * None ready, temporarily unblock those we're interested in
3186 * while we sleep, so that we'll be awakened when they
3187 * arrive. Unblocking is always fine, we can avoid
3188 * set_current_blocked().
3190 tsk->real_blocked = tsk->blocked;
3191 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3192 recalc_sigpending();
3193 spin_unlock_irq(&tsk->sighand->siglock);
3195 __set_current_state(TASK_INTERRUPTIBLE);
3196 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3198 spin_lock_irq(&tsk->sighand->siglock);
3199 __set_task_blocked(tsk, &tsk->real_blocked);
3200 sigemptyset(&tsk->real_blocked);
3201 sig = dequeue_signal(tsk, &mask, info);
3203 spin_unlock_irq(&tsk->sighand->siglock);
3205 if (sig)
3206 return sig;
3207 return ret ? -EINTR : -EAGAIN;
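/*
 * Illustrative user-space sketch (not part of this file): waiting for
 * SIGUSR1 with a two-second timeout via sigtimedwait(2). The signal
 * must be blocked first, or it may be delivered to a handler instead
 * of being dequeued here.
 *
 *    sigset_t set;
 *    siginfo_t info;
 *    struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *    sigemptyset(&set);
 *    sigaddset(&set, SIGUSR1);
 *    sigprocmask(SIG_BLOCK, &set, NULL);
 *    if (sigtimedwait(&set, &info, &ts) < 0 && errno == EAGAIN)
 *        printf("timed out\n");
 */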
3211 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3212 * in @uthese
3213 * @uthese: queued signals to wait for
3214 * @uinfo: if non-null, the signal's siginfo is returned here
3215 * @uts: upper bound on process time suspension
3216 * @sigsetsize: size of sigset_t type
3218 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3219 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3227 /* XXX: Don't preclude handling different sized sigset_t's. */
3228 if (sigsetsize != sizeof(sigset_t))
3231 if (copy_from_user(&these, uthese, sizeof(these)))
3235 if (copy_from_user(&ts, uts, sizeof(ts)))
3239 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3241 if (ret > 0 && uinfo) {
3242 if (copy_siginfo_to_user(uinfo, &info))
3249 #ifdef CONFIG_COMPAT
3250 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
3251 struct compat_siginfo __user *, uinfo,
3252 struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
3259 if (sigsetsize != sizeof(sigset_t))
3262 if (get_compat_sigset(&s, uthese))
3266 if (compat_get_timespec(&t, uts))
3270 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3272 if (ret > 0 && uinfo) {
3273 if (copy_siginfo_to_user32(uinfo, &info))
3282 * sys_kill - send a signal to a process
3283 * @pid: the PID of the process
3284 * @sig: signal to be sent
3286 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3288 struct siginfo info;
3290 clear_siginfo(&info);
3291 info.si_signo = sig;
3292 info.si_errno = 0;
3293 info.si_code = SI_USER;
3294 info.si_pid = task_tgid_vnr(current);
3295 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3297 return kill_something_info(sig, &info, pid);
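/*
 * Illustrative user-space sketch (not part of this file): kill(2) with
 * signal 0 performs only the permission and existence checks (the
 * null-signal probe described in do_send_specific() below); nothing is
 * delivered.
 *
 *    if (kill(pid, 0) == 0)
 *        printf("process exists and is signalable\n");
 *    else if (errno == ESRCH)
 *        printf("no such process\n");
 *    kill(pid, SIGTERM);
 */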
3301 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3303 struct task_struct *p;
3307 p = find_task_by_vpid(pid);
3308 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3309 error = check_kill_permission(sig, info, p);
3311 * The null signal is a permissions and process existence
3312 * probe. No signal is actually delivered.
3314 if (!error && sig) {
3315 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3317 * If lock_task_sighand() failed we pretend the task
3318 * dies after receiving the signal. The window is tiny,
3319 * and the signal is private anyway.
3321 if (unlikely(error == -ESRCH))
3330 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3332 struct siginfo info;
3334 clear_siginfo(&info);
3335 info.si_signo = sig;
3336 info.si_errno = 0;
3337 info.si_code = SI_TKILL;
3338 info.si_pid = task_tgid_vnr(current);
3339 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3341 return do_send_specific(tgid, pid, sig, &info);
3345 * sys_tgkill - send signal to one specific thread
3346 * @tgid: the thread group ID of the thread
3347 * @pid: the PID of the thread
3348 * @sig: signal to be sent
3350 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3351 * exists but no longer belongs to the target thread group. This
3352 * solves the problem of threads exiting and their PIDs being reused.
3354 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3356 /* This is only valid for single tasks */
3357 if (pid <= 0 || tgid <= 0)
3360 return do_tkill(tgid, pid, sig);
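/*
 * Illustrative user-space sketch (not part of this file): tgkill is
 * usually reached through syscall(2) with the kernel TID from
 * gettid(); older glibc versions provide no wrapper for either.
 *
 *    pid_t tid = syscall(SYS_gettid);
 *
 *    syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */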
3364 * sys_tkill - send signal to one specific task
3365 * @pid: the PID of the task
3366 * @sig: signal to be sent
3368 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3370 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3372 /* This is only valid for single tasks */
3376 return do_tkill(0, pid, sig);
3379 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3381 /* Not even root can pretend to send signals from the kernel.
3382 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3384 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3385 (task_pid_vnr(current) != pid))
3388 info->si_signo = sig;
3390 /* POSIX.1b doesn't mention process groups. */
3391 return kill_proc_info(sig, info, pid);
3395 * sys_rt_sigqueueinfo - queue a signal and accompanying info to a process
3396 * @pid: the PID of the thread
3397 * @sig: signal to be sent
3398 * @uinfo: signal info to be sent
3400 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3401 siginfo_t __user *, uinfo)
3404 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3406 return do_rt_sigqueueinfo(pid, sig, &info);
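/*
 * Illustrative user-space sketch (not part of this file): the usual
 * entry point to this syscall is sigqueue(3), which builds the siginfo
 * (si_code = SI_QUEUE) and attaches a caller-chosen value that an
 * SA_SIGINFO handler reads back from si_value.
 *
 *    union sigval value = { .sival_int = 42 };
 *
 *    sigqueue(pid, SIGUSR1, value);
 */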
3409 #ifdef CONFIG_COMPAT
3410 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3413 struct compat_siginfo __user *, uinfo)
3416 int ret = copy_siginfo_from_user32(&info, uinfo);
3419 return do_rt_sigqueueinfo(pid, sig, &info);
3423 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3425 /* This is only valid for single tasks */
3426 if (pid <= 0 || tgid <= 0)
3429 /* Not even root can pretend to send signals from the kernel.
3430 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3432 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3433 (task_pid_vnr(current) != pid))
3436 info->si_signo = sig;
3438 return do_send_specific(tgid, pid, sig, info);
3441 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3442 siginfo_t __user *, uinfo)
3446 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3449 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3452 #ifdef CONFIG_COMPAT
3453 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3457 struct compat_siginfo __user *, uinfo)
3461 if (copy_siginfo_from_user32(&info, uinfo))
3463 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3468 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3470 void kernel_sigaction(int sig, __sighandler_t action)
3472 spin_lock_irq(&current->sighand->siglock);
3473 current->sighand->action[sig - 1].sa.sa_handler = action;
3474 if (action == SIG_IGN) {
3475 sigset_t mask;
3477 sigemptyset(&mask);
3478 sigaddset(&mask, sig);
3480 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3481 flush_sigqueue_mask(&mask, &current->pending);
3482 recalc_sigpending();
3483 }
3484 spin_unlock_irq(&current->sighand->siglock);
3486 EXPORT_SYMBOL(kernel_sigaction);
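/*
 * A minimal in-kernel sketch (assumed usage; allow_signal() and
 * disallow_signal() are the helpers built on kernel_sigaction(), and
 * do_work() is a hypothetical work loop): a kthread opting in to
 * SIGTERM delivery.
 *
 *    allow_signal(SIGTERM);
 *    while (!kthread_should_stop()) {
 *        if (signal_pending(current))
 *            break;
 *        do_work();
 *    }
 */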
3488 void __weak sigaction_compat_abi(struct k_sigaction *act,
3489 struct k_sigaction *oact)
3493 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3495 struct task_struct *p = current, *t;
3496 struct k_sigaction *k;
3497 sigset_t mask;
3499 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3502 k = &p->sighand->action[sig-1];
3504 spin_lock_irq(&p->sighand->siglock);
3508 sigaction_compat_abi(act, oact);
3511 sigdelsetmask(&act->sa.sa_mask,
3512 sigmask(SIGKILL) | sigmask(SIGSTOP));
3516 * "Setting a signal action to SIG_IGN for a signal that is
3517 * pending shall cause the pending signal to be discarded,
3518 * whether or not it is blocked."
3520 * "Setting a signal action to SIG_DFL for a signal that is
3521 * pending and whose default action is to ignore the signal
3522 * (for example, SIGCHLD), shall cause the pending signal to
3523 * be discarded, whether or not it is blocked"
3525 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3526 sigemptyset(&mask);
3527 sigaddset(&mask, sig);
3528 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3529 for_each_thread(p, t)
3530 flush_sigqueue_mask(&mask, &t->pending);
3534 spin_unlock_irq(&p->sighand->siglock);
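/*
 * Illustrative user-space sketch (not part of this file): the POSIX
 * discard rule implemented above, observable with sigpending(2):
 *
 *    sigset_t set, pending;
 *
 *    sigemptyset(&set);
 *    sigaddset(&set, SIGUSR1);
 *    sigprocmask(SIG_BLOCK, &set, NULL);
 *    raise(SIGUSR1);
 *    signal(SIGUSR1, SIG_IGN);
 *    sigpending(&pending);
 *    assert(!sigismember(&pending, SIGUSR1));
 */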
3539 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
3542 struct task_struct *t = current;
3545 memset(oss, 0, sizeof(stack_t));
3546 oss->ss_sp = (void __user *) t->sas_ss_sp;
3547 oss->ss_size = t->sas_ss_size;
3548 oss->ss_flags = sas_ss_flags(sp) |
3549 (current->sas_ss_flags & SS_FLAG_BITS);
3553 void __user *ss_sp = ss->ss_sp;
3554 size_t ss_size = ss->ss_size;
3555 unsigned ss_flags = ss->ss_flags;
3558 if (unlikely(on_sig_stack(sp)))
3561 ss_mode = ss_flags & ~SS_FLAG_BITS;
3562 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3566 if (ss_mode == SS_DISABLE) {
3567 ss_size = 0;
3568 ss_sp = NULL;
3569 } else {
3570 if (unlikely(ss_size < min_ss_size))
3571 return -ENOMEM;
3572 }
3574 t->sas_ss_sp = (unsigned long) ss_sp;
3575 t->sas_ss_size = ss_size;
3576 t->sas_ss_flags = ss_flags;
3581 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3585 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3587 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3588 current_user_stack_pointer(),
3590 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
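/*
 * Illustrative user-space sketch (not part of this file; segv_handler
 * is a hypothetical handler): installing an alternate stack so a
 * SIGSEGV handler can still run after the main stack overflows.
 * SA_ONSTACK opts the handler into the stack registered here.
 *
 *    static char stack[SIGSTKSZ];
 *    stack_t ss = {
 *        .ss_sp = stack,
 *        .ss_size = sizeof(stack),
 *        .ss_flags = 0,
 *    };
 *    struct sigaction sa = { 0 };
 *
 *    sigaltstack(&ss, NULL);
 *    sa.sa_handler = segv_handler;
 *    sa.sa_flags = SA_ONSTACK;
 *    sigaction(SIGSEGV, &sa, NULL);
 */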
3595 int restore_altstack(const stack_t __user *uss)
3598 if (copy_from_user(&new, uss, sizeof(stack_t)))
3600 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
3602 /* squash all but EFAULT for now */
3606 int __save_altstack(stack_t __user *uss, unsigned long sp)
3608 struct task_struct *t = current;
3609 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3610 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3611 __put_user(t->sas_ss_size, &uss->ss_size);
3614 if (t->sas_ss_flags & SS_AUTODISARM)
3619 #ifdef CONFIG_COMPAT
3620 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
3621 compat_stack_t __user *uoss_ptr)
3627 compat_stack_t uss32;
3628 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3630 uss.ss_sp = compat_ptr(uss32.ss_sp);
3631 uss.ss_flags = uss32.ss_flags;
3632 uss.ss_size = uss32.ss_size;
3634 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
3635 compat_user_stack_pointer(),
3636 COMPAT_MINSIGSTKSZ);
3637 if (ret >= 0 && uoss_ptr) {
3639 memset(&old, 0, sizeof(old));
3640 old.ss_sp = ptr_to_compat(uoss.ss_sp);
3641 old.ss_flags = uoss.ss_flags;
3642 old.ss_size = uoss.ss_size;
3643 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
3649 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3650 const compat_stack_t __user *, uss_ptr,
3651 compat_stack_t __user *, uoss_ptr)
3653 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
3656 int compat_restore_altstack(const compat_stack_t __user *uss)
3658 int err = do_compat_sigaltstack(uss, NULL);
3659 /* squash all but -EFAULT for now */
3660 return err == -EFAULT ? err : 0;
3663 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3666 struct task_struct *t = current;
3667 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3669 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3670 __put_user(t->sas_ss_size, &uss->ss_size);
3673 if (t->sas_ss_flags & SS_AUTODISARM)
3679 #ifdef __ARCH_WANT_SYS_SIGPENDING
3682 * sys_sigpending - examine pending signals
3683 * @uset: where the mask of pending signals is returned
3685 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
3689 if (sizeof(old_sigset_t) > sizeof(*uset))
3692 do_sigpending(&set);
3694 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
3700 #ifdef CONFIG_COMPAT
3701 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3705 do_sigpending(&set);
3707 return put_user(set.sig[0], set32);
3713 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3715 * sys_sigprocmask - examine and change blocked signals
3716 * @how: whether to add, remove, or set signals
3717 * @nset: signals to add or remove (if non-null)
3718 * @oset: previous value of signal mask if non-null
3720 * Some platforms have their own version with special arguments;
3721 * others support only sys_rt_sigprocmask.
3724 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3725 old_sigset_t __user *, oset)
3727 old_sigset_t old_set, new_set;
3728 sigset_t new_blocked;
3730 old_set = current->blocked.sig[0];
3733 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3736 new_blocked = current->blocked;
3738 switch (how) {
3739 case SIG_BLOCK:
3740 sigaddsetmask(&new_blocked, new_set);
3741 break;
3742 case SIG_UNBLOCK:
3743 sigdelsetmask(&new_blocked, new_set);
3744 break;
3745 case SIG_SETMASK:
3746 new_blocked.sig[0] = new_set;
3747 break;
3748 default:
3749 return -EINVAL;
3750 }
3752 set_current_blocked(&new_blocked);
3755 if (oset) {
3756 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3757 return -EFAULT;
3758 }
3760 return 0;
3762 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3764 #ifndef CONFIG_ODD_RT_SIGACTION
3766 * sys_rt_sigaction - alter an action taken by a process
3767 * @sig: signal to be sent
3768 * @act: new sigaction
3769 * @oact: used to save the previous sigaction
3770 * @sigsetsize: size of sigset_t type
3772 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3773 const struct sigaction __user *, act,
3774 struct sigaction __user *, oact,
3777 struct k_sigaction new_sa, old_sa;
3780 /* XXX: Don't preclude handling different sized sigset_t's. */
3781 if (sigsetsize != sizeof(sigset_t))
3784 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3787 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3791 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3796 #ifdef CONFIG_COMPAT
3797 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3798 const struct compat_sigaction __user *, act,
3799 struct compat_sigaction __user *, oact,
3800 compat_size_t, sigsetsize)
3802 struct k_sigaction new_ka, old_ka;
3803 #ifdef __ARCH_HAS_SA_RESTORER
3804 compat_uptr_t restorer;
3808 /* XXX: Don't preclude handling different sized sigset_t's. */
3809 if (sigsetsize != sizeof(compat_sigset_t))
3813 compat_uptr_t handler;
3814 ret = get_user(handler, &act->sa_handler);
3815 new_ka.sa.sa_handler = compat_ptr(handler);
3816 #ifdef __ARCH_HAS_SA_RESTORER
3817 ret |= get_user(restorer, &act->sa_restorer);
3818 new_ka.sa.sa_restorer = compat_ptr(restorer);
3820 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
3821 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3826 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3828 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3830 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
3831 sizeof(oact->sa_mask));
3832 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3833 #ifdef __ARCH_HAS_SA_RESTORER
3834 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3835 &oact->sa_restorer);
3841 #endif /* !CONFIG_ODD_RT_SIGACTION */
3843 #ifdef CONFIG_OLD_SIGACTION
3844 SYSCALL_DEFINE3(sigaction, int, sig,
3845 const struct old_sigaction __user *, act,
3846 struct old_sigaction __user *, oact)
3848 struct k_sigaction new_ka, old_ka;
3853 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3854 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3855 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3856 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3857 __get_user(mask, &act->sa_mask))
3859 #ifdef __ARCH_HAS_KA_RESTORER
3860 new_ka.ka_restorer = NULL;
3862 siginitset(&new_ka.sa.sa_mask, mask);
3865 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3868 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3869 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3870 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3871 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3872 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3879 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3880 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3881 const struct compat_old_sigaction __user *, act,
3882 struct compat_old_sigaction __user *, oact)
3884 struct k_sigaction new_ka, old_ka;
3886 compat_old_sigset_t mask;
3887 compat_uptr_t handler, restorer;
3890 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3891 __get_user(handler, &act->sa_handler) ||
3892 __get_user(restorer, &act->sa_restorer) ||
3893 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3894 __get_user(mask, &act->sa_mask))
3897 #ifdef __ARCH_HAS_KA_RESTORER
3898 new_ka.ka_restorer = NULL;
3900 new_ka.sa.sa_handler = compat_ptr(handler);
3901 new_ka.sa.sa_restorer = compat_ptr(restorer);
3902 siginitset(&new_ka.sa.sa_mask, mask);
3905 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3908 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3909 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3910 &oact->sa_handler) ||
3911 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3912 &oact->sa_restorer) ||
3913 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3914 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3921 #ifdef CONFIG_SGETMASK_SYSCALL
3924 * For backwards compatibility. Functionality superseded by sigprocmask.
3926 SYSCALL_DEFINE0(sgetmask)
3929 return current->blocked.sig[0];
3932 SYSCALL_DEFINE1(ssetmask, int, newmask)
3934 int old = current->blocked.sig[0];
3937 siginitset(&newset, newmask);
3938 set_current_blocked(&newset);
3942 #endif /* CONFIG_SGETMASK_SYSCALL */
3944 #ifdef __ARCH_WANT_SYS_SIGNAL
3946 * For backwards compatibility. Functionality superseded by sigaction.
3948 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3950 struct k_sigaction new_sa, old_sa;
3953 new_sa.sa.sa_handler = handler;
3954 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3955 sigemptyset(&new_sa.sa.sa_mask);
3957 ret = do_sigaction(sig, &new_sa, &old_sa);
3959 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3961 #endif /* __ARCH_WANT_SYS_SIGNAL */
3963 #ifdef __ARCH_WANT_SYS_PAUSE
3965 SYSCALL_DEFINE0(pause)
3967 while (!signal_pending(current)) {
3968 __set_current_state(TASK_INTERRUPTIBLE);
3969 schedule();
3970 }
3971 return -ERESTARTNOHAND;
3976 static int sigsuspend(sigset_t *set)
3978 current->saved_sigmask = current->blocked;
3979 set_current_blocked(set);
3981 while (!signal_pending(current)) {
3982 __set_current_state(TASK_INTERRUPTIBLE);
3983 schedule();
3984 }
3985 set_restore_sigmask();
3986 return -ERESTARTNOHAND;
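/*
 * Illustrative user-space sketch (not part of this file; flag is a
 * volatile sig_atomic_t set by the handler): the classic race-free
 * wait that sigsuspend() exists for. The signal is blocked before the
 * flag is tested, so it cannot fire between the test and the
 * suspension; sigsuspend() unblocks it and sleeps atomically.
 *
 *    sigset_t block, old;
 *
 *    sigemptyset(&block);
 *    sigaddset(&block, SIGUSR1);
 *    sigprocmask(SIG_BLOCK, &block, &old);
 *    while (!flag)
 *        sigsuspend(&old);
 *    sigprocmask(SIG_SETMASK, &old, NULL);
 */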
3990 * sys_rt_sigsuspend - replace the signal mask with the
3991 * @unewset value until a signal is received
3992 * @unewset: new signal mask value
3993 * @sigsetsize: size of sigset_t type
3995 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3999 /* XXX: Don't preclude handling different sized sigset_t's. */
4000 if (sigsetsize != sizeof(sigset_t))
4003 if (copy_from_user(&newset, unewset, sizeof(newset)))
4005 return sigsuspend(&newset);
4008 #ifdef CONFIG_COMPAT
4009 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4013 /* XXX: Don't preclude handling different sized sigset_t's. */
4014 if (sigsetsize != sizeof(sigset_t))
4017 if (get_compat_sigset(&newset, unewset))
4019 return sigsuspend(&newset);
4023 #ifdef CONFIG_OLD_SIGSUSPEND
4024 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4027 siginitset(&blocked, mask);
4028 return sigsuspend(&blocked);
4031 #ifdef CONFIG_OLD_SIGSUSPEND3
4032 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4035 siginitset(&blocked, mask);
4036 return sigsuspend(&blocked);
4040 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4045 void __init signals_init(void)
4047 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
4048 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
4049 != offsetof(struct siginfo, _sifields._pad));
4050 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4052 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4055 #ifdef CONFIG_KGDB_KDB
4056 #include <linux/kdb.h>
4058 * kdb_send_sig - Allows kdb to send signals without exposing
4059 * signal internals. This function checks if the required locks are
4060 * available before calling the main signal code, to avoid kdb
4061 * deadlocks.
4063 void kdb_send_sig(struct task_struct *t, int sig)
4065 static struct task_struct *kdb_prev_t;
4066 int new_t, ret;
4067 if (!spin_trylock(&t->sighand->siglock)) {
4068 kdb_printf("Can't do kill command now.\n"
4069 "The sigmask lock is held somewhere else in "
4070 "kernel, try again later\n");
4073 new_t = kdb_prev_t != t;
4074 kdb_prev_t = t;
4075 if (t->state != TASK_RUNNING && new_t) {
4076 spin_unlock(&t->sighand->siglock);
4077 kdb_printf("Process is not RUNNING, sending a signal from "
4078 "kdb risks deadlock\n"
4079 "on the run queue locks. "
4080 "The signal has _not_ been sent.\n"
4081 "Reissue the kill command if you want to risk "
4085 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4086 spin_unlock(&t->sighand->siglock);
4088 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4091 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4093 #endif /* CONFIG_KGDB_KDB */