/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *	Changes to use preallocated sigqueue structures
 *	to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * to clear it do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

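/*
 * Illustrative sketch (not from this file): a typical caller changes
 * current->blocked under the siglock and lets recalc_sigpending()
 * re-derive TIF_SIGPENDING:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	sigaddset(&current->blocked, SIGUSR1);
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */
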
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

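/*
 * Worked example of the first-word special case above (illustrative):
 * with SIGUSR1 and SIGSEGV both pending and unblocked, the synchronous
 * SIGSEGV is reported first even though SIGUSR1 has the lower number:
 *
 *	x = *s &~ *m;			// both bits set
 *	x &= SYNCHRONOUS_MASK;		// only the SIGSEGV bit survives
 *	sig = ffz(~x) + 1;		// == SIGSEGV
 */
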
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING. If a stop signo is being set, the existing signo is
 * cleared. If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer. Note that we don't need any further
 * locking. @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop. Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;

	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;

		if (task_set_jobctl_pending(task, signr | gstop))
			sig->group_stop_count++;
	}
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

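/*
 * Pairing sketch (illustrative assumption, mirroring the accounting
 * above): a successful allocation pins a uid reference and a sigpending
 * count until __sigqueue_free() releases both:
 *
 *	struct sigqueue *q = __sigqueue_alloc(sig, t, GFP_ATOMIC, 0);
 *	if (q)
 *		__sigqueue_free(q);	// drops user->sigpending and the uid ref
 */
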
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

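/*
 * Example (illustrative): a kernel thread that receives signals usually
 * drains them itself, since no userspace handler ever will:
 *
 *	if (signal_pending(current))
 *		flush_signals(current);
 */
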
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal. Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue. This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space. So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal. Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL). So those cases clear this
		 * shared flag after we've set it. Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled. That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks. Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

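/*
 * Minimal usage sketch (assumed caller context, not taken from this
 * file): dequeue_signal() must run under the siglock, as in a
 * sigtimedwait()-style waiter:
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	signr = dequeue_signal(tsk, &mask, &info);
 *	spin_unlock_irq(&tsk->sighand->siglock);
 */
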
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

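/*
 * Worked example (illustrative): a sender with uid == euid == 1000 may
 * signal any target whose real or saved uid is 1000, since one of the
 * four uid_eq() checks above matches; signalling a root-owned target
 * instead requires CAP_KILL in the target's user namespace.
 */
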
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event. @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken. If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event. If trapped otherwise, the STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

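/*
 * Illustrative consequence (from the sender's point of view, userspace
 * shown for brevity): legacy signals coalesce, realtime signals queue:
 *
 *	kill(pid, SIGUSR1); kill(pid, SIGUSR1);   // delivered once
 *	kill(pid, SIGRTMIN); kill(pid, SIGRTMIN); // delivered twice
 */
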
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort. We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information. We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, type, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
			  const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
EXPORT_SYMBOL(force_sig);

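/*
 * Contrast (sketch): send_sig() honours blocking and SIG_IGN, while
 * force_sig() is meant for synchronous kernel-generated faults that
 * must not be ignored; e.g. an arch fault handler might do:
 *
 *	force_sig(SIGSEGV, current);	// cannot be blocked or ignored away
 */
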
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(info.si_signo, &info, current);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(info.si_signo, &info, current);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

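/*
 * Usage sketch (hypothetical caller): a driver holding a struct pid
 * reference can signal the owning task without a pid_t lookup:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_TGID);
 *	kill_pid(pid, SIGIO, 1);	// priv=1 selects SEND_SIG_PRIV
 *	put_pid(pid);
 */
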
/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create. If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

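/*
 * End-to-end sketch of the preallocated path (simplified, as used by
 * POSIX timers): allocate once at timer-create time, re-send the same
 * entry on every expiry, free it on deletion:
 *
 *	q = sigqueue_alloc();			// may fail -> report EAGAIN
 *	...
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	// once per expiry
 *	...
 *	sigqueue_free(q);			// on timer deletion
 */
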
static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop. This is allowed to block, e.g. for faults
		 * on user stack pages. We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock. That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping. TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now. We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader. The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop. The states
		 * for the two don't interact with each other. Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		cgroup_enter_frozen();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here. During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back. Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run. */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}

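/*
 * Example caller (sketch): ptrace event reporting encodes the event in
 * the high bits of exit_code, with SIGTRAP in the low seven bits:
 *
 *	ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
 */
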
2194 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2195 * @signr: signr causing group stop if initiating
2197 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2198 * and participate in it. If already set, participate in the existing
2199 * group stop. If participated in a group stop (and thus slept), %true is
2200 * returned with siglock released.
2202 * If ptraced, this function doesn't handle stop itself. Instead,
2203 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2204 * untouched. The caller must ensure that INTERRUPT trap handling takes
2205 * places afterwards.
2208 * Must be called with @current->sighand->siglock held, which is released
2212 * %false if group stop is already cancelled or ptrace trap is scheduled.
2213 * %true if participated in group stop.
2215 static bool do_signal_stop(int signr)
2216 __releases(¤t->sighand->siglock)
2218 struct signal_struct *sig = current->signal;
2220 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2221 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2222 struct task_struct *t;
2224 /* signr will be recorded in task->jobctl for retries */
2225 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2227 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2228 unlikely(signal_group_exit(sig)))
2231 * There is no group stop already in progress. We must
2234 * While ptraced, a task may be resumed while group stop is
2235 * still in effect and then receive a stop signal and
2236 * initiate another group stop. This deviates from the
2237 * usual behavior as two consecutive stop signals can't
2238 * cause two group stops when !ptraced. That is why we
2239 * also check !task_is_stopped(t) below.
2241 * The condition can be distinguished by testing whether
2242 * SIGNAL_STOP_STOPPED is already set. Don't generate
2243 * group_exit_code in such case.
2245 * This is not necessary for SIGNAL_STOP_CONTINUED because
2246 * an intervening stop signal is required to cause two
2247 * continued events regardless of ptrace.
2249 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2250 sig->group_exit_code = signr;
2252 sig->group_stop_count = 0;
2254 if (task_set_jobctl_pending(current, signr | gstop))
2255 sig->group_stop_count++;
2258 while_each_thread(current, t) {
2260 * Setting state to TASK_STOPPED for a group
2261 * stop is always done with the siglock held,
2262 * so this check has no races.
2264 if (!task_is_stopped(t) &&
2265 task_set_jobctl_pending(t, signr | gstop)) {
2266 sig->group_stop_count++;
2267 if (likely(!(t->ptrace & PT_SEIZED)))
2268 signal_wake_up(t, 0);
2270 ptrace_trap_notify(t);
2275 if (likely(!current->ptrace)) {
2276 int notify = 0;
2279 * If there are no other threads in the group, or if there
2280 * is a group stop in progress and we are the last to stop,
2281 * report to the parent.
2283 if (task_participate_group_stop(current))
2284 notify = CLD_STOPPED;
2286 set_special_state(TASK_STOPPED);
2287 spin_unlock_irq(&current->sighand->siglock);
2290 * Notify the parent of the group stop completion. Because
2291 * we're not holding either the siglock or tasklist_lock
2292 * here, the ptracer may attach in between; however, this is for
2293 * group stop and should always be delivered to the real
2294 * parent of the group leader. The new ptracer will get
2295 * its notification when this task transitions into
2296 * TASK_TRACED.
2299 read_lock(&tasklist_lock);
2300 do_notify_parent_cldstop(current, false, notify);
2301 read_unlock(&tasklist_lock);
2304 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2305 cgroup_enter_frozen();
2306 freezable_schedule();
2310 * While ptraced, group stop is handled by STOP trap.
2311 * Schedule it and let the caller deal with it.
2313 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2319 * do_jobctl_trap - take care of ptrace jobctl traps
2321 * When PT_SEIZED, it's used for both group stop and explicit
2322 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2323 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2324 * the stop signal; otherwise, %SIGTRAP.
2326 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2327 * number as exit_code and no siginfo.
2330 * Must be called with @current->sighand->siglock held, which may be
2331 * released and re-acquired before returning with intervening sleep.
2333 static void do_jobctl_trap(void)
2335 struct signal_struct *signal = current->signal;
2336 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2338 if (current->ptrace & PT_SEIZED) {
2339 if (!signal->group_stop_count &&
2340 !(signal->flags & SIGNAL_STOP_STOPPED))
2342 WARN_ON_ONCE(!signr);
2343 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2346 WARN_ON_ONCE(!signr);
2347 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2348 current->exit_code = 0;
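/*
 * Illustrative sketch (editor's addition, userspace, not part of this
 * file): a PT_SEIZED ptracer recognises the group-stop trap generated
 * above by decoding the waitpid() status as documented in ptrace(2).
 * pid, status and handle_group_stop() are hypothetical, and SIGSTOP
 * stands for whichever stop signal was actually taken:
 *
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) &&
 *	    (status >> 8) == (SIGSTOP | (PTRACE_EVENT_STOP << 8)))
 *		handle_group_stop();	// low byte carries the stop signal
 */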
2353 * do_freezer_trap - handle the freezer jobctl trap
2355 * Puts the task into the frozen state, unless the task is about to quit;
2356 * in that case it drops JOBCTL_TRAP_FREEZE instead.
2359 * Must be called with @current->sighand->siglock held,
2360 * which is always released before returning.
2362 static void do_freezer_trap(void)
2363 __releases(&current->sighand->siglock)
2366 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2367 * let's make another loop to give it a chance to be handled.
2368 * In any case, we'll come back here.
2370 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2371 JOBCTL_TRAP_FREEZE) {
2372 spin_unlock_irq(&current->sighand->siglock);
2377 * Now we're sure that there is no pending fatal signal and no
2378 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2379 * immediately (if there is a non-fatal signal pending), and
2380 * put the task into sleep.
2382 __set_current_state(TASK_INTERRUPTIBLE);
2383 clear_thread_flag(TIF_SIGPENDING);
2384 spin_unlock_irq(&current->sighand->siglock);
2385 cgroup_enter_frozen();
2386 freezable_schedule();
2389 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2392 * We do not check sig_kernel_stop(signr) but set this marker
2393 * unconditionally because we do not know whether the debugger will
2394 * change signr. This flag has no meaning unless we are going
2395 * to stop after return from ptrace_stop(). In this case it will
2396 * be checked in do_signal_stop(), we should only stop if it was
2397 * not cleared by SIGCONT while we were sleeping. See also the
2398 * comment in dequeue_signal().
2400 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2401 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2403 /* We're back. Did the debugger cancel the sig? */
2404 signr = current->exit_code;
2408 current->exit_code = 0;
2411 * Update the siginfo structure if the signal has
2412 * changed. If the debugger wanted something
2413 * specific in the siginfo structure then it should
2414 * have updated *info via PTRACE_SETSIGINFO.
2416 if (signr != info->si_signo) {
2417 clear_siginfo(info);
2418 info->si_signo = signr;
2420 info->si_code = SI_USER;
2422 info->si_pid = task_pid_vnr(current->parent);
2423 info->si_uid = from_kuid_munged(current_user_ns(),
2424 task_uid(current->parent));
2428 /* If the (new) signal is now blocked, requeue it. */
2429 if (sigismember(&current->blocked, signr)) {
2430 send_signal(signr, info, current, PIDTYPE_PID);
2437 bool get_signal(struct ksignal *ksig)
2439 struct sighand_struct *sighand = current->sighand;
2440 struct signal_struct *signal = current->signal;
2443 if (unlikely(current->task_works))
2444 task_work_run();
2446 if (unlikely(uprobe_deny_signal()))
2447 return false;
2450 * Do this once, we can't return to user-mode if freezing() == T.
2451 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2452 * thus do not need another check after return.
2457 spin_lock_irq(&sighand->siglock);
2459 * Every stopped thread goes here after wakeup. Check to see if
2460 * we should notify the parent, prepare_signal(SIGCONT) encodes
2461 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2463 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2466 if (signal->flags & SIGNAL_CLD_CONTINUED)
2467 why = CLD_CONTINUED;
2471 signal->flags &= ~SIGNAL_CLD_MASK;
2473 spin_unlock_irq(&sighand->siglock);
2476 * Notify the parent that we're continuing. This event is
2477 * always per-process and doesn't make a whole lot of sense
2478 * for ptracers, who shouldn't consume the state via
2479 * wait(2) either, but, for backward compatibility, notify
2480 * the ptracer of the group leader too unless it would be
2481 * a duplicate of the notification sent to the real parent.
2483 read_lock(&tasklist_lock);
2484 do_notify_parent_cldstop(current, false, why);
2486 if (ptrace_reparented(current->group_leader))
2487 do_notify_parent_cldstop(current->group_leader,
2489 read_unlock(&tasklist_lock);
2494 /* Has this task already been marked for death? */
2495 if (signal_group_exit(signal)) {
2496 ksig->info.si_signo = signr = SIGKILL;
2497 sigdelset(&current->pending.signal, SIGKILL);
2498 recalc_sigpending();
2503 struct k_sigaction *ka;
2505 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2509 if (unlikely(current->jobctl &
2510 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2511 if (current->jobctl & JOBCTL_TRAP_MASK) {
2513 spin_unlock_irq(&sighand->siglock);
2514 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2521 * If the task is leaving the frozen state, let's update
2522 * cgroup counters and reset the frozen bit.
2524 if (unlikely(cgroup_task_frozen(current))) {
2525 spin_unlock_irq(&sighand->siglock);
2526 cgroup_leave_frozen(false);
2531 * Signals generated by the execution of an instruction
2532 * need to be delivered before any other pending signals
2533 * so that the instruction pointer in the signal stack
2534 * frame points to the faulting instruction.
2536 signr = dequeue_synchronous_signal(&ksig->info);
2538 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2541 break; /* will return 0 */
2543 if (unlikely(current->ptrace) && signr != SIGKILL) {
2544 signr = ptrace_signal(signr, &ksig->info);
2549 ka = &sighand->action[signr-1];
2551 /* Trace actually delivered signals. */
2552 trace_signal_deliver(signr, &ksig->info, ka);
2554 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2555 continue;
2556 if (ka->sa.sa_handler != SIG_DFL) {
2557 /* Run the handler. */
2560 if (ka->sa.sa_flags & SA_ONESHOT)
2561 ka->sa.sa_handler = SIG_DFL;
2563 break; /* will return non-zero "signr" value */
2567 * Now we are doing the default action for this signal.
2569 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2573 * Global init gets no signals it doesn't want.
2574 * Container-init gets no signals it doesn't want from the
2575 * same container.
2577 * Note that if global/container-init sees a sig_kernel_only()
2578 * signal here, the signal must have been generated internally
2579 * or must have come from an ancestor namespace. In either
2580 * case, the signal cannot be dropped.
2582 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2583 !sig_kernel_only(signr))
2586 if (sig_kernel_stop(signr)) {
2588 * The default action is to stop all threads in
2589 * the thread group. The job control signals
2590 * do nothing in an orphaned pgrp, but SIGSTOP
2591 * always works. Note that siglock needs to be
2592 * dropped during the call to is_orphaned_pgrp()
2593 * because of lock ordering with tasklist_lock.
2594 * This allows an intervening SIGCONT to be posted.
2595 * We need to check for that and bail out if necessary.
2597 if (signr != SIGSTOP) {
2598 spin_unlock_irq(&sighand->siglock);
2600 /* signals can be posted during this window */
2602 if (is_current_pgrp_orphaned())
2605 spin_lock_irq(&sighand->siglock);
2608 if (likely(do_signal_stop(ksig->info.si_signo))) {
2609 /* It released the siglock. */
2614 * We didn't actually stop, due to a race
2615 * with SIGCONT or something like that.
2621 spin_unlock_irq(&sighand->siglock);
2622 if (unlikely(cgroup_task_frozen(current)))
2623 cgroup_leave_frozen(true);
2626 * Anything else is fatal, maybe with a core dump.
2628 current->flags |= PF_SIGNALED;
2630 if (sig_kernel_coredump(signr)) {
2631 if (print_fatal_signals)
2632 print_fatal_signal(ksig->info.si_signo);
2633 proc_coredump_connector(current);
2635 * If it was able to dump core, this kills all
2636 * other threads in the group and synchronizes with
2637 * their demise. If we lost the race with another
2638 * thread getting here, it set group_exit_code
2639 * first and our do_group_exit call below will use
2640 * that value and ignore the one we pass it.
2642 do_coredump(&ksig->info);
2646 * Death signals, no core dump.
2648 do_group_exit(ksig->info.si_signo);
2651 spin_unlock_irq(&sighand->siglock);
2654 return ksig->sig > 0;
2658 * signal_delivered - called after a signal was successfully delivered
2659 * @ksig: kernel signal struct
2660 * @stepping: nonzero if debugger single-step or block-step in use
2662 * This function should be called when a signal has successfully been
2663 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2664 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2665 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2667 static void signal_delivered(struct ksignal *ksig, int stepping)
2671 /* A signal was successfully delivered, and the
2672 saved sigmask was stored on the signal frame,
2673 and will be restored by sigreturn. So we can
2674 simply clear the restore sigmask flag. */
2675 clear_restore_sigmask();
2677 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2678 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2679 sigaddset(&blocked, ksig->sig);
2680 set_current_blocked(&blocked);
2681 tracehook_signal_handler(stepping);
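/*
 * Illustrative sketch (editor's addition, userspace): the blocking
 * applied above corresponds to an installation like the following,
 * where handler() is a hypothetical signal handler. Without SA_NODEFER
 * the delivered signal itself is blocked while the handler runs, in
 * addition to everything in sa_mask:
 *
 *	struct sigaction sa = { .sa_handler = handler };
 *
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGUSR2);	// blocked during the handler
 *	sa.sa_flags = 0;			// no SA_NODEFER
 *	sigaction(SIGUSR1, &sa, NULL);
 */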
2684 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2687 force_sigsegv(ksig->sig, current);
2689 signal_delivered(ksig, stepping);
2693 * It could be that complete_signal() picked us to notify about the
2694 * group-wide signal. Other threads should be notified now to take
2695 * the shared signals in @which since we will not.
2697 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2700 struct task_struct *t;
2702 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2703 if (sigisemptyset(&retarget))
2707 while_each_thread(tsk, t) {
2708 if (t->flags & PF_EXITING)
2711 if (!has_pending_signals(&retarget, &t->blocked))
2713 /* Remove the signals this thread can handle. */
2714 sigandsets(&retarget, &retarget, &t->blocked);
2716 if (!signal_pending(t))
2717 signal_wake_up(t, 0);
2719 if (sigisemptyset(&retarget))
2724 void exit_signals(struct task_struct *tsk)
2730 * @tsk is about to have PF_EXITING set - lock out users which
2731 * expect stable threadgroup.
2733 cgroup_threadgroup_change_begin(tsk);
2735 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2736 tsk->flags |= PF_EXITING;
2737 cgroup_threadgroup_change_end(tsk);
2741 spin_lock_irq(&tsk->sighand->siglock);
2743 * From now this task is not visible for group-wide signals,
2744 * see wants_signal(), do_signal_stop().
2746 tsk->flags |= PF_EXITING;
2748 cgroup_threadgroup_change_end(tsk);
2750 if (!signal_pending(tsk))
2753 unblocked = tsk->blocked;
2754 signotset(&unblocked);
2755 retarget_shared_pending(tsk, &unblocked);
2757 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2758 task_participate_group_stop(tsk))
2759 group_stop = CLD_STOPPED;
2761 spin_unlock_irq(&tsk->sighand->siglock);
2764 * If group stop has completed, deliver the notification. This
2765 * should always go to the real parent of the group leader.
2767 if (unlikely(group_stop)) {
2768 read_lock(&tasklist_lock);
2769 do_notify_parent_cldstop(tsk, false, group_stop);
2770 read_unlock(&tasklist_lock);
2775 * System call entry points.
2779 * sys_restart_syscall - restart a system call
2781 SYSCALL_DEFINE0(restart_syscall)
2783 struct restart_block *restart = &current->restart_block;
2784 return restart->fn(restart);
2787 long do_no_restart_syscall(struct restart_block *param)
2792 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2794 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2795 sigset_t newblocked;
2796 /* A set of now blocked but previously unblocked signals. */
2797 sigandnsets(&newblocked, newset, &current->blocked);
2798 retarget_shared_pending(tsk, &newblocked);
2800 tsk->blocked = *newset;
2801 recalc_sigpending();
2805 * set_current_blocked - change current->blocked mask
2808 * It is wrong to change ->blocked directly; this helper should be used
2809 * to ensure the process can't miss a shared signal we are going to block.
2811 void set_current_blocked(sigset_t *newset)
2813 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2814 __set_current_blocked(newset);
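/*
 * Illustrative sketch (editor's addition) of the intended usage: build
 * a new mask on the stack and hand it to the helper instead of writing
 * to current->blocked directly:
 *
 *	sigset_t newset;
 *
 *	newset = current->blocked;
 *	sigaddset(&newset, SIGUSR1);
 *	set_current_blocked(&newset);
 */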
2817 void __set_current_blocked(const sigset_t *newset)
2819 struct task_struct *tsk = current;
2822 * In case the signal mask hasn't changed, there is nothing we need
2823 * to do. The current->blocked shouldn't be modified by other task.
2825 if (sigequalsets(&tsk->blocked, newset))
2828 spin_lock_irq(&tsk->sighand->siglock);
2829 __set_task_blocked(tsk, newset);
2830 spin_unlock_irq(&tsk->sighand->siglock);
2834 * This is also useful for kernel threads that want to temporarily
2835 * (or permanently) block certain signals.
2837 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2838 * interface happily blocks "unblockable" signals like SIGKILL
2839 * and friends.
2841 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2843 struct task_struct *tsk = current;
2846 /* Lockless, only current can change ->blocked, never from irq */
2848 *oldset = tsk->blocked;
2852 sigorsets(&newset, &tsk->blocked, set);
2855 sigandnsets(&newset, &tsk->blocked, set);
2864 __set_current_blocked(&newset);
2867 EXPORT_SYMBOL(sigprocmask);
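/*
 * Illustrative sketch (editor's addition): kernel-side use of the
 * helper exported above, e.g. a kthread temporarily blocking SIGTERM.
 * Note that, unlike the user-mode interface, this would also happily
 * block SIGKILL:
 *
 *	sigset_t set;
 *
 *	siginitset(&set, sigmask(SIGTERM));
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 */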
2870 * The api helps set app-provided sigmasks.
2872 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2873 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2875 int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
2876 sigset_t *oldset, size_t sigsetsize)
2881 if (sigsetsize != sizeof(sigset_t))
2883 if (copy_from_user(set, usigmask, sizeof(sigset_t)))
2886 *oldset = current->blocked;
2887 set_current_blocked(set);
2891 EXPORT_SYMBOL(set_user_sigmask);
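/*
 * Illustrative sketch (editor's addition): the calling pattern expected
 * of syscalls like ppoll. The variable names and the do_sys_poll() call
 * are placeholders for whatever the syscall actually waits on:
 *
 *	sigset_t ksigmask, sigsaved;
 *
 *	ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_sys_poll(ufds, nfds, to);	// wait with the temporary mask
 *	restore_user_sigmask(sigmask, &sigsaved);
 */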
2893 #ifdef CONFIG_COMPAT
2894 int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
2895 sigset_t *set, sigset_t *oldset,
2901 if (sigsetsize != sizeof(compat_sigset_t))
2903 if (get_compat_sigset(set, usigmask))
2906 *oldset = current->blocked;
2907 set_current_blocked(set);
2911 EXPORT_SYMBOL(set_compat_user_sigmask);
2915 * restore_user_sigmask:
2916 * usigmask: sigmask passed in from userland.
2917 * sigsaved: saved sigmask when the syscall started and changed the sigmask to
2918 *           the usigmask.
2920 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2921 * epoll_pwait where a new sigmask is passed in from userland for the syscalls.
2923 void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved)
2929 * When signals are pending, do not restore them here.
2930 * Restoring sigmask here can lead to delivering signals that the above
2931 * syscalls are intended to block because of the sigmask passed in.
2933 if (signal_pending(current)) {
2934 current->saved_sigmask = *sigsaved;
2935 set_restore_sigmask();
2940 * This is needed because the fast syscall return path does not restore
2941 * saved_sigmask when signals are not pending.
2943 set_current_blocked(sigsaved);
2945 EXPORT_SYMBOL(restore_user_sigmask);
2948 * sys_rt_sigprocmask - change the list of currently blocked signals
2949 * @how: whether to add, remove, or set signals
2950 * @nset: new set of blocked signals, if non-null
2951 * @oset: previous value of signal mask if non-null
2952 * @sigsetsize: size of sigset_t type
2954 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2955 sigset_t __user *, oset, size_t, sigsetsize)
2957 sigset_t old_set, new_set;
2960 /* XXX: Don't preclude handling different sized sigset_t's. */
2961 if (sigsetsize != sizeof(sigset_t))
2964 old_set = current->blocked;
2967 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2969 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2971 error = sigprocmask(how, &new_set, NULL);
2977 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2984 #ifdef CONFIG_COMPAT
2985 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2986 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2988 sigset_t old_set = current->blocked;
2990 /* XXX: Don't preclude handling different sized sigset_t's. */
2991 if (sigsetsize != sizeof(sigset_t))
2997 if (get_compat_sigset(&new_set, nset))
2999 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3001 error = sigprocmask(how, &new_set, NULL);
3005 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
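/*
 * Illustrative sketch (editor's addition, userspace): glibc's
 * sigprocmask() ends up in the syscalls above; blocking SIGINT while
 * saving the old mask looks like:
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 */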
3009 static void do_sigpending(sigset_t *set)
3011 spin_lock_irq(&current->sighand->siglock);
3012 sigorsets(set, &current->pending.signal,
3013 &current->signal->shared_pending.signal);
3014 spin_unlock_irq(&current->sighand->siglock);
3016 /* Outside the lock because only this thread touches it. */
3017 sigandsets(set, &current->blocked, set);
3021 * sys_rt_sigpending - examine a pending signal that has been raised
3023 * @uset: stores pending signals
3024 * @sigsetsize: size of sigset_t type or larger
3026 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3030 if (sigsetsize > sizeof(*uset))
3033 do_sigpending(&set);
3035 if (copy_to_user(uset, &set, sigsetsize))
3041 #ifdef CONFIG_COMPAT
3042 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3043 compat_size_t, sigsetsize)
3047 if (sigsetsize > sizeof(*uset))
3050 do_sigpending(&set);
3052 return put_compat_sigset(uset, &set, sigsetsize);
3056 static const struct {
3057 unsigned char limit, layout;
3058 } sig_sicodes[] = {
3059 [SIGILL] = { NSIGILL, SIL_FAULT },
3060 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3061 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3062 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3063 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3065 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3067 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3068 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3069 [SIGSYS] = { NSIGSYS, SIL_SYS },
3072 static bool known_siginfo_layout(unsigned sig, int si_code)
3074 if (si_code == SI_KERNEL)
3075 return true;
3076 else if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3077 if (sig_specific_sicodes(sig)) {
3078 if (si_code <= sig_sicodes[sig].limit)
3081 else if (si_code <= NSIGPOLL)
3084 else if (si_code >= SI_DETHREAD)
3086 else if (si_code == SI_ASYNCNL)
3091 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3093 enum siginfo_layout layout = SIL_KILL;
3094 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3095 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3096 (si_code <= sig_sicodes[sig].limit)) {
3097 layout = sig_sicodes[sig].layout;
3098 /* Handle the exceptions */
3099 if ((sig == SIGBUS) &&
3100 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3101 layout = SIL_FAULT_MCEERR;
3102 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3103 layout = SIL_FAULT_BNDERR;
3105 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3106 layout = SIL_FAULT_PKUERR;
3109 else if (si_code <= NSIGPOLL)
3112 if (si_code == SI_TIMER)
3114 else if (si_code == SI_SIGIO)
3116 else if (si_code < 0)
3122 static inline char __user *si_expansion(const siginfo_t __user *info)
3124 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3127 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3129 char __user *expansion = si_expansion(to);
3130 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3132 if (clear_user(expansion, SI_EXPANSION_SIZE))
3137 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3138 const siginfo_t __user *from)
3140 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3141 char __user *expansion = si_expansion(from);
3142 char buf[SI_EXPANSION_SIZE];
3145 * An unknown si_code might need more than
3146 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3147 * extra bytes are 0. This guarantees copy_siginfo_to_user
3148 * will return this data to userspace exactly.
3150 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3152 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3160 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3161 const siginfo_t __user *from)
3163 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3165 to->si_signo = signo;
3166 return post_copy_siginfo_from_user(to, from);
3169 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3171 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3173 return post_copy_siginfo_from_user(to, from);
3176 #ifdef CONFIG_COMPAT
3177 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3178 const struct kernel_siginfo *from)
3179 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3181 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3183 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3184 const struct kernel_siginfo *from, bool x32_ABI)
3187 struct compat_siginfo new;
3188 memset(&new, 0, sizeof(new));
3190 new.si_signo = from->si_signo;
3191 new.si_errno = from->si_errno;
3192 new.si_code = from->si_code;
3193 switch (siginfo_layout(from->si_signo, from->si_code)) {
3195 new.si_pid = from->si_pid;
3196 new.si_uid = from->si_uid;
3199 new.si_tid = from->si_tid;
3200 new.si_overrun = from->si_overrun;
3201 new.si_int = from->si_int;
3204 new.si_band = from->si_band;
3205 new.si_fd = from->si_fd;
3208 new.si_addr = ptr_to_compat(from->si_addr);
3209 #ifdef __ARCH_SI_TRAPNO
3210 new.si_trapno = from->si_trapno;
3213 case SIL_FAULT_MCEERR:
3214 new.si_addr = ptr_to_compat(from->si_addr);
3215 #ifdef __ARCH_SI_TRAPNO
3216 new.si_trapno = from->si_trapno;
3218 new.si_addr_lsb = from->si_addr_lsb;
3220 case SIL_FAULT_BNDERR:
3221 new.si_addr = ptr_to_compat(from->si_addr);
3222 #ifdef __ARCH_SI_TRAPNO
3223 new.si_trapno = from->si_trapno;
3225 new.si_lower = ptr_to_compat(from->si_lower);
3226 new.si_upper = ptr_to_compat(from->si_upper);
3228 case SIL_FAULT_PKUERR:
3229 new.si_addr = ptr_to_compat(from->si_addr);
3230 #ifdef __ARCH_SI_TRAPNO
3231 new.si_trapno = from->si_trapno;
3233 new.si_pkey = from->si_pkey;
3236 new.si_pid = from->si_pid;
3237 new.si_uid = from->si_uid;
3238 new.si_status = from->si_status;
3239 #ifdef CONFIG_X86_X32_ABI
3241 new._sifields._sigchld_x32._utime = from->si_utime;
3242 new._sifields._sigchld_x32._stime = from->si_stime;
3246 new.si_utime = from->si_utime;
3247 new.si_stime = from->si_stime;
3251 new.si_pid = from->si_pid;
3252 new.si_uid = from->si_uid;
3253 new.si_int = from->si_int;
3256 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3257 new.si_syscall = from->si_syscall;
3258 new.si_arch = from->si_arch;
3262 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3268 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3269 const struct compat_siginfo *from)
3272 to->si_signo = from->si_signo;
3273 to->si_errno = from->si_errno;
3274 to->si_code = from->si_code;
3275 switch (siginfo_layout(from->si_signo, from->si_code)) {
3277 to->si_pid = from->si_pid;
3278 to->si_uid = from->si_uid;
3281 to->si_tid = from->si_tid;
3282 to->si_overrun = from->si_overrun;
3283 to->si_int = from->si_int;
3286 to->si_band = from->si_band;
3287 to->si_fd = from->si_fd;
3290 to->si_addr = compat_ptr(from->si_addr);
3291 #ifdef __ARCH_SI_TRAPNO
3292 to->si_trapno = from->si_trapno;
3295 case SIL_FAULT_MCEERR:
3296 to->si_addr = compat_ptr(from->si_addr);
3297 #ifdef __ARCH_SI_TRAPNO
3298 to->si_trapno = from->si_trapno;
3300 to->si_addr_lsb = from->si_addr_lsb;
3302 case SIL_FAULT_BNDERR:
3303 to->si_addr = compat_ptr(from->si_addr);
3304 #ifdef __ARCH_SI_TRAPNO
3305 to->si_trapno = from->si_trapno;
3307 to->si_lower = compat_ptr(from->si_lower);
3308 to->si_upper = compat_ptr(from->si_upper);
3310 case SIL_FAULT_PKUERR:
3311 to->si_addr = compat_ptr(from->si_addr);
3312 #ifdef __ARCH_SI_TRAPNO
3313 to->si_trapno = from->si_trapno;
3315 to->si_pkey = from->si_pkey;
3318 to->si_pid = from->si_pid;
3319 to->si_uid = from->si_uid;
3320 to->si_status = from->si_status;
3321 #ifdef CONFIG_X86_X32_ABI
3322 if (in_x32_syscall()) {
3323 to->si_utime = from->_sifields._sigchld_x32._utime;
3324 to->si_stime = from->_sifields._sigchld_x32._stime;
3328 to->si_utime = from->si_utime;
3329 to->si_stime = from->si_stime;
3333 to->si_pid = from->si_pid;
3334 to->si_uid = from->si_uid;
3335 to->si_int = from->si_int;
3338 to->si_call_addr = compat_ptr(from->si_call_addr);
3339 to->si_syscall = from->si_syscall;
3340 to->si_arch = from->si_arch;
3346 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3347 const struct compat_siginfo __user *ufrom)
3349 struct compat_siginfo from;
3351 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3354 from.si_signo = signo;
3355 return post_copy_siginfo_from_user32(to, &from);
3358 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3359 const struct compat_siginfo __user *ufrom)
3361 struct compat_siginfo from;
3363 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3366 return post_copy_siginfo_from_user32(to, &from);
3368 #endif /* CONFIG_COMPAT */
3371 * do_sigtimedwait - wait for queued signals specified in @which
3372 * @which: queued signals to wait for
3373 * @info: if non-null, the signal's siginfo is returned here
3374 * @ts: upper bound on process time suspension
3376 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3377 const struct timespec64 *ts)
3379 ktime_t *to = NULL, timeout = KTIME_MAX;
3380 struct task_struct *tsk = current;
3381 sigset_t mask = *which;
3385 if (!timespec64_valid(ts))
3387 timeout = timespec64_to_ktime(*ts);
3392 * Invert the set of allowed signals to get those we want to block.
3394 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3397 spin_lock_irq(&tsk->sighand->siglock);
3398 sig = dequeue_signal(tsk, &mask, info);
3399 if (!sig && timeout) {
3401 * None ready; temporarily unblock those we're interested
3402 * in while we sleep, so that we'll be awakened when they
3403 * arrive. Unblocking is always fine, we can avoid
3404 * set_current_blocked().
3406 tsk->real_blocked = tsk->blocked;
3407 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3408 recalc_sigpending();
3409 spin_unlock_irq(&tsk->sighand->siglock);
3411 __set_current_state(TASK_INTERRUPTIBLE);
3412 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3414 spin_lock_irq(&tsk->sighand->siglock);
3415 __set_task_blocked(tsk, &tsk->real_blocked);
3416 sigemptyset(&tsk->real_blocked);
3417 sig = dequeue_signal(tsk, &mask, info);
3419 spin_unlock_irq(&tsk->sighand->siglock);
3423 return ret ? -EINTR : -EAGAIN;
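/*
 * Illustrative sketch (editor's addition, userspace): typical use via
 * sigtimedwait(2) — block the signal first so it stays queued, then
 * dequeue it synchronously with a timeout:
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) < 0 && errno == EAGAIN)
 *		puts("timed out");
 */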
3427 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3428 * in @uthese
3429 * @uthese: queued signals to wait for
3430 * @uinfo: if non-null, the signal's siginfo is returned here
3431 * @uts: upper bound on process time suspension
3432 * @sigsetsize: size of sigset_t type
3434 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3435 siginfo_t __user *, uinfo,
3436 const struct __kernel_timespec __user *, uts,
3440 struct timespec64 ts;
3441 kernel_siginfo_t info;
3444 /* XXX: Don't preclude handling different sized sigset_t's. */
3445 if (sigsetsize != sizeof(sigset_t))
3448 if (copy_from_user(&these, uthese, sizeof(these)))
3452 if (get_timespec64(&ts, uts))
3456 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3458 if (ret > 0 && uinfo) {
3459 if (copy_siginfo_to_user(uinfo, &info))
3466 #ifdef CONFIG_COMPAT_32BIT_TIME
3467 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3468 siginfo_t __user *, uinfo,
3469 const struct old_timespec32 __user *, uts,
3473 struct timespec64 ts;
3474 kernel_siginfo_t info;
3477 if (sigsetsize != sizeof(sigset_t))
3480 if (copy_from_user(&these, uthese, sizeof(these)))
3484 if (get_old_timespec32(&ts, uts))
3488 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3490 if (ret > 0 && uinfo) {
3491 if (copy_siginfo_to_user(uinfo, &info))
3499 #ifdef CONFIG_COMPAT
3500 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3501 struct compat_siginfo __user *, uinfo,
3502 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3505 struct timespec64 t;
3506 kernel_siginfo_t info;
3509 if (sigsetsize != sizeof(sigset_t))
3512 if (get_compat_sigset(&s, uthese))
3516 if (get_timespec64(&t, uts))
3520 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3522 if (ret > 0 && uinfo) {
3523 if (copy_siginfo_to_user32(uinfo, &info))
3530 #ifdef CONFIG_COMPAT_32BIT_TIME
3531 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3532 struct compat_siginfo __user *, uinfo,
3533 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3536 struct timespec64 t;
3537 kernel_siginfo_t info;
3540 if (sigsetsize != sizeof(sigset_t))
3543 if (get_compat_sigset(&s, uthese))
3547 if (get_old_timespec32(&t, uts))
3551 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3553 if (ret > 0 && uinfo) {
3554 if (copy_siginfo_to_user32(uinfo, &info))
3563 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3565 clear_siginfo(info);
3566 info->si_signo = sig;
3568 info->si_code = SI_USER;
3569 info->si_pid = task_tgid_vnr(current);
3570 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3574 * sys_kill - send a signal to a process
3575 * @pid: the PID of the process
3576 * @sig: signal to be sent
3578 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3580 struct kernel_siginfo info;
3582 prepare_kill_siginfo(sig, &info);
3584 return kill_something_info(sig, &info, pid);
3588 * Verify that the signaler and signalee either are in the same pid namespace
3589 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3590 * namespace.
3592 static bool access_pidfd_pidns(struct pid *pid)
3594 struct pid_namespace *active = task_active_pid_ns(current);
3595 struct pid_namespace *p = ns_of_pid(pid);
3608 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t __user *info)
3610 #ifdef CONFIG_COMPAT
3612 * Avoid hooking up compat syscalls and instead handle necessary
3613 * conversions here. Note, this is a stop-gap measure and should not be
3614 * considered a generic solution.
3616 if (in_compat_syscall())
3617 return copy_siginfo_from_user32(
3618 kinfo, (struct compat_siginfo __user *)info);
3620 return copy_siginfo_from_user(kinfo, info);
3623 static struct pid *pidfd_to_pid(const struct file *file)
3625 if (file->f_op == &pidfd_fops)
3626 return file->private_data;
3628 return tgid_pidfd_to_pid(file);
3632 * sys_pidfd_send_signal - send a signal to a process through a task file
3634 * @pidfd: the file descriptor of the process
3635 * @sig: signal to be sent
3636 * @info: the signal info
3637 * @flags: future flags to be passed
3639 * The syscall currently only signals via PIDTYPE_PID which covers
3640 * kill(<positive-pid>, <signal>). It does not signal threads or process
3641 * groups.
3642 * In order to extend the syscall to threads and process groups the @flags
3643 * argument should be used. In essence, the @flags argument will determine
3644 * what is signaled and not the file descriptor itself. In other words,
3645 * grouping is a property of the flags argument, not a property of the file
3646 * descriptor.
3648 * Return: 0 on success, negative errno on failure
3650 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3651 siginfo_t __user *, info, unsigned int, flags)
3656 kernel_siginfo_t kinfo;
3658 /* Enforce that flags is 0 until we add an extension. */
3666 /* Is this a pidfd? */
3667 pid = pidfd_to_pid(f.file);
3674 if (!access_pidfd_pidns(pid))
3678 ret = copy_siginfo_from_user_any(&kinfo, info);
3683 if (unlikely(sig != kinfo.si_signo))
3686 /* Only allow sending arbitrary signals to yourself. */
3688 if ((task_pid(current) != pid) &&
3689 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3692 prepare_kill_siginfo(sig, &kinfo);
3695 ret = kill_pid_info(sig, &kinfo, pid);
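/*
 * Illustrative sketch (editor's addition, userspace): with this kernel a
 * pidfd can simply be an open /proc/<pid> directory (see pidfd_to_pid()
 * above); 1234 is a placeholder pid and __NR_pidfd_send_signal must be
 * provided by the installed headers:
 *
 *	int pidfd = open("/proc/1234", O_DIRECTORY | O_RDONLY);
 *
 *	if (pidfd >= 0) {
 *		syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */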
3703 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3705 struct task_struct *p;
3709 p = find_task_by_vpid(pid);
3710 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3711 error = check_kill_permission(sig, info, p);
3713 * The null signal is a permissions and process existence
3714 * probe. No signal is actually delivered.
3716 if (!error && sig) {
3717 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3719 * If lock_task_sighand() failed we pretend the task
3720 * dies after receiving the signal. The window is tiny,
3721 * and the signal is private anyway.
3723 if (unlikely(error == -ESRCH))
3732 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3734 struct kernel_siginfo info;
3736 clear_siginfo(&info);
3737 info.si_signo = sig;
3739 info.si_code = SI_TKILL;
3740 info.si_pid = task_tgid_vnr(current);
3741 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3743 return do_send_specific(tgid, pid, sig, &info);
3747 * sys_tgkill - send signal to one specific thread
3748 * @tgid: the thread group ID of the thread
3749 * @pid: the PID of the thread
3750 * @sig: signal to be sent
3752 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3753 * exists but no longer belongs to the target process. This
3754 * method solves the problem of threads exiting and PIDs getting reused.
3756 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3758 /* This is only valid for single tasks */
3759 if (pid <= 0 || tgid <= 0)
3760 return -EINVAL;
3762 return do_tkill(tgid, pid, sig);
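/*
 * Illustrative sketch (editor's addition, userspace): signalling one
 * specific thread of the calling process via raw syscall numbers, since
 * libc wrappers for tgkill/gettid may be absent:
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = syscall(__NR_gettid);
 *
 *	syscall(__NR_tgkill, tgid, tid, SIGUSR1);
 */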
3766 * sys_tkill - send signal to one specific task
3767 * @pid: the PID of the task
3768 * @sig: signal to be sent
3770 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3772 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3774 /* This is only valid for single tasks */
3775 if (pid <= 0)
3776 return -EINVAL;
3778 return do_tkill(0, pid, sig);
3781 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3783 /* Not even root can pretend to send signals from the kernel.
3784 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3786 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3787 (task_pid_vnr(current) != pid))
3790 /* POSIX.1b doesn't mention process groups. */
3791 return kill_proc_info(sig, info, pid);
3795 * sys_rt_sigqueueinfo - send signal information to a process
3796 * @pid: the PID of the thread
3797 * @sig: signal to be sent
3798 * @uinfo: signal info to be sent
3800 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3801 siginfo_t __user *, uinfo)
3803 kernel_siginfo_t info;
3804 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3807 return do_rt_sigqueueinfo(pid, sig, &info);
3810 #ifdef CONFIG_COMPAT
3811 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3814 struct compat_siginfo __user *, uinfo)
3816 kernel_siginfo_t info;
3817 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3820 return do_rt_sigqueueinfo(pid, sig, &info);
3824 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3826 /* This is only valid for single tasks */
3827 if (pid <= 0 || tgid <= 0)
3828 return -EINVAL;
3830 /* Not even root can pretend to send signals from the kernel.
3831 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3833 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3834 (task_pid_vnr(current) != pid))
3837 return do_send_specific(tgid, pid, sig, info);
3840 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3841 siginfo_t __user *, uinfo)
3843 kernel_siginfo_t info;
3844 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3847 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3850 #ifdef CONFIG_COMPAT
3851 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3855 struct compat_siginfo __user *, uinfo)
3857 kernel_siginfo_t info;
3858 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3861 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3866 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3868 void kernel_sigaction(int sig, __sighandler_t action)
3870 spin_lock_irq(&current->sighand->siglock);
3871 current->sighand->action[sig - 1].sa.sa_handler = action;
3872 if (action == SIG_IGN) {
3876 sigaddset(&mask, sig);
3878 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3879 flush_sigqueue_mask(&mask, &current->pending);
3880 recalc_sigpending();
3882 spin_unlock_irq(&current->sighand->siglock);
3884 EXPORT_SYMBOL(kernel_sigaction);
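/*
 * Illustrative sketch (editor's addition): the allow_signal() and
 * disallow_signal() helpers in <linux/signal.h> are thin wrappers
 * around kernel_sigaction(); a kthread that wants to stop catching
 * SIGTERM again effectively does:
 *
 *	kernel_sigaction(SIGTERM, SIG_IGN);
 */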
3886 void __weak sigaction_compat_abi(struct k_sigaction *act,
3887 struct k_sigaction *oact)
3891 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3893 struct task_struct *p = current, *t;
3894 struct k_sigaction *k;
3897 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3900 k = &p->sighand->action[sig-1];
3902 spin_lock_irq(&p->sighand->siglock);
3906 sigaction_compat_abi(act, oact);
3909 sigdelsetmask(&act->sa.sa_mask,
3910 sigmask(SIGKILL) | sigmask(SIGSTOP));
3914 * "Setting a signal action to SIG_IGN for a signal that is
3915 * pending shall cause the pending signal to be discarded,
3916 * whether or not it is blocked."
3918 * "Setting a signal action to SIG_DFL for a signal that is
3919 * pending and whose default action is to ignore the signal
3920 * (for example, SIGCHLD), shall cause the pending signal to
3921 * be discarded, whether or not it is blocked"
3923 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3925 sigaddset(&mask, sig);
3926 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3927 for_each_thread(p, t)
3928 flush_sigqueue_mask(&mask, &t->pending);
3932 spin_unlock_irq(&p->sighand->siglock);
3937 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
3940 struct task_struct *t = current;
3943 memset(oss, 0, sizeof(stack_t));
3944 oss->ss_sp = (void __user *) t->sas_ss_sp;
3945 oss->ss_size = t->sas_ss_size;
3946 oss->ss_flags = sas_ss_flags(sp) |
3947 (current->sas_ss_flags & SS_FLAG_BITS);
3951 void __user *ss_sp = ss->ss_sp;
3952 size_t ss_size = ss->ss_size;
3953 unsigned ss_flags = ss->ss_flags;
3956 if (unlikely(on_sig_stack(sp)))
3959 ss_mode = ss_flags & ~SS_FLAG_BITS;
3960 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3964 if (ss_mode == SS_DISABLE) {
3968 if (unlikely(ss_size < min_ss_size))
3972 t->sas_ss_sp = (unsigned long) ss_sp;
3973 t->sas_ss_size = ss_size;
3974 t->sas_ss_flags = ss_flags;
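/*
 * Illustrative sketch (editor's addition, userspace): installing an
 * alternate stack through the syscall below so a SIGSEGV handler can
 * run after the normal stack is exhausted; stk is a hypothetical buffer
 * and the handler must be installed with SA_ONSTACK:
 *
 *	static char stk[SIGSTKSZ];
 *	stack_t ss = { .ss_sp = stk, .ss_size = sizeof(stk) };
 *
 *	sigaltstack(&ss, NULL);
 */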
3979 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3983 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3985 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3986 current_user_stack_pointer(),
3988 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3993 int restore_altstack(const stack_t __user *uss)
3996 if (copy_from_user(&new, uss, sizeof(stack_t)))
3998 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4000 /* squash all but EFAULT for now */
4004 int __save_altstack(stack_t __user *uss, unsigned long sp)
4006 struct task_struct *t = current;
4007 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4008 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4009 __put_user(t->sas_ss_size, &uss->ss_size);
4012 if (t->sas_ss_flags & SS_AUTODISARM)
4017 #ifdef CONFIG_COMPAT
4018 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4019 compat_stack_t __user *uoss_ptr)
4025 compat_stack_t uss32;
4026 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4028 uss.ss_sp = compat_ptr(uss32.ss_sp);
4029 uss.ss_flags = uss32.ss_flags;
4030 uss.ss_size = uss32.ss_size;
4032 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4033 compat_user_stack_pointer(),
4034 COMPAT_MINSIGSTKSZ);
4035 if (ret >= 0 && uoss_ptr) {
4037 memset(&old, 0, sizeof(old));
4038 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4039 old.ss_flags = uoss.ss_flags;
4040 old.ss_size = uoss.ss_size;
4041 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4047 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4048 const compat_stack_t __user *, uss_ptr,
4049 compat_stack_t __user *, uoss_ptr)
4051 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4054 int compat_restore_altstack(const compat_stack_t __user *uss)
4056 int err = do_compat_sigaltstack(uss, NULL);
4057 /* squash all but -EFAULT for now */
4058 return err == -EFAULT ? err : 0;
4061 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4064 struct task_struct *t = current;
4065 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4067 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4068 __put_user(t->sas_ss_size, &uss->ss_size);
4071 if (t->sas_ss_flags & SS_AUTODISARM)
4077 #ifdef __ARCH_WANT_SYS_SIGPENDING
4080 * sys_sigpending - examine pending signals
4081 * @uset: where the mask of pending signals is returned
4083 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4087 if (sizeof(old_sigset_t) > sizeof(*uset))
4090 do_sigpending(&set);
4092 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4098 #ifdef CONFIG_COMPAT
4099 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4103 do_sigpending(&set);
4105 return put_user(set.sig[0], set32);
4111 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4113 * sys_sigprocmask - examine and change blocked signals
4114 * @how: whether to add, remove, or set signals
4115 * @nset: signals to add or remove (if non-null)
4116 * @oset: previous value of signal mask if non-null
4118 * Some platforms have their own version with special arguments;
4119 * others support only sys_rt_sigprocmask.
4122 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4123 old_sigset_t __user *, oset)
4125 old_sigset_t old_set, new_set;
4126 sigset_t new_blocked;
4128 old_set = current->blocked.sig[0];
4131 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4134 new_blocked = current->blocked;
4138 sigaddsetmask(&new_blocked, new_set);
4141 sigdelsetmask(&new_blocked, new_set);
4144 new_blocked.sig[0] = new_set;
4150 set_current_blocked(&new_blocked);
4154 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4160 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4162 #ifndef CONFIG_ODD_RT_SIGACTION
4164 * sys_rt_sigaction - alter an action taken by a process
4165 * @sig: signal whose action is to be changed
4166 * @act: new sigaction
4167 * @oact: used to save the previous sigaction
4168 * @sigsetsize: size of sigset_t type
4170 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4171 const struct sigaction __user *, act,
4172 struct sigaction __user *, oact,
4175 struct k_sigaction new_sa, old_sa;
4178 /* XXX: Don't preclude handling different sized sigset_t's. */
4179 if (sigsetsize != sizeof(sigset_t))
4182 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4185 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4189 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4194 #ifdef CONFIG_COMPAT
4195 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4196 const struct compat_sigaction __user *, act,
4197 struct compat_sigaction __user *, oact,
4198 compat_size_t, sigsetsize)
4200 struct k_sigaction new_ka, old_ka;
4201 #ifdef __ARCH_HAS_SA_RESTORER
4202 compat_uptr_t restorer;
4206 /* XXX: Don't preclude handling different sized sigset_t's. */
4207 if (sigsetsize != sizeof(compat_sigset_t))
4211 compat_uptr_t handler;
4212 ret = get_user(handler, &act->sa_handler);
4213 new_ka.sa.sa_handler = compat_ptr(handler);
4214 #ifdef __ARCH_HAS_SA_RESTORER
4215 ret |= get_user(restorer, &act->sa_restorer);
4216 new_ka.sa.sa_restorer = compat_ptr(restorer);
4218 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4219 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4224 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4226 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4228 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4229 sizeof(oact->sa_mask));
4230 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4231 #ifdef __ARCH_HAS_SA_RESTORER
4232 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4233 &oact->sa_restorer);
4239 #endif /* !CONFIG_ODD_RT_SIGACTION */
4241 #ifdef CONFIG_OLD_SIGACTION
4242 SYSCALL_DEFINE3(sigaction, int, sig,
4243 const struct old_sigaction __user *, act,
4244 struct old_sigaction __user *, oact)
4246 struct k_sigaction new_ka, old_ka;
4251 if (!access_ok(act, sizeof(*act)) ||
4252 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4253 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4254 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4255 __get_user(mask, &act->sa_mask))
4257 #ifdef __ARCH_HAS_KA_RESTORER
4258 new_ka.ka_restorer = NULL;
4260 siginitset(&new_ka.sa.sa_mask, mask);
4263 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4266 if (!access_ok(oact, sizeof(*oact)) ||
4267 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4268 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4269 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4270 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4277 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4278 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4279 const struct compat_old_sigaction __user *, act,
4280 struct compat_old_sigaction __user *, oact)
4282 struct k_sigaction new_ka, old_ka;
4284 compat_old_sigset_t mask;
4285 compat_uptr_t handler, restorer;
4288 if (!access_ok(act, sizeof(*act)) ||
4289 __get_user(handler, &act->sa_handler) ||
4290 __get_user(restorer, &act->sa_restorer) ||
4291 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4292 __get_user(mask, &act->sa_mask))
4295 #ifdef __ARCH_HAS_KA_RESTORER
4296 new_ka.ka_restorer = NULL;
4298 new_ka.sa.sa_handler = compat_ptr(handler);
4299 new_ka.sa.sa_restorer = compat_ptr(restorer);
4300 siginitset(&new_ka.sa.sa_mask, mask);
4303 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4306 if (!access_ok(oact, sizeof(*oact)) ||
4307 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4308 &oact->sa_handler) ||
4309 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4310 &oact->sa_restorer) ||
4311 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4312 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4319 #ifdef CONFIG_SGETMASK_SYSCALL
4322 * For backwards compatibility. Functionality superseded by sigprocmask.
4324 SYSCALL_DEFINE0(sgetmask)
4327 return current->blocked.sig[0];
4330 SYSCALL_DEFINE1(ssetmask, int, newmask)
4332 int old = current->blocked.sig[0];
4335 siginitset(&newset, newmask);
4336 set_current_blocked(&newset);
4340 #endif /* CONFIG_SGETMASK_SYSCALL */
4342 #ifdef __ARCH_WANT_SYS_SIGNAL
4344 * For backwards compatibility. Functionality superseded by sigaction.
4346 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4348 struct k_sigaction new_sa, old_sa;
4351 new_sa.sa.sa_handler = handler;
4352 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4353 sigemptyset(&new_sa.sa.sa_mask);
4355 ret = do_sigaction(sig, &new_sa, &old_sa);
4357 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4359 #endif /* __ARCH_WANT_SYS_SIGNAL */
4361 #ifdef __ARCH_WANT_SYS_PAUSE
4363 SYSCALL_DEFINE0(pause)
4365 while (!signal_pending(current)) {
4366 __set_current_state(TASK_INTERRUPTIBLE);
4367 schedule();
4369 return -ERESTARTNOHAND;
4374 static int sigsuspend(sigset_t *set)
4376 current->saved_sigmask = current->blocked;
4377 set_current_blocked(set);
4379 while (!signal_pending(current)) {
4380 __set_current_state(TASK_INTERRUPTIBLE);
4381 schedule();
4383 set_restore_sigmask();
4384 return -ERESTARTNOHAND;
4388 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4389 * and suspend until a signal is received
4390 * @unewset: new signal mask value
4391 * @sigsetsize: size of sigset_t type
4393 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4397 /* XXX: Don't preclude handling different sized sigset_t's. */
4398 if (sigsetsize != sizeof(sigset_t))
4401 if (copy_from_user(&newset, unewset, sizeof(newset)))
4403 return sigsuspend(&newset);
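/*
 * Illustrative sketch (editor's addition, userspace): the classic
 * race-free wait built on sigsuspend(2) — block the signal, test the
 * condition, then atomically unblock and sleep. child_exited is a
 * hypothetical flag set by a SIGCHLD handler:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!child_exited)
 *		sigsuspend(&old);
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */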
4406 #ifdef CONFIG_COMPAT
4407 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4411 /* XXX: Don't preclude handling different sized sigset_t's. */
4412 if (sigsetsize != sizeof(sigset_t))
4415 if (get_compat_sigset(&newset, unewset))
4417 return sigsuspend(&newset);
4421 #ifdef CONFIG_OLD_SIGSUSPEND
4422 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4425 siginitset(&blocked, mask);
4426 return sigsuspend(&blocked);
4429 #ifdef CONFIG_OLD_SIGSUSPEND3
4430 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4433 siginitset(&blocked, mask);
4434 return sigsuspend(&blocked);
4438 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4443 static inline void siginfo_buildtime_checks(void)
4445 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4447 /* Verify the offsets in the two siginfos match */
4448 #define CHECK_OFFSET(field) \
4449 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4452 CHECK_OFFSET(si_pid);
4453 CHECK_OFFSET(si_uid);
4456 CHECK_OFFSET(si_tid);
4457 CHECK_OFFSET(si_overrun);
4458 CHECK_OFFSET(si_value);
4461 CHECK_OFFSET(si_pid);
4462 CHECK_OFFSET(si_uid);
4463 CHECK_OFFSET(si_value);
4466 CHECK_OFFSET(si_pid);
4467 CHECK_OFFSET(si_uid);
4468 CHECK_OFFSET(si_status);
4469 CHECK_OFFSET(si_utime);
4470 CHECK_OFFSET(si_stime);
4473 CHECK_OFFSET(si_addr);
4474 CHECK_OFFSET(si_addr_lsb);
4475 CHECK_OFFSET(si_lower);
4476 CHECK_OFFSET(si_upper);
4477 CHECK_OFFSET(si_pkey);
4480 CHECK_OFFSET(si_band);
4481 CHECK_OFFSET(si_fd);
4484 CHECK_OFFSET(si_call_addr);
4485 CHECK_OFFSET(si_syscall);
4486 CHECK_OFFSET(si_arch);
4490 void __init signals_init(void)
4492 siginfo_buildtime_checks();
4494 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4497 #ifdef CONFIG_KGDB_KDB
4498 #include <linux/kdb.h>
4500 * kdb_send_sig - Allows kdb to send signals without exposing
4501 * signal internals. This function checks if the required locks are
4502 * available before calling the main signal code, to avoid kdb
4503 * deadlocks.
4505 void kdb_send_sig(struct task_struct *t, int sig)
4507 static struct task_struct *kdb_prev_t;
4509 if (!spin_trylock(&t->sighand->siglock)) {
4510 kdb_printf("Can't do kill command now.\n"
4511 "The sigmask lock is held somewhere else in "
4512 "kernel, try again later\n");
4515 new_t = kdb_prev_t != t;
4517 if (t->state != TASK_RUNNING && new_t) {
4518 spin_unlock(&t->sighand->siglock);
4519 kdb_printf("Process is not RUNNING, sending a signal from "
4520 "kdb risks deadlock\n"
4521 "on the run queue locks. "
4522 "The signal has _not_ been sent.\n"
4523 "Reissue the kill command if you want to risk "
4527 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4528 spin_unlock(&t->sighand->siglock);
4530 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4533 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4535 #endif /* CONFIG_KGDB_KDB */