/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;
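
/*
 * Non-zero when fatal-signal reporting is enabled; normally set via the
 * "print-fatal-signals=" boot parameter handled by
 * setup_print_fatal_signals() below, e.g. booting with
 * "print-fatal-signals=1".
 */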
int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even an ignored signal, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
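
/*
 * Worked example: with _NSIG_WORDS == 1 (64-bit, _NSIG == 64), a pending
 * but blocked SIGTERM yields no ready bits:
 *
 *	signal->sig[0]  = 1UL << (SIGTERM - 1);
 *	blocked->sig[0] = 1UL << (SIGTERM - 1);
 *	ready = signal->sig[0] &~ blocked->sig[0];	-> 0
 *
 * so has_pending_signals() returns false until SIGTERM is unblocked.
 */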
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; clearing is left to callers that
	 * know it is safe.
	 */
	return false;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
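
/*
 * All of the signals in SYNCHRONOUS_MASK live in the first sigset word,
 * so next_signal() below can prioritize them with a single mask: if, say,
 * SIGSEGV and SIGUSR1 are both pending in word 0, "x &= SYNCHRONOUS_MASK"
 * leaves only the SIGSEGV bit and the fault is reported before the
 * asynchronous signal.
 */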
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;

	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;

		if (task_set_jobctl_pending(task, signr | gstop))
			sig->group_stop_count++;
	}
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
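
/*
 * Accounting note: each allocation above charges the sending user's
 * ->sigpending counter and __sigqueue_free() uncharges it, so one user
 * flooding a task with queued signals runs into RLIMIT_SIGPENDING rather
 * than exhausting the slab. Preallocated (SIGQUEUE_PREALLOC) entries are
 * owned by posix-timers and are skipped here; see sigqueue_free() below.
 */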
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
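
/*
 * Illustrative kthread usage (a sketch, not code from this file): a
 * kernel thread that opted in to a signal typically drains everything
 * once it has reacted, e.g.:
 *
 *	allow_signal(SIGKILL);
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			flush_signals(current);
 *	}
 */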
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
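
/*
 * Sketch of the expected calling pattern (the real caller is
 * get_signal()): the siglock is held across the dequeue and the returned
 * siginfo is consumed by the caller:
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	signr = dequeue_signal(tsk, &tsk->blocked, &info);
 *	spin_unlock_irq(&tsk->sighand->siglock);
 *	if (signr)
 *		... act on info ...
 */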
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
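
/*
 * SEND_SIG_NOINFO and SEND_SIG_PRIV are tiny sentinel addresses, not
 * real siginfo pointers, which is why is_si_special() can get away with
 * a plain pointer comparison. E.g. (sketch):
 *
 *	send_signal(SIGHUP, SEND_SIG_PRIV, p, PIDTYPE_PID);
 *
 * queues a kernel-originated SIGHUP without the caller filling in a
 * kernel_siginfo at all.
 */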
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the
 * next TRAP_STOP to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}
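
/*
 * Put differently: for a process-wide signal, complete_signal() below
 * prefers a thread that both leaves the signal unblocked and is either
 * running or has nothing pending. E.g. a SIGTERM sent to a group whose
 * main thread blocks it is handed to any worker thread that left it
 * unblocked.
 */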
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
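
/*
 * Example of the legacy coalescing (userspace view): two back-to-back
 * SIGUSR1 (< SIGRTMIN) to the same queue deliver once, while two queued
 * real-time signals both deliver:
 *
 *	kill(pid, SIGUSR1); kill(pid, SIGUSR1);		-> one delivery
 *	sigqueue(pid, SIGRTMIN, val); (twice)		-> two deliveries
 *
 * The second SIGUSR1 is dropped by the legacy_queue() test above.
 */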
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, type, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}
int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
			  const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
EXPORT_SYMBOL(force_sig);
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
}
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}
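
/*
 * Typical caller (illustrative sketch, assuming an architecture with
 * none of the ___ARCH_SI_* extras): a page-fault handler reporting a
 * bad user access:
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address, current);
 */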
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(info.si_signo, &info, current);
}
#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(info.si_signo, &info, current);
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
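
/*
 * Driver-style usage (sketch): signal a process recorded earlier as a
 * struct pid reference, from process context:
 *
 *	struct pid *target = get_task_pid(task, PIDTYPE_TGID);
 *	...
 *	kill_pid(target, SIGTERM, 1);	(priv != 0: send as kernel)
 *	put_pid(target);
 */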
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
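
/*
 * Lifecycle of the preallocated path (sketch): posix-timers allocate the
 * entry once at timer_create() time and reuse it for every expiry, so an
 * expiry can never fail for lack of memory:
 *
 *	q = sigqueue_alloc();			at timer_create()
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	at each expiry
 *	sigqueue_free(q);			at timer deletion
 */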
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}
/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()		    smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		cgroup_enter_frozen();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
2203 static bool do_signal_stop(int signr)
2204 __releases(¤t->sighand->siglock)
2206 struct signal_struct *sig = current->signal;
2208 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2209 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2210 struct task_struct *t;
2212 /* signr will be recorded in task->jobctl for retries */
2213 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2215 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2216 unlikely(signal_group_exit(sig)))
2219 * There is no group stop already in progress. We must
2222 * While ptraced, a task may be resumed while group stop is
2223 * still in effect and then receive a stop signal and
2224 * initiate another group stop. This deviates from the
2225 * usual behavior as two consecutive stop signals can't
2226 * cause two group stops when !ptraced. That is why we
2227 * also check !task_is_stopped(t) below.
2229 * The condition can be distinguished by testing whether
2230 * SIGNAL_STOP_STOPPED is already set. Don't generate
2231 * group_exit_code in such case.
2233 * This is not necessary for SIGNAL_STOP_CONTINUED because
2234 * an intervening stop signal is required to cause two
2235 * continued events regardless of ptrace.
2237 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2238 sig->group_exit_code = signr;
2240 sig->group_stop_count = 0;
2242 if (task_set_jobctl_pending(current, signr | gstop))
2243 sig->group_stop_count++;
2246 while_each_thread(current, t) {
2248 * Setting state to TASK_STOPPED for a group
2249 * stop is always done with the siglock held,
2250 * so this check has no races.
2252 if (!task_is_stopped(t) &&
2253 task_set_jobctl_pending(t, signr | gstop)) {
2254 sig->group_stop_count++;
2255 if (likely(!(t->ptrace & PT_SEIZED)))
2256 signal_wake_up(t, 0);
2258 ptrace_trap_notify(t);
2263 if (likely(!current->ptrace)) {
2267 * If there are no other threads in the group, or if there
2268 * is a group stop in progress and we are the last to stop,
2269 * report to the parent.
2271 if (task_participate_group_stop(current))
2272 notify = CLD_STOPPED;
2274 set_special_state(TASK_STOPPED);
2275 spin_unlock_irq(&current->sighand->siglock);
2278 * Notify the parent of the group stop completion. Because
2279 * we're not holding either the siglock or tasklist_lock
2280 here, a ptracer may attach in between; however, this is for
2281 group stop and should always be delivered to the real
2282 parent of the group leader. The new ptracer will get
2283 its notification when this task transitions into TASK_TRACED.
2287 read_lock(&tasklist_lock);
2288 do_notify_parent_cldstop(current, false, notify);
2289 read_unlock(&tasklist_lock);
2292 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2293 cgroup_enter_frozen();
2294 freezable_schedule();
2298 * While ptraced, group stop is handled by STOP trap.
2299 * Schedule it and let the caller deal with it.
2301 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
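/*
 * Illustrative userspace sketch, not part of signal.c: how the group-stop
 * machinery above looks from a shell's point of view through wait(2). A
 * minimal sketch assuming a POSIX/glibc userland; error handling elided.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)			/* child: sleep until signalled */
		for (;;)
			pause();

	kill(pid, SIGSTOP);			/* ends up in do_signal_stop() */
	waitpid(pid, &status, WUNTRACED);	/* CLD_STOPPED notification */
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);			/* SIGNAL_CLD_CONTINUED path */
	waitpid(pid, &status, WCONTINUED);	/* CLD_CONTINUED notification */
	if (WIFCONTINUED(status))
		printf("child continued\n");

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}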
2307 * do_jobctl_trap - take care of ptrace jobctl traps
2309 * When PT_SEIZED, it's used for both group stop and explicit
2310 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2311 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2312 * the stop signal; otherwise, %SIGTRAP.
2314 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2315 * number as exit_code and no siginfo.
2318 * Must be called with @current->sighand->siglock held, which may be
2319 * released and re-acquired before returning with intervening sleep.
2321 static void do_jobctl_trap(void)
2323 struct signal_struct *signal = current->signal;
2324 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2326 if (current->ptrace & PT_SEIZED) {
2327 if (!signal->group_stop_count &&
2328 !(signal->flags & SIGNAL_STOP_STOPPED))
2330 WARN_ON_ONCE(!signr);
2331 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2332 CLD_STOPPED);
2334 WARN_ON_ONCE(!signr);
2335 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2336 current->exit_code = 0;
2341 * do_freezer_trap - handle the freezer jobctl trap
2343 * Puts the task into the frozen state, unless the task is about to quit;
2344 * in that case it drops JOBCTL_TRAP_FREEZE instead.
2347 * Must be called with @current->sighand->siglock held,
2348 * which is always released before returning.
2350 static void do_freezer_trap(void)
2351 __releases(&current->sighand->siglock)
2354 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
2355 * let's make another loop to give them a chance to be handled.
2356 * In any case, we'll come back here.
2358 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2359 JOBCTL_TRAP_FREEZE) {
2360 spin_unlock_irq(&current->sighand->siglock);
2365 * Now we're sure that there is no pending fatal signal and no
2366 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2367 * immediately (if there is a non-fatal signal pending), and
2368 * put the task into sleep.
2370 __set_current_state(TASK_INTERRUPTIBLE);
2371 clear_thread_flag(TIF_SIGPENDING);
2372 spin_unlock_irq(&current->sighand->siglock);
2373 cgroup_enter_frozen();
2374 freezable_schedule();
2377 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2380 * We do not check sig_kernel_stop(signr) but set this marker
2381 * unconditionally because we do not know whether debugger will
2382 * change signr. This flag has no meaning unless we are going
2383 * to stop after return from ptrace_stop(). In this case it will
2384 * be checked in do_signal_stop(), we should only stop if it was
2385 * not cleared by SIGCONT while we were sleeping. See also the
2386 * comment in dequeue_signal().
2388 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2389 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2391 /* We're back. Did the debugger cancel the sig? */
2392 signr = current->exit_code;
2396 current->exit_code = 0;
2399 * Update the siginfo structure if the signal has
2400 * changed. If the debugger wanted something
2401 * specific in the siginfo structure then it should
2402 * have updated *info via PTRACE_SETSIGINFO.
2404 if (signr != info->si_signo) {
2405 clear_siginfo(info);
2406 info->si_signo = signr;
2408 info->si_code = SI_USER;
2410 info->si_pid = task_pid_vnr(current->parent);
2411 info->si_uid = from_kuid_munged(current_user_ns(),
2412 task_uid(current->parent));
2416 /* If the (new) signal is now blocked, requeue it. */
2417 if (sigismember(&current->blocked, signr)) {
2418 send_signal(signr, info, current, PIDTYPE_PID);
2425 bool get_signal(struct ksignal *ksig)
2427 struct sighand_struct *sighand = current->sighand;
2428 struct signal_struct *signal = current->signal;
2431 if (unlikely(current->task_works))
2434 if (unlikely(uprobe_deny_signal()))
2438 * Do this once, we can't return to user-mode if freezing() == T.
2439 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2440 * thus do not need another check after return.
2445 spin_lock_irq(&sighand->siglock);
2447 * Every stopped thread goes here after wakeup. Check to see if
2448 * we should notify the parent, prepare_signal(SIGCONT) encodes
2449 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2451 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2454 if (signal->flags & SIGNAL_CLD_CONTINUED)
2455 why = CLD_CONTINUED;
2459 signal->flags &= ~SIGNAL_CLD_MASK;
2461 spin_unlock_irq(&sighand->siglock);
2464 * Notify the parent that we're continuing. This event is
2465 * always per-process and doesn't make a whole lot of sense
2466 * for ptracers, who shouldn't consume the state via
2467 * wait(2) either, but, for backward compatibility, notify
2468 * the ptracer of the group leader too unless it's going to be
2469 * a duplicate of some previous notification.
2471 read_lock(&tasklist_lock);
2472 do_notify_parent_cldstop(current, false, why);
2474 if (ptrace_reparented(current->group_leader))
2475 do_notify_parent_cldstop(current->group_leader,
2477 read_unlock(&tasklist_lock);
2482 /* Has this task already been marked for death? */
2483 if (signal_group_exit(signal)) {
2484 ksig->info.si_signo = signr = SIGKILL;
2485 sigdelset(&current->pending.signal, SIGKILL);
2486 recalc_sigpending();
2491 struct k_sigaction *ka;
2493 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2497 if (unlikely(current->jobctl &
2498 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2499 if (current->jobctl & JOBCTL_TRAP_MASK) {
2501 spin_unlock_irq(&sighand->siglock);
2502 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2509 * If the task is leaving the frozen state, let's update
2510 * cgroup counters and reset the frozen bit.
2512 if (unlikely(cgroup_task_frozen(current))) {
2513 spin_unlock_irq(&sighand->siglock);
2514 cgroup_leave_frozen(false);
2519 * Signals generated by the execution of an instruction
2520 * need to be delivered before any other pending signals
2521 * so that the instruction pointer in the signal stack
2522 * frame points to the faulting instruction.
2524 signr = dequeue_synchronous_signal(&ksig->info);
2526 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2529 break; /* will return 0 */
2531 if (unlikely(current->ptrace) && signr != SIGKILL) {
2532 signr = ptrace_signal(signr, &ksig->info);
2537 ka = &sighand->action[signr-1];
2539 /* Trace actually delivered signals. */
2540 trace_signal_deliver(signr, &ksig->info, ka);
2542 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2544 if (ka->sa.sa_handler != SIG_DFL) {
2545 /* Run the handler. */
2548 if (ka->sa.sa_flags & SA_ONESHOT)
2549 ka->sa.sa_handler = SIG_DFL;
2551 break; /* will return non-zero "signr" value */
2555 * Now we are doing the default action for this signal.
2557 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2561 * Global init gets no signals it doesn't want.
2562 * Container-init gets no signals it doesn't want from the same
2563 * container.
2565 * Note that if global/container-init sees a sig_kernel_only()
2566 * signal here, the signal must have been generated internally
2567 * or must have come from an ancestor namespace. In either
2568 * case, the signal cannot be dropped.
2570 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2571 !sig_kernel_only(signr))
2574 if (sig_kernel_stop(signr)) {
2576 * The default action is to stop all threads in
2577 * the thread group. The job control signals
2578 * do nothing in an orphaned pgrp, but SIGSTOP
2579 * always works. Note that siglock needs to be
2580 * dropped during the call to is_orphaned_pgrp()
2581 * because of lock ordering with tasklist_lock.
2582 * This allows an intervening SIGCONT to be posted.
2583 * We need to check for that and bail out if necessary.
2585 if (signr != SIGSTOP) {
2586 spin_unlock_irq(&sighand->siglock);
2588 /* signals can be posted during this window */
2590 if (is_current_pgrp_orphaned())
2593 spin_lock_irq(&sighand->siglock);
2596 if (likely(do_signal_stop(ksig->info.si_signo))) {
2597 /* It released the siglock. */
2602 * We didn't actually stop, due to a race
2603 * with SIGCONT or something like that.
2609 spin_unlock_irq(&sighand->siglock);
2610 if (unlikely(cgroup_task_frozen(current)))
2611 cgroup_leave_frozen(true);
2614 * Anything else is fatal, maybe with a core dump.
2616 current->flags |= PF_SIGNALED;
2618 if (sig_kernel_coredump(signr)) {
2619 if (print_fatal_signals)
2620 print_fatal_signal(ksig->info.si_signo);
2621 proc_coredump_connector(current);
2623 * If it was able to dump core, this kills all
2624 * other threads in the group and synchronizes with
2625 * their demise. If we lost the race with another
2626 * thread getting here, it set group_exit_code
2627 * first and our do_group_exit call below will use
2628 * that value and ignore the one we pass it.
2630 do_coredump(&ksig->info);
2634 * Death signals, no core dump.
2636 do_group_exit(ksig->info.si_signo);
2639 spin_unlock_irq(&sighand->siglock);
2642 return ksig->sig > 0;
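/*
 * Illustrative userspace sketch, not part of signal.c: get_signal() above
 * is the kernel half of ordinary handler delivery; the userspace half is a
 * plain sigaction() registration like this. A minimal sketch assuming a
 * POSIX/glibc userland; error handling elided.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t got_usr1;

static void handler(int sig)
{
	got_usr1 = 1;		/* async-signal-safe: only set a flag */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);		/* dequeued and dispatched via get_signal() */
	if (got_usr1)
		printf("SIGUSR1 was delivered to the handler\n");
	return 0;
}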
2646 * signal_delivered - report that a signal was successfully delivered
2647 * @ksig: kernel signal struct
2648 * @stepping: nonzero if debugger single-step or block-step in use
2650 * This function should be called when a signal has successfully been
2651 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2652 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2653 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2655 static void signal_delivered(struct ksignal *ksig, int stepping)
2659 /* A signal was successfully delivered, and the
2660 saved sigmask was stored on the signal frame,
2661 and will be restored by sigreturn. So we can
2662 simply clear the restore sigmask flag. */
2663 clear_restore_sigmask();
2665 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2666 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2667 sigaddset(&blocked, ksig->sig);
2668 set_current_blocked(&blocked);
2669 tracehook_signal_handler(stepping);
2672 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2675 force_sigsegv(ksig->sig, current);
2677 signal_delivered(ksig, stepping);
2681 * It could be that complete_signal() picked us to notify about the
2682 * group-wide signal. Other threads should be notified now to take
2683 * the shared signals in @which since we will not.
2685 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2688 struct task_struct *t;
2690 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2691 if (sigisemptyset(&retarget))
2695 while_each_thread(tsk, t) {
2696 if (t->flags & PF_EXITING)
2699 if (!has_pending_signals(&retarget, &t->blocked))
2701 /* Remove the signals this thread can handle. */
2702 sigandsets(&retarget, &retarget, &t->blocked);
2704 if (!signal_pending(t))
2705 signal_wake_up(t, 0);
2707 if (sigisemptyset(&retarget))
2712 void exit_signals(struct task_struct *tsk)
2718 * @tsk is about to have PF_EXITING set - lock out users which
2719 * expect stable threadgroup.
2721 cgroup_threadgroup_change_begin(tsk);
2723 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2724 tsk->flags |= PF_EXITING;
2725 cgroup_threadgroup_change_end(tsk);
2729 spin_lock_irq(&tsk->sighand->siglock);
2731 * From now this task is not visible for group-wide signals,
2732 * see wants_signal(), do_signal_stop().
2734 tsk->flags |= PF_EXITING;
2736 cgroup_threadgroup_change_end(tsk);
2738 if (!signal_pending(tsk))
2741 unblocked = tsk->blocked;
2742 signotset(&unblocked);
2743 retarget_shared_pending(tsk, &unblocked);
2745 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2746 task_participate_group_stop(tsk))
2747 group_stop = CLD_STOPPED;
2749 spin_unlock_irq(&tsk->sighand->siglock);
2752 * If group stop has completed, deliver the notification. This
2753 * should always go to the real parent of the group leader.
2755 if (unlikely(group_stop)) {
2756 read_lock(&tasklist_lock);
2757 do_notify_parent_cldstop(tsk, false, group_stop);
2758 read_unlock(&tasklist_lock);
2763 * System call entry points.
2767 * sys_restart_syscall - restart a system call
2769 SYSCALL_DEFINE0(restart_syscall)
2771 struct restart_block *restart = &current->restart_block;
2772 return restart->fn(restart);
2775 long do_no_restart_syscall(struct restart_block *param)
2780 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2782 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2783 sigset_t newblocked;
2784 /* A set of now blocked but previously unblocked signals. */
2785 sigandnsets(&newblocked, newset, &current->blocked);
2786 retarget_shared_pending(tsk, &newblocked);
2788 tsk->blocked = *newset;
2789 recalc_sigpending();
2793 * set_current_blocked - change current->blocked mask
2796 * It is wrong to change ->blocked directly, this helper should be used
2797 * to ensure the process can't miss a shared signal we are going to block.
2799 void set_current_blocked(sigset_t *newset)
2801 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2802 __set_current_blocked(newset);
2805 void __set_current_blocked(const sigset_t *newset)
2807 struct task_struct *tsk = current;
2810 * In case the signal mask hasn't changed, there is nothing we need
2811 * to do. The current->blocked shouldn't be modified by any other task.
2813 if (sigequalsets(&tsk->blocked, newset))
2816 spin_lock_irq(&tsk->sighand->siglock);
2817 __set_task_blocked(tsk, newset);
2818 spin_unlock_irq(&tsk->sighand->siglock);
2822 * This is also useful for kernel threads that want to temporarily
2823 * (or permanently) block certain signals.
2825 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2826 * interface happily blocks "unblockable" signals like SIGKILL
2827 * and SIGSTOP.
2829 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2831 struct task_struct *tsk = current;
2834 /* Lockless, only current can change ->blocked, never from irq */
2836 *oldset = tsk->blocked;
2840 sigorsets(&newset, &tsk->blocked, set);
2843 sigandnsets(&newset, &tsk->blocked, set);
2852 __set_current_blocked(&newset);
2855 EXPORT_SYMBOL(sigprocmask);
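/*
 * Illustrative userspace sketch, not part of signal.c: the userspace
 * counterpart of the masking helpers above. A signal raised while blocked
 * stays pending and is delivered as soon as the old mask is restored. A
 * minimal sketch assuming a POSIX/glibc userland; error handling elided.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void on_int(int sig) { /* absorb SIGINT */ }

int main(void)
{
	struct sigaction sa;
	sigset_t block, old;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_int;
	sigaction(SIGINT, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	sigprocmask(SIG_BLOCK, &block, &old);	/* SIG_BLOCK: or into ->blocked */

	raise(SIGINT);				/* stays pending while blocked */
	printf("SIGINT pending, not yet delivered\n");

	sigprocmask(SIG_SETMASK, &old, NULL);	/* handler runs before this returns */
	printf("mask restored; handler has run\n");
	return 0;
}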
2858 * The API helps set app-provided sigmasks.
2860 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2861 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2863 int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
2864 sigset_t *oldset, size_t sigsetsize)
2869 if (sigsetsize != sizeof(sigset_t))
2871 if (copy_from_user(set, usigmask, sizeof(sigset_t)))
2874 *oldset = current->blocked;
2875 set_current_blocked(set);
2879 EXPORT_SYMBOL(set_user_sigmask);
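/*
 * Illustrative userspace sketch, not part of signal.c: why
 * set_user_sigmask() exists. Unblocking with sigprocmask() and then
 * calling poll() leaves a window where the signal is consumed before the
 * wait begins; ppoll() installs the temporary mask atomically for the
 * syscall's duration. A minimal sketch assuming a glibc userland; it waits
 * on stdin, and error handling is elided.
 */
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_usr1(int sig) { }

int main(void)
{
	struct sigaction sa;
	sigset_t blocked, during_wait;
	struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_usr1;
	sigaction(SIGUSR1, &sa, NULL);

	/* Keep SIGUSR1 blocked everywhere except inside the wait itself. */
	sigemptyset(&blocked);
	sigaddset(&blocked, SIGUSR1);
	sigprocmask(SIG_BLOCK, &blocked, NULL);

	sigemptyset(&during_wait);	/* deliverable only while we sleep */
	if (ppoll(&pfd, 1, NULL, &during_wait) < 0)
		perror("ppoll");	/* EINTR if SIGUSR1 arrived */
	return 0;
}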
2881 #ifdef CONFIG_COMPAT
2882 int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
2883 sigset_t *set, sigset_t *oldset,
2889 if (sigsetsize != sizeof(compat_sigset_t))
2891 if (get_compat_sigset(set, usigmask))
2894 *oldset = current->blocked;
2895 set_current_blocked(set);
2899 EXPORT_SYMBOL(set_compat_user_sigmask);
2903 * restore_user_sigmask:
2904 * usigmask: sigmask passed in from userland.
2905 * sigsaved: saved sigmask when the syscall started and changed the sigmask to
2906 * the sigmask passed in.
2908 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2909 * epoll_pwait where a new sigmask is passed in from userland for the syscalls.
2911 void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved)
2917 * When signals are pending, do not restore them here.
2918 * Restoring sigmask here can lead to delivering signals that the above
2919 * syscalls are intended to block because of the sigmask passed in.
2921 if (signal_pending(current)) {
2922 current->saved_sigmask = *sigsaved;
2923 set_restore_sigmask();
2928 * This is needed because the fast syscall return path does not restore
2929 * saved_sigmask when signals are not pending.
2931 set_current_blocked(sigsaved);
2933 EXPORT_SYMBOL(restore_user_sigmask);
2936 * sys_rt_sigprocmask - change the list of currently blocked signals
2937 * @how: whether to add, remove, or set signals
2938 * @nset: new signal mask to apply according to @how, if non-null
2939 * @oset: previous value of signal mask if non-null
2940 * @sigsetsize: size of sigset_t type
2942 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2943 sigset_t __user *, oset, size_t, sigsetsize)
2945 sigset_t old_set, new_set;
2948 /* XXX: Don't preclude handling different sized sigset_t's. */
2949 if (sigsetsize != sizeof(sigset_t))
2952 old_set = current->blocked;
2955 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2957 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2959 error = sigprocmask(how, &new_set, NULL);
2965 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2972 #ifdef CONFIG_COMPAT
2973 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2974 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2976 sigset_t old_set = current->blocked;
2978 /* XXX: Don't preclude handling different sized sigset_t's. */
2979 if (sigsetsize != sizeof(sigset_t))
2985 if (get_compat_sigset(&new_set, nset))
2987 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2989 error = sigprocmask(how, &new_set, NULL);
2993 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2997 static void do_sigpending(sigset_t *set)
2999 spin_lock_irq(&current->sighand->siglock);
3000 sigorsets(set, &current->pending.signal,
3001 &current->signal->shared_pending.signal);
3002 spin_unlock_irq(&current->sighand->siglock);
3004 /* Outside the lock because only this thread touches it. */
3005 sigandsets(set, &current->blocked, set);
3009 * sys_rt_sigpending - examine a pending signal that has been raised
3010 * while blocked
3011 * @uset: stores pending signals
3012 * @sigsetsize: size of sigset_t type or larger
3014 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3018 if (sigsetsize > sizeof(*uset))
3021 do_sigpending(&set);
3023 if (copy_to_user(uset, &set, sigsetsize))
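/*
 * Illustrative userspace sketch, not part of signal.c: sigpending()
 * reports exactly the blocked-and-raised set computed by do_sigpending()
 * above. A minimal sketch assuming a POSIX/glibc userland; error handling
 * elided.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGTERM);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGTERM);		/* lands on current->pending */

	sigpending(&pending);	/* kernel side: do_sigpending() */
	if (sigismember(&pending, SIGTERM))
		printf("SIGTERM is pending while blocked\n");
	return 0;
}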
3029 #ifdef CONFIG_COMPAT
3030 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3031 compat_size_t, sigsetsize)
3035 if (sigsetsize > sizeof(*uset))
3038 do_sigpending(&set);
3040 return put_compat_sigset(uset, &set, sigsetsize);
3044 static const struct {
3045 unsigned char limit, layout;
3046 } sig_sicodes[] = {
3047 [SIGILL] = { NSIGILL, SIL_FAULT },
3048 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3049 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3050 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3051 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3053 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3055 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3056 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3057 [SIGSYS] = { NSIGSYS, SIL_SYS },
3060 static bool known_siginfo_layout(unsigned sig, int si_code)
3062 if (si_code == SI_KERNEL)
3064 else if (si_code > SI_USER) {
3065 if (sig_specific_sicodes(sig)) {
3066 if (si_code <= sig_sicodes[sig].limit)
3069 else if (si_code <= NSIGPOLL)
3072 else if (si_code >= SI_DETHREAD)
3074 else if (si_code == SI_ASYNCNL)
3079 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3081 enum siginfo_layout layout = SIL_KILL;
3082 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3083 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3084 (si_code <= sig_sicodes[sig].limit)) {
3085 layout = sig_sicodes[sig].layout;
3086 /* Handle the exceptions */
3087 if ((sig == SIGBUS) &&
3088 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3089 layout = SIL_FAULT_MCEERR;
3090 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3091 layout = SIL_FAULT_BNDERR;
3093 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3094 layout = SIL_FAULT_PKUERR;
3097 else if (si_code <= NSIGPOLL)
3100 if (si_code == SI_TIMER)
3102 else if (si_code == SI_SIGIO)
3104 else if (si_code < 0)
3110 static inline char __user *si_expansion(const siginfo_t __user *info)
3112 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3115 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3117 char __user *expansion = si_expansion(to);
3118 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3120 if (clear_user(expansion, SI_EXPANSION_SIZE))
3125 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3126 const siginfo_t __user *from)
3128 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3129 char __user *expansion = si_expansion(from);
3130 char buf[SI_EXPANSION_SIZE];
3133 * An unknown si_code might need more than
3134 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3135 * extra bytes are 0. This guarantees copy_siginfo_to_user
3136 * will return this data to userspace exactly.
3138 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3140 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3148 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3149 const siginfo_t __user *from)
3151 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3153 to->si_signo = signo;
3154 return post_copy_siginfo_from_user(to, from);
3157 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3159 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3161 return post_copy_siginfo_from_user(to, from);
3164 #ifdef CONFIG_COMPAT
3165 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3166 const struct kernel_siginfo *from)
3167 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3169 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3171 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3172 const struct kernel_siginfo *from, bool x32_ABI)
3175 struct compat_siginfo new;
3176 memset(&new, 0, sizeof(new));
3178 new.si_signo = from->si_signo;
3179 new.si_errno = from->si_errno;
3180 new.si_code = from->si_code;
3181 switch (siginfo_layout(from->si_signo, from->si_code)) {
3183 new.si_pid = from->si_pid;
3184 new.si_uid = from->si_uid;
3187 new.si_tid = from->si_tid;
3188 new.si_overrun = from->si_overrun;
3189 new.si_int = from->si_int;
3192 new.si_band = from->si_band;
3193 new.si_fd = from->si_fd;
3196 new.si_addr = ptr_to_compat(from->si_addr);
3197 #ifdef __ARCH_SI_TRAPNO
3198 new.si_trapno = from->si_trapno;
3201 case SIL_FAULT_MCEERR:
3202 new.si_addr = ptr_to_compat(from->si_addr);
3203 #ifdef __ARCH_SI_TRAPNO
3204 new.si_trapno = from->si_trapno;
3206 new.si_addr_lsb = from->si_addr_lsb;
3208 case SIL_FAULT_BNDERR:
3209 new.si_addr = ptr_to_compat(from->si_addr);
3210 #ifdef __ARCH_SI_TRAPNO
3211 new.si_trapno = from->si_trapno;
3213 new.si_lower = ptr_to_compat(from->si_lower);
3214 new.si_upper = ptr_to_compat(from->si_upper);
3216 case SIL_FAULT_PKUERR:
3217 new.si_addr = ptr_to_compat(from->si_addr);
3218 #ifdef __ARCH_SI_TRAPNO
3219 new.si_trapno = from->si_trapno;
3221 new.si_pkey = from->si_pkey;
3224 new.si_pid = from->si_pid;
3225 new.si_uid = from->si_uid;
3226 new.si_status = from->si_status;
3227 #ifdef CONFIG_X86_X32_ABI
3229 new._sifields._sigchld_x32._utime = from->si_utime;
3230 new._sifields._sigchld_x32._stime = from->si_stime;
3234 new.si_utime = from->si_utime;
3235 new.si_stime = from->si_stime;
3239 new.si_pid = from->si_pid;
3240 new.si_uid = from->si_uid;
3241 new.si_int = from->si_int;
3244 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3245 new.si_syscall = from->si_syscall;
3246 new.si_arch = from->si_arch;
3250 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3256 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3257 const struct compat_siginfo *from)
3260 to->si_signo = from->si_signo;
3261 to->si_errno = from->si_errno;
3262 to->si_code = from->si_code;
3263 switch (siginfo_layout(from->si_signo, from->si_code)) {
3265 to->si_pid = from->si_pid;
3266 to->si_uid = from->si_uid;
3269 to->si_tid = from->si_tid;
3270 to->si_overrun = from->si_overrun;
3271 to->si_int = from->si_int;
3274 to->si_band = from->si_band;
3275 to->si_fd = from->si_fd;
3278 to->si_addr = compat_ptr(from->si_addr);
3279 #ifdef __ARCH_SI_TRAPNO
3280 to->si_trapno = from->si_trapno;
3283 case SIL_FAULT_MCEERR:
3284 to->si_addr = compat_ptr(from->si_addr);
3285 #ifdef __ARCH_SI_TRAPNO
3286 to->si_trapno = from->si_trapno;
3288 to->si_addr_lsb = from->si_addr_lsb;
3290 case SIL_FAULT_BNDERR:
3291 to->si_addr = compat_ptr(from->si_addr);
3292 #ifdef __ARCH_SI_TRAPNO
3293 to->si_trapno = from->si_trapno;
3295 to->si_lower = compat_ptr(from->si_lower);
3296 to->si_upper = compat_ptr(from->si_upper);
3298 case SIL_FAULT_PKUERR:
3299 to->si_addr = compat_ptr(from->si_addr);
3300 #ifdef __ARCH_SI_TRAPNO
3301 to->si_trapno = from->si_trapno;
3303 to->si_pkey = from->si_pkey;
3306 to->si_pid = from->si_pid;
3307 to->si_uid = from->si_uid;
3308 to->si_status = from->si_status;
3309 #ifdef CONFIG_X86_X32_ABI
3310 if (in_x32_syscall()) {
3311 to->si_utime = from->_sifields._sigchld_x32._utime;
3312 to->si_stime = from->_sifields._sigchld_x32._stime;
3316 to->si_utime = from->si_utime;
3317 to->si_stime = from->si_stime;
3321 to->si_pid = from->si_pid;
3322 to->si_uid = from->si_uid;
3323 to->si_int = from->si_int;
3326 to->si_call_addr = compat_ptr(from->si_call_addr);
3327 to->si_syscall = from->si_syscall;
3328 to->si_arch = from->si_arch;
3334 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3335 const struct compat_siginfo __user *ufrom)
3337 struct compat_siginfo from;
3339 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3342 from.si_signo = signo;
3343 return post_copy_siginfo_from_user32(to, &from);
3346 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3347 const struct compat_siginfo __user *ufrom)
3349 struct compat_siginfo from;
3351 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3354 return post_copy_siginfo_from_user32(to, &from);
3356 #endif /* CONFIG_COMPAT */
3359 * do_sigtimedwait - wait for queued signals specified in @which
3360 * @which: queued signals to wait for
3361 * @info: if non-null, the signal's siginfo is returned here
3362 * @ts: upper bound on process time suspension
3364 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3365 const struct timespec64 *ts)
3367 ktime_t *to = NULL, timeout = KTIME_MAX;
3368 struct task_struct *tsk = current;
3369 sigset_t mask = *which;
3373 if (!timespec64_valid(ts))
3375 timeout = timespec64_to_ktime(*ts);
3380 * Invert the set of allowed signals to get those we want to block.
3382 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3385 spin_lock_irq(&tsk->sighand->siglock);
3386 sig = dequeue_signal(tsk, &mask, info);
3387 if (!sig && timeout) {
3389 * None ready; temporarily unblock the signals we're
3390 * interested in while we sleep, so that we'll be
3391 * awakened when they arrive. Pure unblocking is always
3392 * fine, so we can avoid set_current_blocked().
3394 tsk->real_blocked = tsk->blocked;
3395 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3396 recalc_sigpending();
3397 spin_unlock_irq(&tsk->sighand->siglock);
3399 __set_current_state(TASK_INTERRUPTIBLE);
3400 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3402 spin_lock_irq(&tsk->sighand->siglock);
3403 __set_task_blocked(tsk, &tsk->real_blocked);
3404 sigemptyset(&tsk->real_blocked);
3405 sig = dequeue_signal(tsk, &mask, info);
3407 spin_unlock_irq(&tsk->sighand->siglock);
3411 return ret ? -EINTR : -EAGAIN;
3415 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3416 * in @uthese
3417 * @uthese: queued signals to wait for
3418 * @uinfo: if non-null, the signal's siginfo is returned here
3419 * @uts: upper bound on process time suspension
3420 * @sigsetsize: size of sigset_t type
3422 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3423 siginfo_t __user *, uinfo,
3424 const struct __kernel_timespec __user *, uts,
3428 struct timespec64 ts;
3429 kernel_siginfo_t info;
3432 /* XXX: Don't preclude handling different sized sigset_t's. */
3433 if (sigsetsize != sizeof(sigset_t))
3436 if (copy_from_user(&these, uthese, sizeof(these)))
3440 if (get_timespec64(&ts, uts))
3444 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3446 if (ret > 0 && uinfo) {
3447 if (copy_siginfo_to_user(uinfo, &info))
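/*
 * Illustrative userspace sketch, not part of signal.c: synchronous signal
 * handling with sigtimedwait(), the glibc wrapper for rt_sigtimedwait
 * above. The waited-for signals must be blocked first so they queue
 * instead of being delivered asynchronously. A minimal sketch; error
 * handling elided.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t si;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);
	sig = sigtimedwait(&set, &si, &ts);	/* dequeue_signal() path */
	if (sig == SIGUSR1)
		printf("got SIGUSR1 from pid %d\n", (int)si.si_pid);
	return 0;
}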
3454 #ifdef CONFIG_COMPAT_32BIT_TIME
3455 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3456 siginfo_t __user *, uinfo,
3457 const struct old_timespec32 __user *, uts,
3461 struct timespec64 ts;
3462 kernel_siginfo_t info;
3465 if (sigsetsize != sizeof(sigset_t))
3468 if (copy_from_user(&these, uthese, sizeof(these)))
3472 if (get_old_timespec32(&ts, uts))
3476 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3478 if (ret > 0 && uinfo) {
3479 if (copy_siginfo_to_user(uinfo, &info))
3487 #ifdef CONFIG_COMPAT
3488 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3489 struct compat_siginfo __user *, uinfo,
3490 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3493 struct timespec64 t;
3494 kernel_siginfo_t info;
3497 if (sigsetsize != sizeof(sigset_t))
3500 if (get_compat_sigset(&s, uthese))
3504 if (get_timespec64(&t, uts))
3508 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3510 if (ret > 0 && uinfo) {
3511 if (copy_siginfo_to_user32(uinfo, &info))
3518 #ifdef CONFIG_COMPAT_32BIT_TIME
3519 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3520 struct compat_siginfo __user *, uinfo,
3521 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3524 struct timespec64 t;
3525 kernel_siginfo_t info;
3528 if (sigsetsize != sizeof(sigset_t))
3531 if (get_compat_sigset(&s, uthese))
3535 if (get_old_timespec32(&t, uts))
3539 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3541 if (ret > 0 && uinfo) {
3542 if (copy_siginfo_to_user32(uinfo, &info))
3551 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3553 clear_siginfo(info);
3554 info->si_signo = sig;
3556 info->si_code = SI_USER;
3557 info->si_pid = task_tgid_vnr(current);
3558 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3562 * sys_kill - send a signal to a process
3563 * @pid: the PID of the process
3564 * @sig: signal to be sent
3566 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3568 struct kernel_siginfo info;
3570 prepare_kill_siginfo(sig, &info);
3572 return kill_something_info(sig, &info, pid);
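/*
 * Illustrative userspace sketch, not part of signal.c: kill(2)'s pid
 * argument selects the target set that kill_something_info() resolves. A
 * minimal sketch assuming a POSIX/glibc userland; error handling elided.
 */
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0)
		for (;;)
			pause();

	kill(child, SIGTERM);	/* pid > 0: exactly that process */
	kill(0, 0);		/* pid == 0: own process group; signal 0 is
				 * only a permission/existence probe */
	/* kill(-child, SIGTERM) would target process group `child`;
	 * kill(-1, SIGTERM) everything we are allowed to signal. */
	waitpid(child, NULL, 0);
	return 0;
}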
3576 * Verify that the signaler and signalee either are in the same pid namespace
3577 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3578 * namespace.
3580 static bool access_pidfd_pidns(struct pid *pid)
3582 struct pid_namespace *active = task_active_pid_ns(current);
3583 struct pid_namespace *p = ns_of_pid(pid);
3596 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3598 #ifdef CONFIG_COMPAT
3600 * Avoid hooking up compat syscalls and instead handle necessary
3601 * conversions here. Note, this is a stop-gap measure and should not be
3602 * considered a generic solution.
3604 if (in_compat_syscall())
3605 return copy_siginfo_from_user32(
3606 kinfo, (struct compat_siginfo __user *)info);
3608 return copy_siginfo_from_user(kinfo, info);
3611 static struct pid *pidfd_to_pid(const struct file *file)
3613 if (file->f_op == &pidfd_fops)
3614 return file->private_data;
3616 return tgid_pidfd_to_pid(file);
3620 * sys_pidfd_send_signal - send a signal to a process through a task file
3622 * @pidfd: the file descriptor of the process
3623 * @sig: signal to be sent
3624 * @info: the signal info
3625 * @flags: future flags to be passed
3627 * The syscall currently only signals via PIDTYPE_PID which covers
3628 * kill(<positive-pid>, <signal>). It does not signal threads or process
3629 * groups.
3630 * In order to extend the syscall to threads and process groups the @flags
3631 * argument should be used. In essence, the @flags argument will determine
3632 * what is signaled and not the file descriptor itself. Put in other words,
3633 * grouping is a property of the flags argument, not a property of the file
3634 * descriptor.
3636 * Return: 0 on success, negative errno on failure
3638 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3639 siginfo_t __user *, info, unsigned int, flags)
3644 kernel_siginfo_t kinfo;
3646 /* Enforce that flags is 0 until we add an extension. */
3654 /* Is this a pidfd? */
3655 pid = pidfd_to_pid(f.file);
3662 if (!access_pidfd_pidns(pid))
3666 ret = copy_siginfo_from_user_any(&kinfo, info);
3671 if (unlikely(sig != kinfo.si_signo))
3674 /* Only allow sending arbitrary signals to yourself. */
3676 if ((task_pid(current) != pid) &&
3677 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3680 prepare_kill_siginfo(sig, &kinfo);
3683 ret = kill_pid_info(sig, &kinfo, pid);
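/*
 * Illustrative userspace sketch, not part of signal.c: signalling through
 * a pidfd. On kernels of this vintage a pidfd can be obtained by opening
 * /proc/<pid> (the tgid_pidfd_to_pid() fallback above); there is no glibc
 * wrapper, so raw syscall(2) is used, assuming kernel headers that define
 * __NR_pidfd_send_signal. Error handling is mostly elided.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	char path[32];
	int pidfd;
	pid_t child = fork();

	if (child == 0) {
		pause();
		_exit(0);
	}

	snprintf(path, sizeof(path), "/proc/%d", (int)child);
	pidfd = open(path, O_RDONLY);	/* /proc/<pid> directory fd as pidfd */
	if (pidfd < 0)
		return 1;

	/* info == NULL: the kernel fills it via prepare_kill_siginfo() */
	if (syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0)
		perror("pidfd_send_signal");
	close(pidfd);
	return 0;
}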
3691 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3693 struct task_struct *p;
3697 p = find_task_by_vpid(pid);
3698 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3699 error = check_kill_permission(sig, info, p);
3701 * The null signal is a permissions and process existence
3702 * probe. No signal is actually delivered.
3704 if (!error && sig) {
3705 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3707 * If lock_task_sighand() failed we pretend the task
3708 * dies after receiving the signal. The window is tiny,
3709 * and the signal is private anyway.
3711 if (unlikely(error == -ESRCH))
3720 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3722 struct kernel_siginfo info;
3724 clear_siginfo(&info);
3725 info.si_signo = sig;
3727 info.si_code = SI_TKILL;
3728 info.si_pid = task_tgid_vnr(current);
3729 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3731 return do_send_specific(tgid, pid, sig, &info);
3735 * sys_tgkill - send signal to one specific thread
3736 * @tgid: the thread group ID of the thread
3737 * @pid: the PID of the thread
3738 * @sig: signal to be sent
3740 * This syscall also checks @tgid and returns -ESRCH even if the PID
3741 * exists but no longer belongs to the target process. This
3742 * method solves the problem of threads exiting and PIDs getting reused.
3744 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3746 /* This is only valid for single tasks */
3747 if (pid <= 0 || tgid <= 0)
3750 return do_tkill(tgid, pid, sig);
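/*
 * Illustrative userspace sketch, not part of signal.c: tgkill() directs a
 * signal at one thread, qualified by its thread group so a recycled tid
 * in another process can't be hit; pthread_kill() is the usual wrapper.
 * Raw syscall(2) form shown; error handling elided.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid  = syscall(SYS_gettid);	/* kernel tid of this thread */

	signal(SIGUSR1, SIG_IGN);
	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
	return 0;
}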
3754 * sys_tkill - send signal to one specific task
3755 * @pid: the PID of the task
3756 * @sig: signal to be sent
3758 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3760 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3762 /* This is only valid for single tasks */
3766 return do_tkill(0, pid, sig);
3769 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3771 /* Not even root can pretend to send signals from the kernel.
3772 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3774 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3775 (task_pid_vnr(current) != pid))
3778 /* POSIX.1b doesn't mention process groups. */
3779 return kill_proc_info(sig, info, pid);
3783 * sys_rt_sigqueueinfo - send signal information to a process
3784 * @pid: the PID of the thread
3785 * @sig: signal to be sent
3786 * @uinfo: signal info to be sent
3788 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3789 siginfo_t __user *, uinfo)
3791 kernel_siginfo_t info;
3792 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3795 return do_rt_sigqueueinfo(pid, sig, &info);
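/*
 * Illustrative userspace sketch, not part of signal.c: sigqueue(3) is the
 * usual entry to rt_sigqueueinfo above; it queues a realtime signal with a
 * user payload delivered in si_value. printf() in a handler is not
 * async-signal-safe and is for demonstration only; error handling elided.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_rt(int sig, siginfo_t *si, void *uctx)
{
	printf("payload %d\n", si->si_value.sival_int);
}

int main(void)
{
	struct sigaction sa;
	union sigval v = { .sival_int = 42 };

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_rt;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGRTMIN, &sa, NULL);

	sigqueue(getpid(), SIGRTMIN, v);  /* handler runs before we return */
	return 0;
}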
3798 #ifdef CONFIG_COMPAT
3799 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3802 struct compat_siginfo __user *, uinfo)
3804 kernel_siginfo_t info;
3805 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3808 return do_rt_sigqueueinfo(pid, sig, &info);
3812 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3814 /* This is only valid for single tasks */
3815 if (pid <= 0 || tgid <= 0)
3818 /* Not even root can pretend to send signals from the kernel.
3819 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3821 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3822 (task_pid_vnr(current) != pid))
3825 return do_send_specific(tgid, pid, sig, info);
3828 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3829 siginfo_t __user *, uinfo)
3831 kernel_siginfo_t info;
3832 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3835 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3838 #ifdef CONFIG_COMPAT
3839 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3843 struct compat_siginfo __user *, uinfo)
3845 kernel_siginfo_t info;
3846 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3849 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3854 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3856 void kernel_sigaction(int sig, __sighandler_t action)
3858 spin_lock_irq(&current->sighand->siglock);
3859 current->sighand->action[sig - 1].sa.sa_handler = action;
3860 if (action == SIG_IGN) {
3864 sigaddset(&mask, sig);
3866 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3867 flush_sigqueue_mask(&mask, &current->pending);
3868 recalc_sigpending();
3870 spin_unlock_irq(&current->sighand->siglock);
3872 EXPORT_SYMBOL(kernel_sigaction);
3874 void __weak sigaction_compat_abi(struct k_sigaction *act,
3875 struct k_sigaction *oact)
3879 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3881 struct task_struct *p = current, *t;
3882 struct k_sigaction *k;
3885 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3888 k = &p->sighand->action[sig-1];
3890 spin_lock_irq(&p->sighand->siglock);
3894 sigaction_compat_abi(act, oact);
3897 sigdelsetmask(&act->sa.sa_mask,
3898 sigmask(SIGKILL) | sigmask(SIGSTOP));
3902 * "Setting a signal action to SIG_IGN for a signal that is
3903 * pending shall cause the pending signal to be discarded,
3904 * whether or not it is blocked."
3906 * "Setting a signal action to SIG_DFL for a signal that is
3907 * pending and whose default action is to ignore the signal
3908 * (for example, SIGCHLD), shall cause the pending signal to
3909 * be discarded, whether or not it is blocked"
3911 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3913 sigaddset(&mask, sig);
3914 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3915 for_each_thread(p, t)
3916 flush_sigqueue_mask(&mask, &t->pending);
3920 spin_unlock_irq(&p->sighand->siglock);
3925 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
3928 struct task_struct *t = current;
3931 memset(oss, 0, sizeof(stack_t));
3932 oss->ss_sp = (void __user *) t->sas_ss_sp;
3933 oss->ss_size = t->sas_ss_size;
3934 oss->ss_flags = sas_ss_flags(sp) |
3935 (current->sas_ss_flags & SS_FLAG_BITS);
3939 void __user *ss_sp = ss->ss_sp;
3940 size_t ss_size = ss->ss_size;
3941 unsigned ss_flags = ss->ss_flags;
3944 if (unlikely(on_sig_stack(sp)))
3947 ss_mode = ss_flags & ~SS_FLAG_BITS;
3948 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3952 if (ss_mode == SS_DISABLE) {
3956 if (unlikely(ss_size < min_ss_size))
3960 t->sas_ss_sp = (unsigned long) ss_sp;
3961 t->sas_ss_size = ss_size;
3962 t->sas_ss_flags = ss_flags;
3967 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3971 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3973 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3974 current_user_stack_pointer(),
3976 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3981 int restore_altstack(const stack_t __user *uss)
3984 if (copy_from_user(&new, uss, sizeof(stack_t)))
3986 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
3988 /* squash all but EFAULT for now */
3992 int __save_altstack(stack_t __user *uss, unsigned long sp)
3994 struct task_struct *t = current;
3995 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3996 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3997 __put_user(t->sas_ss_size, &uss->ss_size);
4000 if (t->sas_ss_flags & SS_AUTODISARM)
4005 #ifdef CONFIG_COMPAT
4006 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4007 compat_stack_t __user *uoss_ptr)
4013 compat_stack_t uss32;
4014 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4016 uss.ss_sp = compat_ptr(uss32.ss_sp);
4017 uss.ss_flags = uss32.ss_flags;
4018 uss.ss_size = uss32.ss_size;
4020 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4021 compat_user_stack_pointer(),
4022 COMPAT_MINSIGSTKSZ);
4023 if (ret >= 0 && uoss_ptr) {
4025 memset(&old, 0, sizeof(old));
4026 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4027 old.ss_flags = uoss.ss_flags;
4028 old.ss_size = uoss.ss_size;
4029 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4035 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4036 const compat_stack_t __user *, uss_ptr,
4037 compat_stack_t __user *, uoss_ptr)
4039 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4042 int compat_restore_altstack(const compat_stack_t __user *uss)
4044 int err = do_compat_sigaltstack(uss, NULL);
4045 /* squash all but -EFAULT for now */
4046 return err == -EFAULT ? err : 0;
4049 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4052 struct task_struct *t = current;
4053 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4055 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4056 __put_user(t->sas_ss_size, &uss->ss_size);
4059 if (t->sas_ss_flags & SS_AUTODISARM)
4065 #ifdef __ARCH_WANT_SYS_SIGPENDING
4068 * sys_sigpending - examine pending signals
4069 * @uset: where mask of pending signal is returned
4071 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4075 if (sizeof(old_sigset_t) > sizeof(*uset))
4078 do_sigpending(&set);
4080 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4086 #ifdef CONFIG_COMPAT
4087 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4091 do_sigpending(&set);
4093 return put_user(set.sig[0], set32);
4099 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4101 * sys_sigprocmask - examine and change blocked signals
4102 * @how: whether to add, remove, or set signals
4103 * @nset: signals to add or remove (if non-null)
4104 * @oset: previous value of signal mask if non-null
4106 * Some platforms have their own version with special arguments;
4107 * others support only sys_rt_sigprocmask.
4110 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4111 old_sigset_t __user *, oset)
4113 old_sigset_t old_set, new_set;
4114 sigset_t new_blocked;
4116 old_set = current->blocked.sig[0];
4119 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4122 new_blocked = current->blocked;
4126 sigaddsetmask(&new_blocked, new_set);
4129 sigdelsetmask(&new_blocked, new_set);
4132 new_blocked.sig[0] = new_set;
4138 set_current_blocked(&new_blocked);
4142 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4148 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4150 #ifndef CONFIG_ODD_RT_SIGACTION
4152 * sys_rt_sigaction - alter an action taken by a process
4153 * @sig: signal to be sent
4154 * @act: new sigaction
4155 * @oact: used to save the previous sigaction
4156 * @sigsetsize: size of sigset_t type
4158 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4159 const struct sigaction __user *, act,
4160 struct sigaction __user *, oact,
4163 struct k_sigaction new_sa, old_sa;
4166 /* XXX: Don't preclude handling different sized sigset_t's. */
4167 if (sigsetsize != sizeof(sigset_t))
4170 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4173 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4177 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4182 #ifdef CONFIG_COMPAT
4183 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4184 const struct compat_sigaction __user *, act,
4185 struct compat_sigaction __user *, oact,
4186 compat_size_t, sigsetsize)
4188 struct k_sigaction new_ka, old_ka;
4189 #ifdef __ARCH_HAS_SA_RESTORER
4190 compat_uptr_t restorer;
4194 /* XXX: Don't preclude handling different sized sigset_t's. */
4195 if (sigsetsize != sizeof(compat_sigset_t))
4199 compat_uptr_t handler;
4200 ret = get_user(handler, &act->sa_handler);
4201 new_ka.sa.sa_handler = compat_ptr(handler);
4202 #ifdef __ARCH_HAS_SA_RESTORER
4203 ret |= get_user(restorer, &act->sa_restorer);
4204 new_ka.sa.sa_restorer = compat_ptr(restorer);
4206 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4207 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4212 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4214 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4216 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4217 sizeof(oact->sa_mask));
4218 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4219 #ifdef __ARCH_HAS_SA_RESTORER
4220 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4221 &oact->sa_restorer);
4227 #endif /* !CONFIG_ODD_RT_SIGACTION */
4229 #ifdef CONFIG_OLD_SIGACTION
4230 SYSCALL_DEFINE3(sigaction, int, sig,
4231 const struct old_sigaction __user *, act,
4232 struct old_sigaction __user *, oact)
4234 struct k_sigaction new_ka, old_ka;
4239 if (!access_ok(act, sizeof(*act)) ||
4240 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4241 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4242 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4243 __get_user(mask, &act->sa_mask))
4245 #ifdef __ARCH_HAS_KA_RESTORER
4246 new_ka.ka_restorer = NULL;
4248 siginitset(&new_ka.sa.sa_mask, mask);
4251 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4254 if (!access_ok(oact, sizeof(*oact)) ||
4255 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4256 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4257 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4258 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4265 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4266 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4267 const struct compat_old_sigaction __user *, act,
4268 struct compat_old_sigaction __user *, oact)
4270 struct k_sigaction new_ka, old_ka;
4272 compat_old_sigset_t mask;
4273 compat_uptr_t handler, restorer;
4276 if (!access_ok(act, sizeof(*act)) ||
4277 __get_user(handler, &act->sa_handler) ||
4278 __get_user(restorer, &act->sa_restorer) ||
4279 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4280 __get_user(mask, &act->sa_mask))
4283 #ifdef __ARCH_HAS_KA_RESTORER
4284 new_ka.ka_restorer = NULL;
4286 new_ka.sa.sa_handler = compat_ptr(handler);
4287 new_ka.sa.sa_restorer = compat_ptr(restorer);
4288 siginitset(&new_ka.sa.sa_mask, mask);
4291 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4294 if (!access_ok(oact, sizeof(*oact)) ||
4295 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4296 &oact->sa_handler) ||
4297 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4298 &oact->sa_restorer) ||
4299 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4300 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4307 #ifdef CONFIG_SGETMASK_SYSCALL
4310 * For backwards compatibility. Functionality superseded by sigprocmask.
4312 SYSCALL_DEFINE0(sgetmask)
4315 return current->blocked.sig[0];
4318 SYSCALL_DEFINE1(ssetmask, int, newmask)
4320 int old = current->blocked.sig[0];
4323 siginitset(&newset, newmask);
4324 set_current_blocked(&newset);
4328 #endif /* CONFIG_SGETMASK_SYSCALL */
4330 #ifdef __ARCH_WANT_SYS_SIGNAL
4332 * For backwards compatibility. Functionality superseded by sigaction.
4334 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4336 struct k_sigaction new_sa, old_sa;
4339 new_sa.sa.sa_handler = handler;
4340 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4341 sigemptyset(&new_sa.sa.sa_mask);
4343 ret = do_sigaction(sig, &new_sa, &old_sa);
4345 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4347 #endif /* __ARCH_WANT_SYS_SIGNAL */
4349 #ifdef __ARCH_WANT_SYS_PAUSE
4351 SYSCALL_DEFINE0(pause)
4353 while (!signal_pending(current)) {
4354 __set_current_state(TASK_INTERRUPTIBLE);
4357 return -ERESTARTNOHAND;
4362 static int sigsuspend(sigset_t *set)
4364 current->saved_sigmask = current->blocked;
4365 set_current_blocked(set);
4367 while (!signal_pending(current)) {
4368 __set_current_state(TASK_INTERRUPTIBLE);
4371 set_restore_sigmask();
4372 return -ERESTARTNOHAND;
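/*
 * Illustrative userspace sketch, not part of signal.c: the race-free wait
 * pattern sigsuspend() exists for — block the signal, test the flag, then
 * atomically swap in a mask that allows it and sleep. A minimal sketch
 * assuming a POSIX/glibc userland; error handling elided.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t ready;

static void on_usr1(int sig) { ready = 1; }

int main(void)
{
	struct sigaction sa;
	sigset_t block, wait_mask;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_usr1;
	sigaction(SIGUSR1, &sa, NULL);

	/* Blocked outside the wait, so it can't slip in between the
	 * flag test and the sleep. */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &wait_mask);
	sigdelset(&wait_mask, SIGUSR1);

	raise(SIGUSR1);			/* "early" arrival: stays pending */
	while (!ready)
		sigsuspend(&wait_mask);	/* atomic unmask + sleep */
	printf("woken without losing the signal\n");
	return 0;
}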
4376 * sys_rt_sigsuspend - replace the signal mask with the @unewset value and
4377 * suspend until a signal is received
4378 * @unewset: new signal mask value
4379 * @sigsetsize: size of sigset_t type
4381 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4385 /* XXX: Don't preclude handling different sized sigset_t's. */
4386 if (sigsetsize != sizeof(sigset_t))
4389 if (copy_from_user(&newset, unewset, sizeof(newset)))
4391 return sigsuspend(&newset);
4394 #ifdef CONFIG_COMPAT
4395 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4399 /* XXX: Don't preclude handling different sized sigset_t's. */
4400 if (sigsetsize != sizeof(sigset_t))
4403 if (get_compat_sigset(&newset, unewset))
4405 return sigsuspend(&newset);
4409 #ifdef CONFIG_OLD_SIGSUSPEND
4410 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4413 siginitset(&blocked, mask);
4414 return sigsuspend(&blocked);
4417 #ifdef CONFIG_OLD_SIGSUSPEND3
4418 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4421 siginitset(&blocked, mask);
4422 return sigsuspend(&blocked);
4426 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4431 static inline void siginfo_buildtime_checks(void)
4433 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4435 /* Verify the offsets in the two siginfos match */
4436 #define CHECK_OFFSET(field) \
4437 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4440 CHECK_OFFSET(si_pid);
4441 CHECK_OFFSET(si_uid);
4444 CHECK_OFFSET(si_tid);
4445 CHECK_OFFSET(si_overrun);
4446 CHECK_OFFSET(si_value);
4449 CHECK_OFFSET(si_pid);
4450 CHECK_OFFSET(si_uid);
4451 CHECK_OFFSET(si_value);
4454 CHECK_OFFSET(si_pid);
4455 CHECK_OFFSET(si_uid);
4456 CHECK_OFFSET(si_status);
4457 CHECK_OFFSET(si_utime);
4458 CHECK_OFFSET(si_stime);
4461 CHECK_OFFSET(si_addr);
4462 CHECK_OFFSET(si_addr_lsb);
4463 CHECK_OFFSET(si_lower);
4464 CHECK_OFFSET(si_upper);
4465 CHECK_OFFSET(si_pkey);
4468 CHECK_OFFSET(si_band);
4469 CHECK_OFFSET(si_fd);
4472 CHECK_OFFSET(si_call_addr);
4473 CHECK_OFFSET(si_syscall);
4474 CHECK_OFFSET(si_arch);
4478 void __init signals_init(void)
4480 siginfo_buildtime_checks();
4482 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4485 #ifdef CONFIG_KGDB_KDB
4486 #include <linux/kdb.h>
4488 * kdb_send_sig - Allows kdb to send signals without exposing
4489 * signal internals. This function checks if the required locks are
4490 * available before calling the main signal code, to avoid kdb
4491 * deadlocks.
4493 void kdb_send_sig(struct task_struct *t, int sig)
4495 static struct task_struct *kdb_prev_t;
4497 if (!spin_trylock(&t->sighand->siglock)) {
4498 kdb_printf("Can't do kill command now.\n"
4499 "The sigmask lock is held somewhere else in "
4500 "kernel, try again later\n");
4503 new_t = kdb_prev_t != t;
4505 if (t->state != TASK_RUNNING && new_t) {
4506 spin_unlock(&t->sighand->siglock);
4507 kdb_printf("Process is not RUNNING, sending a signal from "
4508 "kdb risks deadlock\n"
4509 "on the run queue locks. "
4510 "The signal has _not_ been sent.\n"
4511 "Reissue the kill command if you want to risk "
4515 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4516 spin_unlock(&t->sighand->siglock);
4518 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4521 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4523 #endif /* CONFIG_KGDB_KDB */