// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even an ignored signal, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
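
/*
 * Illustrative sketch, not part of signal.c: the word-wise "&~" dance
 * above just asks whether any signal bit survives the blocked mask. A
 * stand-alone model of the default case, with plain unsigned longs
 * standing in for the sigset_t words (names here are hypothetical):
 */
static inline bool __maybe_unused model_has_pending(const unsigned long *signal,
						    const unsigned long *blocked,
						    unsigned int words)
{
	unsigned long ready = 0;
	unsigned int i;

	for (i = 0; i < words; i++)
		ready |= signal[i] & ~blocked[i];	/* same as "&~" above */
	return ready != 0;
}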
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * may clear it.
	 */
	return false;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
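
/*
 * Illustrative note (hypothetical example, not kernel code): because the
 * first word is filtered through SYNCHRONOUS_MASK above, a pending
 * SIGSEGV is returned ahead of a lower-numbered asynchronous signal:
 *
 *	sigaddset(&pending->signal, SIGHUP);
 *	sigaddset(&pending->signal, SIGSEGV);
 *	next_signal(pending, &current->blocked);   // SIGSEGV, not SIGHUP
 */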
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
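
/*
 * Illustrative sketch (hypothetical caller, not part of this file; assumes
 * linux/kthread.h): a kthread that opted in to signal delivery typically
 * drains everything pending before it loops again:
 */
static int __maybe_unused example_kthread_fn(void *unused)
{
	allow_signal(SIGKILL);			/* opt this kthread in */

	while (!kthread_should_stop()) {
		if (signal_pending(current))
			flush_signals(current);	/* drop anything queued */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}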
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
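
/*
 * Illustrative note (hypothetical fragment, not part of this file): as the
 * comment above says, dequeue_signal() must be called with the siglock
 * held; the usual pattern (cf. get_signal()) is
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * where a zero return means nothing deliverable was pending.
 */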
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
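
/*
 * Illustrative note (the definition lives in include/linux/sched/signal.h,
 * not in this file): the signal_wake_up() calls used throughout this file
 * are thin wrappers that boil down to
 *
 *	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
 *
 * so the "resume" argument decides whether a stopped/traced task is woken
 * as well.
 */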
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
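
/*
 * Illustrative note (hypothetical example, not kernel code): legacy_queue()
 * is what collapses repeated legacy signals.  If SIGUSR1 is already pending
 * and the process is sent SIGUSR1 again, the second kill() succeeds but
 * queues nothing new; real-time signals (>= SIGRTMIN) queue once per send.
 */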
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;

		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  The 32bit address will be encoded in the low
 * 32bits of the pointer.  Those low 32bits will be stored at a higher
 * address than they appear in a 32 bit pointer.  So userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
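
/*
 * Illustrative sketch (hypothetical caller, not part of this file): most
 * in-kernel senders use send_sig().  priv=1 selects SEND_SIG_PRIV, so the
 * signal is treated as kernel-generated and e.g. a pid-namespace init
 * cannot ignore it:
 */
static void __maybe_unused example_force_kill(struct task_struct *p)
{
	send_sig(SIGKILL, p, 1);	/* priv=1 -> SEND_SIG_PRIV */
}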
void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
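
/*
 * Illustrative sketch (hypothetical, not part of this file): signalling by
 * pid number from kernel code usually resolves a struct pid first:
 */
static int __maybe_unused example_signal_pid_nr(pid_t nr)
{
	struct pid *pid;
	int ret = -ESRCH;

	rcu_read_lock();
	pid = find_vpid(nr);
	if (pid)
		ret = kill_pid(pid, SIGTERM, 1);
	rcu_read_unlock();
	return ret;
}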
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
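
/*
 * Illustrative sketch (hypothetical, not part of this file): the POSIX
 * timer pattern described above - preallocate at timer-create time so an
 * expiry can never fail with -EAGAIN, reuse the same entry for every
 * expiry, and free it when the timer is deleted.  The function names and
 * the SIGRTMIN choice are hypothetical:
 */
static struct sigqueue *__maybe_unused example_timer_create(void)
{
	struct sigqueue *q = sigqueue_alloc();	/* NULL -> report EAGAIN */

	if (q) {
		q->info.si_signo = SIGRTMIN;
		q->info.si_code = SI_TIMER;
	}
	return q;
}

static void __maybe_unused example_timer_fire(struct sigqueue *q, struct pid *pid)
{
	/* Re-sends the same entry; bumps si_overrun if still queued. */
	send_sigqueue(q, pid, PIDTYPE_TGID);
}

static void __maybe_unused example_timer_delete(struct sigqueue *q)
{
	sigqueue_free(q);	/* safe even if the entry is still queued */
}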
static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		cgroup_enter_frozen();
		preempt_enable_no_resched();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
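
/*
 * Illustrative note (cf. ptrace_event() in include/linux/ptrace.h, not
 * defined in this file): callers encode a ptrace event in the high byte
 * of exit_code, e.g.
 *
 *	ptrace_notify(SIGTRAP | (PTRACE_EVENT_EXEC << 8));
 *
 * which is exactly the form the BUG_ON() above accepts.
 */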
2284 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2285 * @signr: signr causing group stop if initiating
2287 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2288 * and participate in it. If already set, participate in the existing
2289 * group stop. If participated in a group stop (and thus slept), %true is
2290 * returned with siglock released.
2292 * If ptraced, this function doesn't handle stop itself. Instead,
2293 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2294 * untouched. The caller must ensure that INTERRUPT trap handling takes
2295 * place afterwards.
2298 * Must be called with @current->sighand->siglock held, which is released on %true return.
2302 * %false if group stop is already cancelled or ptrace trap is scheduled.
2303 * %true if participated in group stop.
2305 static bool do_signal_stop(int signr)
2306 __releases(&current->sighand->siglock)
2308 struct signal_struct *sig = current->signal;
2310 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2311 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2312 struct task_struct *t;
2314 /* signr will be recorded in task->jobctl for retries */
2315 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2317 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2318 unlikely(signal_group_exit(sig)))
2321 * There is no group stop already in progress. We must initiate one now.
2324 * While ptraced, a task may be resumed while group stop is
2325 * still in effect and then receive a stop signal and
2326 * initiate another group stop. This deviates from the
2327 * usual behavior as two consecutive stop signals can't
2328 * cause two group stops when !ptraced. That is why we
2329 * also check !task_is_stopped(t) below.
2331 * The condition can be distinguished by testing whether
2332 * SIGNAL_STOP_STOPPED is already set. Don't generate
2333 * group_exit_code in such case.
2335 * This is not necessary for SIGNAL_STOP_CONTINUED because
2336 * an intervening stop signal is required to cause two
2337 * continued events regardless of ptrace.
2339 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2340 sig->group_exit_code = signr;
2342 sig->group_stop_count = 0;
2344 if (task_set_jobctl_pending(current, signr | gstop))
2345 sig->group_stop_count++;
2348 while_each_thread(current, t) {
2350 * Setting state to TASK_STOPPED for a group
2351 * stop is always done with the siglock held,
2352 * so this check has no races.
2354 if (!task_is_stopped(t) &&
2355 task_set_jobctl_pending(t, signr | gstop)) {
2356 sig->group_stop_count++;
2357 if (likely(!(t->ptrace & PT_SEIZED)))
2358 signal_wake_up(t, 0);
2360 ptrace_trap_notify(t);
2365 if (likely(!current->ptrace)) {
2369 * If there are no other threads in the group, or if there
2370 * is a group stop in progress and we are the last to stop,
2371 * report to the parent.
2373 if (task_participate_group_stop(current))
2374 notify = CLD_STOPPED;
2376 set_special_state(TASK_STOPPED);
2377 spin_unlock_irq(&current->sighand->siglock);
2380 * Notify the parent of the group stop completion. Because
2381 * we're not holding either the siglock or tasklist_lock
2382 * here, ptracer may attach in between; however, this is for
2383 * group stop and should always be delivered to the real
2384 * parent of the group leader. The new ptracer will get
2385 * its notification when this task transitions into TASK_TRACED.
2389 read_lock(&tasklist_lock);
2390 do_notify_parent_cldstop(current, false, notify);
2391 read_unlock(&tasklist_lock);
2394 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2395 cgroup_enter_frozen();
2396 freezable_schedule();
2400 * While ptraced, group stop is handled by STOP trap.
2401 * Schedule it and let the caller deal with it.
2403 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2409 * do_jobctl_trap - take care of ptrace jobctl traps
2411 * When PT_SEIZED, it's used for both group stop and explicit
2412 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2413 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2414 * the stop signal; otherwise, %SIGTRAP.
2416 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2417 * number as exit_code and no siginfo.
2420 * Must be called with @current->sighand->siglock held, which may be
2421 * released and re-acquired before returning with intervening sleep.
2423 static void do_jobctl_trap(void)
2425 struct signal_struct *signal = current->signal;
2426 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2428 if (current->ptrace & PT_SEIZED) {
2429 if (!signal->group_stop_count &&
2430 !(signal->flags & SIGNAL_STOP_STOPPED))
2432 WARN_ON_ONCE(!signr);
2433 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2436 WARN_ON_ONCE(!signr);
2437 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2438 current->exit_code = 0;
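/*
 * Illustrative sketch, not part of this file: how a PTRACE_SEIZE-based
 * tracer might tell the PTRACE_EVENT_STOP traps generated above apart
 * from ordinary signal-delivery stops. Status decoding follows ptrace(2);
 * obtaining @status via waitpid() is assumed to happen elsewhere.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <stdio.h>
 *
 *	static void classify_stop(int status)
 *	{
 *		if (!WIFSTOPPED(status))
 *			return;
 *		if (status >> 16 == PTRACE_EVENT_STOP)
 *			// group stop or SEIZE/INTERRUPT trap; the stop
 *			// signal, if any, is in the low status bits
 *			printf("event stop, sig=%d\n", WSTOPSIG(status));
 *		else
 *			// ordinary signal-delivery stop
 *			printf("delivery stop, sig=%d\n", WSTOPSIG(status));
 *	}
 */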
2443 * do_freezer_trap - handle the freezer jobctl trap
2445 * Puts the task into the frozen state, unless the task is about to quit.
2446 * In that case it drops JOBCTL_TRAP_FREEZE.
2449 * Must be called with @current->sighand->siglock held,
2450 * which is always released before returning.
2452 static void do_freezer_trap(void)
2453 __releases(&current->sighand->siglock)
2456 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2457 * let's make another loop to give it a chance to be handled.
2458 * In any case, we'll return back.
2460 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2461 JOBCTL_TRAP_FREEZE) {
2462 spin_unlock_irq(&current->sighand->siglock);
2467 * Now we're sure that there is no pending fatal signal and no
2468 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2469 * immediately (if there is a non-fatal signal pending), and
2470 * put the task to sleep.
2472 __set_current_state(TASK_INTERRUPTIBLE);
2473 clear_thread_flag(TIF_SIGPENDING);
2474 spin_unlock_irq(&current->sighand->siglock);
2475 cgroup_enter_frozen();
2476 freezable_schedule();
2479 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2482 * We do not check sig_kernel_stop(signr) but set this marker
2483 * unconditionally because we do not know whether debugger will
2484 * change signr. This flag has no meaning unless we are going
2485 * to stop after return from ptrace_stop(). In this case it will
2486 * be checked in do_signal_stop(), we should only stop if it was
2487 * not cleared by SIGCONT while we were sleeping. See also the
2488 * comment in dequeue_signal().
2490 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2491 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2493 /* We're back. Did the debugger cancel the sig? */
2494 signr = current->exit_code;
2498 current->exit_code = 0;
2501 * Update the siginfo structure if the signal has
2502 * changed. If the debugger wanted something
2503 * specific in the siginfo structure then it should
2504 * have updated *info via PTRACE_SETSIGINFO.
2506 if (signr != info->si_signo) {
2507 clear_siginfo(info);
2508 info->si_signo = signr;
2510 info->si_code = SI_USER;
2512 info->si_pid = task_pid_vnr(current->parent);
2513 info->si_uid = from_kuid_munged(current_user_ns(),
2514 task_uid(current->parent));
2518 /* If the (new) signal is now blocked, requeue it. */
2519 if (sigismember(&current->blocked, signr)) {
2520 send_signal(signr, info, current, PIDTYPE_PID);
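/*
 * Illustrative sketch, not part of this file: the tracer-side view of the
 * "did the debugger cancel the sig?" logic above. The data argument of
 * the restarting ptrace request becomes the tracee's exit_code - 0
 * suppresses the signal, any other value is (re)injected:
 *
 *	#include <sys/ptrace.h>
 *	#include <signal.h>
 *
 *	ptrace(PTRACE_CONT, pid, 0, 0);		// suppress the signal
 *	ptrace(PTRACE_CONT, pid, 0, SIGTERM);	// or deliver SIGTERM instead
 */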
2527 bool get_signal(struct ksignal *ksig)
2529 struct sighand_struct *sighand = current->sighand;
2530 struct signal_struct *signal = current->signal;
2534 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2535 * that the arch handlers don't all have to do it. If we get here
2536 * without TIF_SIGPENDING, just exit after running signal work.
2538 #ifdef TIF_NOTIFY_SIGNAL
2539 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2540 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2541 tracehook_notify_signal();
2542 if (!task_sigpending(current))
2547 if (unlikely(uprobe_deny_signal()))
2551 * Do this once, we can't return to user-mode if freezing() == T.
2552 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2553 * thus do not need another check after return.
2558 spin_lock_irq(&sighand->siglock);
2560 * Make sure we can safely read ->jobctl in task_work_add(). As Oleg
2563 * It pairs with mb (implied by cmpxchg) before READ_ONCE. So we
2566 * task_work_add: get_signal:
2567 * STORE(task->task_works, new_work); STORE(task->jobctl);
2569 * LOAD(task->jobctl); LOAD(task->task_works);
2571 * and we can rely on STORE-MB-LOAD [in task_work_add].
2573 smp_store_mb(current->jobctl, current->jobctl & ~JOBCTL_TASK_WORK);
2574 if (unlikely(current->task_works)) {
2575 spin_unlock_irq(&sighand->siglock);
2581 * Every stopped thread goes here after wakeup. Check to see if
2582 * we should notify the parent, prepare_signal(SIGCONT) encodes
2583 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2585 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2588 if (signal->flags & SIGNAL_CLD_CONTINUED)
2589 why = CLD_CONTINUED;
2593 signal->flags &= ~SIGNAL_CLD_MASK;
2595 spin_unlock_irq(&sighand->siglock);
2598 * Notify the parent that we're continuing. This event is
2599 * always per-process and doesn't make a whole lot of sense
2600 * for ptracers, who shouldn't consume the state via
2601 * wait(2) either, but, for backward compatibility, notify
2602 * the ptracer of the group leader too unless it's gonna be a duplicate.
2605 read_lock(&tasklist_lock);
2606 do_notify_parent_cldstop(current, false, why);
2608 if (ptrace_reparented(current->group_leader))
2609 do_notify_parent_cldstop(current->group_leader,
2611 read_unlock(&tasklist_lock);
2616 /* Has this task already been marked for death? */
2617 if (signal_group_exit(signal)) {
2618 ksig->info.si_signo = signr = SIGKILL;
2619 sigdelset(&current->pending.signal, SIGKILL);
2620 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2621 &sighand->action[SIGKILL - 1]);
2622 recalc_sigpending();
2627 struct k_sigaction *ka;
2629 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2633 if (unlikely(current->jobctl &
2634 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2635 if (current->jobctl & JOBCTL_TRAP_MASK) {
2637 spin_unlock_irq(&sighand->siglock);
2638 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2645 * If the task is leaving the frozen state, let's update
2646 * cgroup counters and reset the frozen bit.
2648 if (unlikely(cgroup_task_frozen(current))) {
2649 spin_unlock_irq(&sighand->siglock);
2650 cgroup_leave_frozen(false);
2655 * Signals generated by the execution of an instruction
2656 * need to be delivered before any other pending signals
2657 * so that the instruction pointer in the signal stack
2658 * frame points to the faulting instruction.
2660 signr = dequeue_synchronous_signal(&ksig->info);
2662 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2665 break; /* will return 0 */
2667 if (unlikely(current->ptrace) && signr != SIGKILL) {
2668 signr = ptrace_signal(signr, &ksig->info);
2673 ka = &sighand->action[signr-1];
2675 /* Trace actually delivered signals. */
2676 trace_signal_deliver(signr, &ksig->info, ka);
2678 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2680 if (ka->sa.sa_handler != SIG_DFL) {
2681 /* Run the handler. */
2684 if (ka->sa.sa_flags & SA_ONESHOT)
2685 ka->sa.sa_handler = SIG_DFL;
2687 break; /* will return non-zero "signr" value */
2691 * Now we are doing the default action for this signal.
2693 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2697 * Global init gets no signals it doesn't want.
2698 * Container-init gets no signals it doesn't want from the same container.
2701 * Note that if global/container-init sees a sig_kernel_only()
2702 * signal here, the signal must have been generated internally
2703 * or must have come from an ancestor namespace. In either
2704 * case, the signal cannot be dropped.
2706 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2707 !sig_kernel_only(signr))
2710 if (sig_kernel_stop(signr)) {
2712 * The default action is to stop all threads in
2713 * the thread group. The job control signals
2714 * do nothing in an orphaned pgrp, but SIGSTOP
2715 * always works. Note that siglock needs to be
2716 * dropped during the call to is_orphaned_pgrp()
2717 * because of lock ordering with tasklist_lock.
2718 * This allows an intervening SIGCONT to be posted.
2719 * We need to check for that and bail out if necessary.
2721 if (signr != SIGSTOP) {
2722 spin_unlock_irq(&sighand->siglock);
2724 /* signals can be posted during this window */
2726 if (is_current_pgrp_orphaned())
2729 spin_lock_irq(&sighand->siglock);
2732 if (likely(do_signal_stop(ksig->info.si_signo))) {
2733 /* It released the siglock. */
2738 * We didn't actually stop, due to a race
2739 * with SIGCONT or something like that.
2745 spin_unlock_irq(&sighand->siglock);
2746 if (unlikely(cgroup_task_frozen(current)))
2747 cgroup_leave_frozen(true);
2750 * Anything else is fatal, maybe with a core dump.
2752 current->flags |= PF_SIGNALED;
2754 if (sig_kernel_coredump(signr)) {
2755 if (print_fatal_signals)
2756 print_fatal_signal(ksig->info.si_signo);
2757 proc_coredump_connector(current);
2759 * If it was able to dump core, this kills all
2760 * other threads in the group and synchronizes with
2761 * their demise. If we lost the race with another
2762 * thread getting here, it set group_exit_code
2763 * first and our do_group_exit call below will use
2764 * that value and ignore the one we pass it.
2766 do_coredump(&ksig->info);
2770 * Death signals, no core dump.
2772 do_group_exit(ksig->info.si_signo);
2775 spin_unlock_irq(&sighand->siglock);
2778 return ksig->sig > 0;
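/*
 * Illustrative sketch, not part of this file: architecture entry code
 * typically drives get_signal() from its return-to-user path roughly like
 * this; handle_signal() (which builds the signal frame) and the syscall
 * restart logic are per-arch and only hinted at here:
 *
 *	void arch_do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			handle_signal(&ksig, regs);	// set up handler frame
 *			return;
 *		}
 *		// no handler: handle syscall restart, then put back the
 *		// saved sigmask since no signal frame consumed it
 *		restore_saved_sigmask();
 *	}
 */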
2782 * signal_delivered - update state after a signal was successfully delivered
2783 * @ksig: kernel signal struct
2784 * @stepping: nonzero if debugger single-step or block-step in use
2786 * This function should be called when a signal has successfully been
2787 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2788 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2789 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2791 static void signal_delivered(struct ksignal *ksig, int stepping)
2795 /* A signal was successfully delivered, and the
2796 saved sigmask was stored on the signal frame,
2797 and will be restored by sigreturn. So we can
2798 simply clear the restore sigmask flag. */
2799 clear_restore_sigmask();
2801 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2802 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2803 sigaddset(&blocked, ksig->sig);
2804 set_current_blocked(&blocked);
2805 tracehook_signal_handler(stepping);
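/*
 * Illustrative sketch, not part of this file: the mask computed above is
 * what makes handlers non-reentrant by default - the signal is blocked
 * while its own handler runs unless it was installed with SA_NODEFER.
 * 'handler' is assumed to be defined elsewhere:
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = handler;
 *	sa.sa_flags = SA_NODEFER;	// allow SIGUSR1 inside its own handler
 *	sigemptyset(&sa.sa_mask);	// nothing extra blocked during handler
 *	sigaction(SIGUSR1, &sa, NULL);
 */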
2808 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2811 force_sigsegv(ksig->sig);
2813 signal_delivered(ksig, stepping);
2817 * It could be that complete_signal() picked us to notify about the
2818 * group-wide signal. Other threads should be notified now to take
2819 * the shared signals in @which since we will not.
2821 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2824 struct task_struct *t;
2826 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2827 if (sigisemptyset(&retarget))
2831 while_each_thread(tsk, t) {
2832 if (t->flags & PF_EXITING)
2835 if (!has_pending_signals(&retarget, &t->blocked))
2837 /* Remove the signals this thread can handle. */
2838 sigandsets(&retarget, &retarget, &t->blocked);
2840 if (!task_sigpending(t))
2841 signal_wake_up(t, 0);
2843 if (sigisemptyset(&retarget))
2848 void exit_signals(struct task_struct *tsk)
2854 * @tsk is about to have PF_EXITING set - lock out users which
2855 * expect stable threadgroup.
2857 cgroup_threadgroup_change_begin(tsk);
2859 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2860 tsk->flags |= PF_EXITING;
2861 cgroup_threadgroup_change_end(tsk);
2865 spin_lock_irq(&tsk->sighand->siglock);
2867 * From now this task is not visible for group-wide signals,
2868 * see wants_signal(), do_signal_stop().
2870 tsk->flags |= PF_EXITING;
2872 cgroup_threadgroup_change_end(tsk);
2874 if (!task_sigpending(tsk))
2877 unblocked = tsk->blocked;
2878 signotset(&unblocked);
2879 retarget_shared_pending(tsk, &unblocked);
2881 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2882 task_participate_group_stop(tsk))
2883 group_stop = CLD_STOPPED;
2885 spin_unlock_irq(&tsk->sighand->siglock);
2888 * If group stop has completed, deliver the notification. This
2889 * should always go to the real parent of the group leader.
2891 if (unlikely(group_stop)) {
2892 read_lock(&tasklist_lock);
2893 do_notify_parent_cldstop(tsk, false, group_stop);
2894 read_unlock(&tasklist_lock);
2899 * System call entry points.
2903 * sys_restart_syscall - restart a system call
2905 SYSCALL_DEFINE0(restart_syscall)
2907 struct restart_block *restart = &current->restart_block;
2908 return restart->fn(restart);
2911 long do_no_restart_syscall(struct restart_block *param)
2916 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2918 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2919 sigset_t newblocked;
2920 /* A set of now blocked but previously unblocked signals. */
2921 sigandnsets(&newblocked, newset, &current->blocked);
2922 retarget_shared_pending(tsk, &newblocked);
2924 tsk->blocked = *newset;
2925 recalc_sigpending();
2929 * set_current_blocked - change current->blocked mask
2932 * It is wrong to change ->blocked directly, this helper should be used
2933 * to ensure the process can't miss a shared signal we are going to block.
2935 void set_current_blocked(sigset_t *newset)
2937 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2938 __set_current_blocked(newset);
2941 void __set_current_blocked(const sigset_t *newset)
2943 struct task_struct *tsk = current;
2946 * In case the signal mask hasn't changed, there is nothing we need
2947 * to do. The current->blocked shouldn't be modified by another task.
2949 if (sigequalsets(&tsk->blocked, newset))
2952 spin_lock_irq(&tsk->sighand->siglock);
2953 __set_task_blocked(tsk, newset);
2954 spin_unlock_irq(&tsk->sighand->siglock);
2958 * This is also useful for kernel threads that want to temporarily
2959 * (or permanently) block certain signals.
2961 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2962 * interface happily blocks "unblockable" signals like SIGKILL and friends.
2965 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2967 struct task_struct *tsk = current;
2970 /* Lockless, only current can change ->blocked, never from irq */
2972 *oldset = tsk->blocked;
2976 sigorsets(&newset, &tsk->blocked, set);
2979 sigandnsets(&newset, &tsk->blocked, set);
2988 __set_current_blocked(&newset);
2991 EXPORT_SYMBOL(sigprocmask);
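/*
 * Illustrative sketch, not part of this file: the userspace counterpart
 * of the how-values handled above, e.g. holding SIGINT pending across a
 * critical section:
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// blocked |= set
 *	// ... critical section; SIGINT stays pending ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore previous mask
 */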
2994 * The API helps set app-provided sigmasks.
2996 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2997 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2999 * Note that it does set_restore_sigmask() in advance, so it must always be
3000 * paired with restore_saved_sigmask_unless() before return from syscall.
3002 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3008 if (sigsetsize != sizeof(sigset_t))
3010 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3013 set_restore_sigmask();
3014 current->saved_sigmask = current->blocked;
3015 set_current_blocked(&kmask);
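/*
 * Illustrative sketch, not part of this file: callers such as ppoll()
 * use this helper so that applying the caller's mask and sleeping are one
 * atomic step - the classic race where a signal slips in between
 * sigprocmask() and poll() cannot happen:
 *
 *	#define _GNU_SOURCE
 *	#include <poll.h>
 *	#include <signal.h>
 *
 *	sigset_t allow;
 *	sigemptyset(&allow);		// mask in force only while waiting
 *	struct pollfd pfd = { .fd = 0, .events = POLLIN };
 *	ppoll(&pfd, 1, NULL, &allow);	// old mask is restored on return
 */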
3020 #ifdef CONFIG_COMPAT
3021 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3028 if (sigsetsize != sizeof(compat_sigset_t))
3030 if (get_compat_sigset(&kmask, umask))
3033 set_restore_sigmask();
3034 current->saved_sigmask = current->blocked;
3035 set_current_blocked(&kmask);
3042 * sys_rt_sigprocmask - change the list of currently blocked signals
3043 * @how: whether to add, remove, or set signals
3044 * @nset: new set of blocked signals (if non-null)
3045 * @oset: previous value of signal mask if non-null
3046 * @sigsetsize: size of sigset_t type
3048 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3049 sigset_t __user *, oset, size_t, sigsetsize)
3051 sigset_t old_set, new_set;
3054 /* XXX: Don't preclude handling different sized sigset_t's. */
3055 if (sigsetsize != sizeof(sigset_t))
3058 old_set = current->blocked;
3061 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3063 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3065 error = sigprocmask(how, &new_set, NULL);
3071 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3078 #ifdef CONFIG_COMPAT
3079 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3080 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3082 sigset_t old_set = current->blocked;
3084 /* XXX: Don't preclude handling different sized sigset_t's. */
3085 if (sigsetsize != sizeof(sigset_t))
3091 if (get_compat_sigset(&new_set, nset))
3093 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3095 error = sigprocmask(how, &new_set, NULL);
3099 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3103 static void do_sigpending(sigset_t *set)
3105 spin_lock_irq(&current->sighand->siglock);
3106 sigorsets(set, &current->pending.signal,
3107 &current->signal->shared_pending.signal);
3108 spin_unlock_irq(&current->sighand->siglock);
3110 /* Outside the lock because only this thread touches it. */
3111 sigandsets(set, &current->blocked, set);
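/*
 * Illustrative sketch, not part of this file: the set computed above is
 * what sigpending(2) reports - signals raised while blocked:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);		// blocked, so it stays pending
 *	sigpending(&set);
 *	printf("%d\n", sigismember(&set, SIGUSR1));	// prints 1
 */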
3115 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
3117 * @uset: stores pending signals
3118 * @sigsetsize: size of sigset_t type or larger
3120 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3124 if (sigsetsize > sizeof(*uset))
3127 do_sigpending(&set);
3129 if (copy_to_user(uset, &set, sigsetsize))
3135 #ifdef CONFIG_COMPAT
3136 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3137 compat_size_t, sigsetsize)
3141 if (sigsetsize > sizeof(*uset))
3144 do_sigpending(&set);
3146 return put_compat_sigset(uset, &set, sigsetsize);
3150 static const struct {
3151 unsigned char limit, layout;
3152 } sig_sicodes[] = {
3153 [SIGILL] = { NSIGILL, SIL_FAULT },
3154 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3155 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3156 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3157 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3159 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3161 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3162 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3163 [SIGSYS] = { NSIGSYS, SIL_SYS },
3166 static bool known_siginfo_layout(unsigned sig, int si_code)
3168 if (si_code == SI_KERNEL)
3170 else if (si_code > SI_USER) {
3171 if (sig_specific_sicodes(sig)) {
3172 if (si_code <= sig_sicodes[sig].limit)
3175 else if (si_code <= NSIGPOLL)
3178 else if (si_code >= SI_DETHREAD)
3180 else if (si_code == SI_ASYNCNL)
3185 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3187 enum siginfo_layout layout = SIL_KILL;
3188 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3189 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3190 (si_code <= sig_sicodes[sig].limit)) {
3191 layout = sig_sicodes[sig].layout;
3192 /* Handle the exceptions */
3193 if ((sig == SIGBUS) &&
3194 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3195 layout = SIL_FAULT_MCEERR;
3196 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3197 layout = SIL_FAULT_BNDERR;
3199 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3200 layout = SIL_FAULT_PKUERR;
3203 else if (si_code <= NSIGPOLL)
3206 if (si_code == SI_TIMER)
3208 else if (si_code == SI_SIGIO)
3210 else if (si_code < 0)
3216 static inline char __user *si_expansion(const siginfo_t __user *info)
3218 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3221 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3223 char __user *expansion = si_expansion(to);
3224 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3226 if (clear_user(expansion, SI_EXPANSION_SIZE))
3231 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3232 const siginfo_t __user *from)
3234 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3235 char __user *expansion = si_expansion(from);
3236 char buf[SI_EXPANSION_SIZE];
3239 * An unknown si_code might need more than
3240 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3241 * extra bytes are 0. This guarantees copy_siginfo_to_user
3242 * will return this data to userspace exactly.
3244 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3246 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3254 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3255 const siginfo_t __user *from)
3257 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3259 to->si_signo = signo;
3260 return post_copy_siginfo_from_user(to, from);
3263 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3265 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3267 return post_copy_siginfo_from_user(to, from);
3270 #ifdef CONFIG_COMPAT
3272 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3273 * @to: compat siginfo destination
3274 * @from: kernel siginfo source
3276 * Note: This function does not work properly for the SIGCHLD on x32, but
3277 * fortunately it doesn't have to. The only valid callers for this function are
3278 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3279 * The latter does not care because SIGCHLD will never cause a coredump.
3281 void copy_siginfo_to_external32(struct compat_siginfo *to,
3282 const struct kernel_siginfo *from)
3284 memset(to, 0, sizeof(*to));
3286 to->si_signo = from->si_signo;
3287 to->si_errno = from->si_errno;
3288 to->si_code = from->si_code;
3289 switch (siginfo_layout(from->si_signo, from->si_code)) {
3291 to->si_pid = from->si_pid;
3292 to->si_uid = from->si_uid;
3295 to->si_tid = from->si_tid;
3296 to->si_overrun = from->si_overrun;
3297 to->si_int = from->si_int;
3300 to->si_band = from->si_band;
3301 to->si_fd = from->si_fd;
3304 to->si_addr = ptr_to_compat(from->si_addr);
3305 #ifdef __ARCH_SI_TRAPNO
3306 to->si_trapno = from->si_trapno;
3309 case SIL_FAULT_MCEERR:
3310 to->si_addr = ptr_to_compat(from->si_addr);
3311 #ifdef __ARCH_SI_TRAPNO
3312 to->si_trapno = from->si_trapno;
3314 to->si_addr_lsb = from->si_addr_lsb;
3316 case SIL_FAULT_BNDERR:
3317 to->si_addr = ptr_to_compat(from->si_addr);
3318 #ifdef __ARCH_SI_TRAPNO
3319 to->si_trapno = from->si_trapno;
3321 to->si_lower = ptr_to_compat(from->si_lower);
3322 to->si_upper = ptr_to_compat(from->si_upper);
3324 case SIL_FAULT_PKUERR:
3325 to->si_addr = ptr_to_compat(from->si_addr);
3326 #ifdef __ARCH_SI_TRAPNO
3327 to->si_trapno = from->si_trapno;
3329 to->si_pkey = from->si_pkey;
3332 to->si_pid = from->si_pid;
3333 to->si_uid = from->si_uid;
3334 to->si_status = from->si_status;
3335 to->si_utime = from->si_utime;
3336 to->si_stime = from->si_stime;
3339 to->si_pid = from->si_pid;
3340 to->si_uid = from->si_uid;
3341 to->si_int = from->si_int;
3344 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3345 to->si_syscall = from->si_syscall;
3346 to->si_arch = from->si_arch;
3351 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3352 const struct kernel_siginfo *from)
3354 struct compat_siginfo new;
3356 copy_siginfo_to_external32(&new, from);
3357 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3362 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3363 const struct compat_siginfo *from)
3366 to->si_signo = from->si_signo;
3367 to->si_errno = from->si_errno;
3368 to->si_code = from->si_code;
3369 switch (siginfo_layout(from->si_signo, from->si_code)) {
3371 to->si_pid = from->si_pid;
3372 to->si_uid = from->si_uid;
3375 to->si_tid = from->si_tid;
3376 to->si_overrun = from->si_overrun;
3377 to->si_int = from->si_int;
3380 to->si_band = from->si_band;
3381 to->si_fd = from->si_fd;
3384 to->si_addr = compat_ptr(from->si_addr);
3385 #ifdef __ARCH_SI_TRAPNO
3386 to->si_trapno = from->si_trapno;
3389 case SIL_FAULT_MCEERR:
3390 to->si_addr = compat_ptr(from->si_addr);
3391 #ifdef __ARCH_SI_TRAPNO
3392 to->si_trapno = from->si_trapno;
3394 to->si_addr_lsb = from->si_addr_lsb;
3396 case SIL_FAULT_BNDERR:
3397 to->si_addr = compat_ptr(from->si_addr);
3398 #ifdef __ARCH_SI_TRAPNO
3399 to->si_trapno = from->si_trapno;
3401 to->si_lower = compat_ptr(from->si_lower);
3402 to->si_upper = compat_ptr(from->si_upper);
3404 case SIL_FAULT_PKUERR:
3405 to->si_addr = compat_ptr(from->si_addr);
3406 #ifdef __ARCH_SI_TRAPNO
3407 to->si_trapno = from->si_trapno;
3409 to->si_pkey = from->si_pkey;
3412 to->si_pid = from->si_pid;
3413 to->si_uid = from->si_uid;
3414 to->si_status = from->si_status;
3415 #ifdef CONFIG_X86_X32_ABI
3416 if (in_x32_syscall()) {
3417 to->si_utime = from->_sifields._sigchld_x32._utime;
3418 to->si_stime = from->_sifields._sigchld_x32._stime;
3422 to->si_utime = from->si_utime;
3423 to->si_stime = from->si_stime;
3427 to->si_pid = from->si_pid;
3428 to->si_uid = from->si_uid;
3429 to->si_int = from->si_int;
3432 to->si_call_addr = compat_ptr(from->si_call_addr);
3433 to->si_syscall = from->si_syscall;
3434 to->si_arch = from->si_arch;
3440 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3441 const struct compat_siginfo __user *ufrom)
3443 struct compat_siginfo from;
3445 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3448 from.si_signo = signo;
3449 return post_copy_siginfo_from_user32(to, &from);
3452 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3453 const struct compat_siginfo __user *ufrom)
3455 struct compat_siginfo from;
3457 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3460 return post_copy_siginfo_from_user32(to, &from);
3462 #endif /* CONFIG_COMPAT */
3465 * do_sigtimedwait - wait for queued signals specified in @which
3466 * @which: queued signals to wait for
3467 * @info: if non-null, the signal's siginfo is returned here
3468 * @ts: upper bound on process time suspension
3470 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3471 const struct timespec64 *ts)
3473 ktime_t *to = NULL, timeout = KTIME_MAX;
3474 struct task_struct *tsk = current;
3475 sigset_t mask = *which;
3479 if (!timespec64_valid(ts))
3481 timeout = timespec64_to_ktime(*ts);
3486 * Invert the set of allowed signals to get those we want to block.
3488 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3491 spin_lock_irq(&tsk->sighand->siglock);
3492 sig = dequeue_signal(tsk, &mask, info);
3493 if (!sig && timeout) {
3495 * None ready, temporarily unblock those we're interested in
3496 * while we are sleeping, so that we'll be awakened when
3497 * they arrive. Unblocking is always fine, we can avoid
3498 * set_current_blocked().
3500 tsk->real_blocked = tsk->blocked;
3501 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3502 recalc_sigpending();
3503 spin_unlock_irq(&tsk->sighand->siglock);
3505 __set_current_state(TASK_INTERRUPTIBLE);
3506 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3508 spin_lock_irq(&tsk->sighand->siglock);
3509 __set_task_blocked(tsk, &tsk->real_blocked);
3510 sigemptyset(&tsk->real_blocked);
3511 sig = dequeue_signal(tsk, &mask, info);
3513 spin_unlock_irq(&tsk->sighand->siglock);
3517 return ret ? -EINTR : -EAGAIN;
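/*
 * Illustrative sketch, not part of this file: the usual userspace pattern
 * on top of this - block the signals of interest first, then dequeue them
 * synchronously with a timeout:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// must be blocked first
 *	if (sigtimedwait(&set, &info, &ts) > 0)
 *		printf("sig %d from pid %d\n", info.si_signo, (int)info.si_pid);
 *	// on timeout it returns -1 with errno == EAGAIN, matching -EAGAIN here
 */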
3521 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3523 * @uthese: queued signals to wait for
3524 * @uinfo: if non-null, the signal's siginfo is returned here
3525 * @uts: upper bound on process time suspension
3526 * @sigsetsize: size of sigset_t type
3528 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3529 siginfo_t __user *, uinfo,
3530 const struct __kernel_timespec __user *, uts,
3534 struct timespec64 ts;
3535 kernel_siginfo_t info;
3538 /* XXX: Don't preclude handling different sized sigset_t's. */
3539 if (sigsetsize != sizeof(sigset_t))
3542 if (copy_from_user(&these, uthese, sizeof(these)))
3546 if (get_timespec64(&ts, uts))
3550 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3552 if (ret > 0 && uinfo) {
3553 if (copy_siginfo_to_user(uinfo, &info))
3560 #ifdef CONFIG_COMPAT_32BIT_TIME
3561 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3562 siginfo_t __user *, uinfo,
3563 const struct old_timespec32 __user *, uts,
3567 struct timespec64 ts;
3568 kernel_siginfo_t info;
3571 if (sigsetsize != sizeof(sigset_t))
3574 if (copy_from_user(&these, uthese, sizeof(these)))
3578 if (get_old_timespec32(&ts, uts))
3582 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3584 if (ret > 0 && uinfo) {
3585 if (copy_siginfo_to_user(uinfo, &info))
3593 #ifdef CONFIG_COMPAT
3594 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3595 struct compat_siginfo __user *, uinfo,
3596 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3599 struct timespec64 t;
3600 kernel_siginfo_t info;
3603 if (sigsetsize != sizeof(sigset_t))
3606 if (get_compat_sigset(&s, uthese))
3610 if (get_timespec64(&t, uts))
3614 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3616 if (ret > 0 && uinfo) {
3617 if (copy_siginfo_to_user32(uinfo, &info))
3624 #ifdef CONFIG_COMPAT_32BIT_TIME
3625 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3626 struct compat_siginfo __user *, uinfo,
3627 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3630 struct timespec64 t;
3631 kernel_siginfo_t info;
3634 if (sigsetsize != sizeof(sigset_t))
3637 if (get_compat_sigset(&s, uthese))
3641 if (get_old_timespec32(&t, uts))
3645 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3647 if (ret > 0 && uinfo) {
3648 if (copy_siginfo_to_user32(uinfo, &info))
3657 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3659 clear_siginfo(info);
3660 info->si_signo = sig;
3662 info->si_code = SI_USER;
3663 info->si_pid = task_tgid_vnr(current);
3664 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3668 * sys_kill - send a signal to a process
3669 * @pid: the PID of the process
3670 * @sig: signal to be sent
3672 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3674 struct kernel_siginfo info;
3676 prepare_kill_siginfo(sig, &info);
3678 return kill_something_info(sig, &info, pid);
3682 * Verify that the signaler and signalee either are in the same pid namespace
3683 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3686 static bool access_pidfd_pidns(struct pid *pid)
3688 struct pid_namespace *active = task_active_pid_ns(current);
3689 struct pid_namespace *p = ns_of_pid(pid);
3702 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3704 #ifdef CONFIG_COMPAT
3706 * Avoid hooking up compat syscalls and instead handle necessary
3707 * conversions here. Note, this is a stop-gap measure and should not be
3708 * considered a generic solution.
3710 if (in_compat_syscall())
3711 return copy_siginfo_from_user32(
3712 kinfo, (struct compat_siginfo __user *)info);
3714 return copy_siginfo_from_user(kinfo, info);
3717 static struct pid *pidfd_to_pid(const struct file *file)
3721 pid = pidfd_pid(file);
3725 return tgid_pidfd_to_pid(file);
3729 * sys_pidfd_send_signal - Signal a process through a pidfd
3730 * @pidfd: file descriptor of the process
3731 * @sig: signal to send
3732 * @info: signal info
3733 * @flags: future flags
3735 * The syscall currently only signals via PIDTYPE_PID which covers
3736 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3738 * In order to extend the syscall to threads and process groups the @flags
3739 * argument should be used. In essence, the @flags argument will determine
3740 * what is signaled and not the file descriptor itself. Put in other words,
3741 * grouping is a property of the flags argument, not a property of the file descriptor.
3744 * Return: 0 on success, negative errno on failure
3746 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3747 siginfo_t __user *, info, unsigned int, flags)
3752 kernel_siginfo_t kinfo;
3754 /* Enforce that flags is 0 until we add an extension. */
3762 /* Is this a pidfd? */
3763 pid = pidfd_to_pid(f.file);
3770 if (!access_pidfd_pidns(pid))
3774 ret = copy_siginfo_from_user_any(&kinfo, info);
3779 if (unlikely(sig != kinfo.si_signo))
3782 /* Only allow sending arbitrary signals to yourself. */
3784 if ((task_pid(current) != pid) &&
3785 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3788 prepare_kill_siginfo(sig, &kinfo);
3791 ret = kill_pid_info(sig, &kinfo, pid);
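/*
 * Illustrative sketch, not part of this file: userspace usage via the raw
 * syscall (a glibc wrapper appeared only later, and SYS_pidfd_send_signal
 * assumes recent kernel headers). "/proc/1234" stands in for the target's
 * /proc directory, which doubles as a pidfd; @flags must be 0, as
 * enforced above:
 *
 *	#include <sys/syscall.h>
 *	#include <signal.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int pidfd = open("/proc/1234", O_DIRECTORY | O_CLOEXEC);
 *	syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *	close(pidfd);
 */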
3799 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3801 struct task_struct *p;
3805 p = find_task_by_vpid(pid);
3806 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3807 error = check_kill_permission(sig, info, p);
3809 * The null signal is a permissions and process existence
3810 * probe. No signal is actually delivered.
3812 if (!error && sig) {
3813 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3815 * If lock_task_sighand() failed we pretend the task
3816 * dies after receiving the signal. The window is tiny,
3817 * and the signal is private anyway.
3819 if (unlikely(error == -ESRCH))
3828 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3830 struct kernel_siginfo info;
3832 clear_siginfo(&info);
3833 info.si_signo = sig;
3835 info.si_code = SI_TKILL;
3836 info.si_pid = task_tgid_vnr(current);
3837 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3839 return do_send_specific(tgid, pid, sig, &info);
3843 * sys_tgkill - send signal to one specific thread
3844 * @tgid: the thread group ID of the thread
3845 * @pid: the PID of the thread
3846 * @sig: signal to be sent
3848 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3849 * exists but does not belong to the target process anymore. This
3850 * method solves the problem of threads exiting and PIDs getting reused.
3852 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3854 /* This is only valid for single tasks */
3855 if (pid <= 0 || tgid <= 0)
3858 return do_tkill(tgid, pid, sig);
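/*
 * Illustrative sketch, not part of this file: directing a signal at one
 * thread from userspace; passing the thread group id guards against the
 * tid having been recycled into an unrelated process. The tid value is a
 * placeholder:
 *
 *	#include <sys/syscall.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	pid_t tid = 4321;	// example: target thread's kernel tid
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */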
3862 * sys_tkill - send signal to one specific task
3863 * @pid: the PID of the task
3864 * @sig: signal to be sent
3866 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3868 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3870 /* This is only valid for single tasks */
3874 return do_tkill(0, pid, sig);
3877 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3879 /* Not even root can pretend to send signals from the kernel.
3880 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3882 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3883 (task_pid_vnr(current) != pid))
3886 /* POSIX.1b doesn't mention process groups. */
3887 return kill_proc_info(sig, info, pid);
3891 * sys_rt_sigqueueinfo - queue a signal and accompanying siginfo to a process
3892 * @pid: the PID of the thread
3893 * @sig: signal to be sent
3894 * @uinfo: signal info to be sent
3896 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3897 siginfo_t __user *, uinfo)
3899 kernel_siginfo_t info;
3900 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3903 return do_rt_sigqueueinfo(pid, sig, &info);
3906 #ifdef CONFIG_COMPAT
3907 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3910 struct compat_siginfo __user *, uinfo)
3912 kernel_siginfo_t info;
3913 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3916 return do_rt_sigqueueinfo(pid, sig, &info);
3920 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3922 /* This is only valid for single tasks */
3923 if (pid <= 0 || tgid <= 0)
3926 /* Not even root can pretend to send signals from the kernel.
3927 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3929 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3930 (task_pid_vnr(current) != pid))
3933 return do_send_specific(tgid, pid, sig, info);
3936 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3937 siginfo_t __user *, uinfo)
3939 kernel_siginfo_t info;
3940 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3943 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3946 #ifdef CONFIG_COMPAT
3947 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3951 struct compat_siginfo __user *, uinfo)
3953 kernel_siginfo_t info;
3954 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3957 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3962 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3964 void kernel_sigaction(int sig, __sighandler_t action)
3966 spin_lock_irq(&current->sighand->siglock);
3967 current->sighand->action[sig - 1].sa.sa_handler = action;
3968 if (action == SIG_IGN) {
3972 sigaddset(&mask, sig);
3974 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3975 flush_sigqueue_mask(&mask, &current->pending);
3976 recalc_sigpending();
3978 spin_unlock_irq(&current->sighand->siglock);
3980 EXPORT_SYMBOL(kernel_sigaction);
3982 void __weak sigaction_compat_abi(struct k_sigaction *act,
3983 struct k_sigaction *oact)
3987 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3989 struct task_struct *p = current, *t;
3990 struct k_sigaction *k;
3993 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3996 k = &p->sighand->action[sig-1];
3998 spin_lock_irq(&p->sighand->siglock);
4002 sigaction_compat_abi(act, oact);
4005 sigdelsetmask(&act->sa.sa_mask,
4006 sigmask(SIGKILL) | sigmask(SIGSTOP));
4010 * "Setting a signal action to SIG_IGN for a signal that is
4011 * pending shall cause the pending signal to be discarded,
4012 * whether or not it is blocked."
4014 * "Setting a signal action to SIG_DFL for a signal that is
4015 * pending and whose default action is to ignore the signal
4016 * (for example, SIGCHLD), shall cause the pending signal to
4017 * be discarded, whether or not it is blocked"
4019 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4021 sigaddset(&mask, sig);
4022 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4023 for_each_thread(p, t)
4024 flush_sigqueue_mask(&mask, &t->pending);
4028 spin_unlock_irq(&p->sighand->siglock);
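/*
 * Illustrative sketch, not part of this file: the POSIX discard rule
 * implemented above, observed from userspace - a blocked, pending signal
 * vanishes once its disposition becomes "ignore":
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);			// pending and blocked
 *	signal(SIGUSR1, SIG_IGN);	// discards the pending instance
 *	sigpending(&set);
 *	printf("%d\n", sigismember(&set, SIGUSR1));	// prints 0
 */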
4033 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4036 struct task_struct *t = current;
4039 memset(oss, 0, sizeof(stack_t));
4040 oss->ss_sp = (void __user *) t->sas_ss_sp;
4041 oss->ss_size = t->sas_ss_size;
4042 oss->ss_flags = sas_ss_flags(sp) |
4043 (current->sas_ss_flags & SS_FLAG_BITS);
4047 void __user *ss_sp = ss->ss_sp;
4048 size_t ss_size = ss->ss_size;
4049 unsigned ss_flags = ss->ss_flags;
4052 if (unlikely(on_sig_stack(sp)))
4055 ss_mode = ss_flags & ~SS_FLAG_BITS;
4056 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4060 if (ss_mode == SS_DISABLE) {
4064 if (unlikely(ss_size < min_ss_size))
4068 t->sas_ss_sp = (unsigned long) ss_sp;
4069 t->sas_ss_size = ss_size;
4070 t->sas_ss_flags = ss_flags;
4075 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4079 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4081 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4082 current_user_stack_pointer(),
4084 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
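/*
 * Illustrative sketch, not part of this file: the canonical pairing of
 * sigaltstack() with an SA_ONSTACK handler, without which a handler for
 * a stack-overflow SIGSEGV would have no stack to run on. 'segv_handler'
 * is assumed to be defined elsewhere:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa;
 *
 *	sigaltstack(&ss, NULL);
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;	// run this handler on the alt stack
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */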
4089 int restore_altstack(const stack_t __user *uss)
4092 if (copy_from_user(&new, uss, sizeof(stack_t)))
4094 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4096 /* squash all but EFAULT for now */
4100 int __save_altstack(stack_t __user *uss, unsigned long sp)
4102 struct task_struct *t = current;
4103 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4104 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4105 __put_user(t->sas_ss_size, &uss->ss_size);
4108 if (t->sas_ss_flags & SS_AUTODISARM)
4113 #ifdef CONFIG_COMPAT
4114 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4115 compat_stack_t __user *uoss_ptr)
4121 compat_stack_t uss32;
4122 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4124 uss.ss_sp = compat_ptr(uss32.ss_sp);
4125 uss.ss_flags = uss32.ss_flags;
4126 uss.ss_size = uss32.ss_size;
4128 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4129 compat_user_stack_pointer(),
4130 COMPAT_MINSIGSTKSZ);
4131 if (ret >= 0 && uoss_ptr) {
4133 memset(&old, 0, sizeof(old));
4134 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4135 old.ss_flags = uoss.ss_flags;
4136 old.ss_size = uoss.ss_size;
4137 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4143 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4144 const compat_stack_t __user *, uss_ptr,
4145 compat_stack_t __user *, uoss_ptr)
4147 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4150 int compat_restore_altstack(const compat_stack_t __user *uss)
4152 int err = do_compat_sigaltstack(uss, NULL);
4153 /* squash all but -EFAULT for now */
4154 return err == -EFAULT ? err : 0;
4157 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4160 struct task_struct *t = current;
4161 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4163 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4164 __put_user(t->sas_ss_size, &uss->ss_size);
4167 if (t->sas_ss_flags & SS_AUTODISARM)
4173 #ifdef __ARCH_WANT_SYS_SIGPENDING
4176 * sys_sigpending - examine pending signals
4177 * @uset: where mask of pending signal is returned
4179 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4183 if (sizeof(old_sigset_t) > sizeof(*uset))
4186 do_sigpending(&set);
4188 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4194 #ifdef CONFIG_COMPAT
4195 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4199 do_sigpending(&set);
4201 return put_user(set.sig[0], set32);
4207 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4209 * sys_sigprocmask - examine and change blocked signals
4210 * @how: whether to add, remove, or set signals
4211 * @nset: signals to add or remove (if non-null)
4212 * @oset: previous value of signal mask if non-null
4214 * Some platforms have their own version with special arguments;
4215 * others support only sys_rt_sigprocmask.
4218 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4219 old_sigset_t __user *, oset)
4221 old_sigset_t old_set, new_set;
4222 sigset_t new_blocked;
4224 old_set = current->blocked.sig[0];
4227 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4230 new_blocked = current->blocked;
4234 sigaddsetmask(&new_blocked, new_set);
4237 sigdelsetmask(&new_blocked, new_set);
4240 new_blocked.sig[0] = new_set;
4246 set_current_blocked(&new_blocked);
4250 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4256 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4258 #ifndef CONFIG_ODD_RT_SIGACTION
4260 * sys_rt_sigaction - alter an action taken by a process
4261 * @sig: signal whose action is to be changed
4262 * @act: new sigaction
4263 * @oact: used to save the previous sigaction
4264 * @sigsetsize: size of sigset_t type
4266 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4267 const struct sigaction __user *, act,
4268 struct sigaction __user *, oact,
4271 struct k_sigaction new_sa, old_sa;
4274 /* XXX: Don't preclude handling different sized sigset_t's. */
4275 if (sigsetsize != sizeof(sigset_t))
4278 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4281 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4285 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4290 #ifdef CONFIG_COMPAT
4291 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4292 const struct compat_sigaction __user *, act,
4293 struct compat_sigaction __user *, oact,
4294 compat_size_t, sigsetsize)
4296 struct k_sigaction new_ka, old_ka;
4297 #ifdef __ARCH_HAS_SA_RESTORER
4298 compat_uptr_t restorer;
4302 /* XXX: Don't preclude handling different sized sigset_t's. */
4303 if (sigsetsize != sizeof(compat_sigset_t))
4307 compat_uptr_t handler;
4308 ret = get_user(handler, &act->sa_handler);
4309 new_ka.sa.sa_handler = compat_ptr(handler);
4310 #ifdef __ARCH_HAS_SA_RESTORER
4311 ret |= get_user(restorer, &act->sa_restorer);
4312 new_ka.sa.sa_restorer = compat_ptr(restorer);
4314 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4315 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4320 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4322 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4324 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4325 sizeof(oact->sa_mask));
4326 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4327 #ifdef __ARCH_HAS_SA_RESTORER
4328 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4329 &oact->sa_restorer);
4335 #endif /* !CONFIG_ODD_RT_SIGACTION */
4337 #ifdef CONFIG_OLD_SIGACTION
4338 SYSCALL_DEFINE3(sigaction, int, sig,
4339 const struct old_sigaction __user *, act,
4340 struct old_sigaction __user *, oact)
4342 struct k_sigaction new_ka, old_ka;
4347 if (!access_ok(act, sizeof(*act)) ||
4348 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4349 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4350 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4351 __get_user(mask, &act->sa_mask))
4353 #ifdef __ARCH_HAS_KA_RESTORER
4354 new_ka.ka_restorer = NULL;
4356 siginitset(&new_ka.sa.sa_mask, mask);
4359 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4362 if (!access_ok(oact, sizeof(*oact)) ||
4363 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4364 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4365 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4366 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4373 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4374 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4375 const struct compat_old_sigaction __user *, act,
4376 struct compat_old_sigaction __user *, oact)
4378 struct k_sigaction new_ka, old_ka;
4380 compat_old_sigset_t mask;
4381 compat_uptr_t handler, restorer;
4384 if (!access_ok(act, sizeof(*act)) ||
4385 __get_user(handler, &act->sa_handler) ||
4386 __get_user(restorer, &act->sa_restorer) ||
4387 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4388 __get_user(mask, &act->sa_mask))
4391 #ifdef __ARCH_HAS_KA_RESTORER
4392 new_ka.ka_restorer = NULL;
4394 new_ka.sa.sa_handler = compat_ptr(handler);
4395 new_ka.sa.sa_restorer = compat_ptr(restorer);
4396 siginitset(&new_ka.sa.sa_mask, mask);
4399 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4402 if (!access_ok(oact, sizeof(*oact)) ||
4403 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4404 &oact->sa_handler) ||
4405 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4406 &oact->sa_restorer) ||
4407 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4408 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4415 #ifdef CONFIG_SGETMASK_SYSCALL
4418 * For backwards compatibility. Functionality superseded by sigprocmask.
4420 SYSCALL_DEFINE0(sgetmask)
4423 return current->blocked.sig[0];
4426 SYSCALL_DEFINE1(ssetmask, int, newmask)
4428 int old = current->blocked.sig[0];
4431 siginitset(&newset, newmask);
4432 set_current_blocked(&newset);
4436 #endif /* CONFIG_SGETMASK_SYSCALL */
4438 #ifdef __ARCH_WANT_SYS_SIGNAL
4440 * For backwards compatibility. Functionality superseded by sigaction.
4442 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4444 struct k_sigaction new_sa, old_sa;
4447 new_sa.sa.sa_handler = handler;
4448 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4449 sigemptyset(&new_sa.sa.sa_mask);
4451 ret = do_sigaction(sig, &new_sa, &old_sa);
4453 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4455 #endif /* __ARCH_WANT_SYS_SIGNAL */
4457 #ifdef __ARCH_WANT_SYS_PAUSE
4459 SYSCALL_DEFINE0(pause)
4461 while (!signal_pending(current)) {
4462 __set_current_state(TASK_INTERRUPTIBLE);
4465 return -ERESTARTNOHAND;
4470 static int sigsuspend(sigset_t *set)
4472 current->saved_sigmask = current->blocked;
4473 set_current_blocked(set);
4475 while (!signal_pending(current)) {
4476 __set_current_state(TASK_INTERRUPTIBLE);
4479 set_restore_sigmask();
4480 return -ERESTARTNOHAND;
4484 * sys_rt_sigsuspend - replace the signal mask with @unewset and suspend
4485 * until a signal is received
4486 * @unewset: new signal mask value
4487 * @sigsetsize: size of sigset_t type
4489 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4493 /* XXX: Don't preclude handling different sized sigset_t's. */
4494 if (sigsetsize != sizeof(sigset_t))
4497 if (copy_from_user(&newset, unewset, sizeof(newset)))
4499 return sigsuspend(&newset);
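/*
 * Illustrative sketch, not part of this file: the classic race-free wait
 * sigsuspend() exists for - swapping the mask and going to sleep happen
 * atomically, so a signal arriving "in between" cannot be lost. 'done' is
 * a volatile sig_atomic_t flag assumed to be set by the signal handler:
 *
 *	#include <signal.h>
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);	// hold SIGUSR1 pending
 *	while (!done)
 *		sigsuspend(&old);		// unblock + sleep atomically
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */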
4502 #ifdef CONFIG_COMPAT
4503 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4507 /* XXX: Don't preclude handling different sized sigset_t's. */
4508 if (sigsetsize != sizeof(sigset_t))
4511 if (get_compat_sigset(&newset, unewset))
4513 return sigsuspend(&newset);
4517 #ifdef CONFIG_OLD_SIGSUSPEND
4518 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4521 siginitset(&blocked, mask);
4522 return sigsuspend(&blocked);
4525 #ifdef CONFIG_OLD_SIGSUSPEND3
4526 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4529 siginitset(&blocked, mask);
4530 return sigsuspend(&blocked);
4534 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4539 static inline void siginfo_buildtime_checks(void)
4541 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4543 /* Verify the offsets in the two siginfos match */
4544 #define CHECK_OFFSET(field) \
4545 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4548 CHECK_OFFSET(si_pid);
4549 CHECK_OFFSET(si_uid);
4552 CHECK_OFFSET(si_tid);
4553 CHECK_OFFSET(si_overrun);
4554 CHECK_OFFSET(si_value);
4557 CHECK_OFFSET(si_pid);
4558 CHECK_OFFSET(si_uid);
4559 CHECK_OFFSET(si_value);
4562 CHECK_OFFSET(si_pid);
4563 CHECK_OFFSET(si_uid);
4564 CHECK_OFFSET(si_status);
4565 CHECK_OFFSET(si_utime);
4566 CHECK_OFFSET(si_stime);
4569 CHECK_OFFSET(si_addr);
4570 CHECK_OFFSET(si_addr_lsb);
4571 CHECK_OFFSET(si_lower);
4572 CHECK_OFFSET(si_upper);
4573 CHECK_OFFSET(si_pkey);
4576 CHECK_OFFSET(si_band);
4577 CHECK_OFFSET(si_fd);
4580 CHECK_OFFSET(si_call_addr);
4581 CHECK_OFFSET(si_syscall);
4582 CHECK_OFFSET(si_arch);
4586 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4587 offsetof(struct siginfo, si_addr));
4588 if (sizeof(int) == sizeof(void __user *)) {
4589 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4590 sizeof(void __user *));
4592 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4593 sizeof_field(struct siginfo, si_uid)) !=
4594 sizeof(void __user *));
4595 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4596 offsetof(struct siginfo, si_uid));
4598 #ifdef CONFIG_COMPAT
4599 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4600 offsetof(struct compat_siginfo, si_addr));
4601 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4602 sizeof(compat_uptr_t));
4603 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4604 sizeof_field(struct siginfo, si_pid));
4608 void __init signals_init(void)
4610 siginfo_buildtime_checks();
4612 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4615 #ifdef CONFIG_KGDB_KDB
4616 #include <linux/kdb.h>
4618 * kdb_send_sig - Allows kdb to send signals without exposing
4619 * signal internals. This function checks if the required locks are
4620 * available before calling the main signal code, to avoid kdb deadlocks.
4623 void kdb_send_sig(struct task_struct *t, int sig)
4625 static struct task_struct *kdb_prev_t;
4627 if (!spin_trylock(&t->sighand->siglock)) {
4628 kdb_printf("Can't do kill command now.\n"
4629 "The sigmask lock is held somewhere else in "
4630 "kernel, try again later\n");
4633 new_t = kdb_prev_t != t;
4635 if (t->state != TASK_RUNNING && new_t) {
4636 spin_unlock(&t->sighand->siglock);
4637 kdb_printf("Process is not RUNNING, sending a signal from "
4638 "kdb risks deadlock\n"
4639 "on the run queue locks. "
4640 "The signal has _not_ been sent.\n"
4641 "Reissue the kill command if you want to risk "
4645 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4646 spin_unlock(&t->sighand->siglock);
4648 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4651 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4653 #endif /* CONFIG_KGDB_KDB */