// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

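/*
 * Example: with a SIG_DFL handler, signals whose default action is to be
 * ignored (e.g. SIGCHLD or SIGWINCH) count as ignored here, while SIG_DFL
 * for a terminating signal such as SIGTERM does not.
 */
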
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

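/*
 * The switch in has_pending_signals() is a hand-unrolled word-wise
 * "pending & ~blocked" test: for the common _NSIG_WORDS values the loop
 * is flattened so the compiler can resolve it at build time.
 */
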
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only the callers who know they
	 * should do it.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}
EXPORT_SYMBOL(recalc_sigpending);

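/*
 * Callers typically invoke recalc_sigpending() after changing
 * current->blocked (e.g. on the sigprocmask() path), so TIF_SIGPENDING
 * is dropped once no unblocked signal remains pending.
 */
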
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

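/*
 * Synchronous signals report a fault by the current instruction, so they
 * are serviced ahead of anything else in the first word: e.g. a pending
 * SIGSEGV is dequeued before a pending SIGHUP.
 */
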
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING. If a stop signo is being set, the existing signo is
 * cleared. If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer. Note that we don't need any further
 * locking. @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop. Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
	if (sigpending == 1)
		ucounts = get_ucounts(ucounts);
	rcu_read_unlock();

	if (override_rlimit || (sigpending < LONG_MAX && sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (ucounts && dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
			put_ucounts(ucounts);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
		put_ucounts(q->ucounts);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

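/*
 * Each queued sigqueue is charged to the sender's RLIMIT_SIGPENDING via
 * the ucounts hierarchy; inc_rlimit_ucounts() in __sigqueue_alloc() and
 * dec_rlimit_ucounts() here are the two halves of that accounting.
 */
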
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* If ptraced, let the tracer determine. */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal. Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue. This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space. So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal. Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL). So those cases clear this
		 * shared flag after we've set it. Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled. That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks. Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

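/*
 * Typical caller pattern, as a sketch (get_signal() is the main
 * consumer):
 *
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */
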
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

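/*
 * SEND_SIG_NOINFO and SEND_SIG_PRIV are sentinel pointer values, not
 * real siginfo structures, which is why is_si_special() is a plain
 * pointer comparison: anything at or below SEND_SIG_PRIV carries no
 * caller-supplied siginfo.
 */
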
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the next
 * TRAP_STOP, to notify the ptracer of an event. @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken. If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event. If trapped otherwise, the STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

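/*
 * Note that complete_signal() only chooses a thread and wakes it; the
 * chosen thread performs the actual dequeue later. For a fatal,
 * non-coredump signal it instead marks the whole group for exit and
 * queues SIGKILL on every thread immediately.
 */
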
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

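/*
 * Example: a second SIGINT sent while one is already pending is silently
 * coalesced with it, whereas real-time signals (>= SIGRTMIN) queue one
 * entry per send.
 */
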
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort. We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information. We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong. The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the low
 * 32bits of the pointer. Those low 32bits will be stored at a higher
 * address than they appear in a 32 bit pointer, so userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

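/*
 * Example: send_sig(SIGHUP, p, 1) delivers a kernel-internal SIGHUP
 * (SEND_SIG_PRIV, seen by the receiver with si_code == SI_KERNEL), while
 * send_sig(SIGHUP, p, 0) looks like a signal from the current task
 * (si_code == SI_USER).
 */
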
void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

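/*
 * Sketch of typical driver usage (hypothetical caller, shown for
 * illustration only):
 *
 *	struct pid *pid = find_get_pid(nr);
 *
 *	if (pid) {
 *		kill_pid(pid, SIGTERM, 1);
 *		put_pid(pid);
 *	}
 */
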
/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create. If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

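/*
 * Intended life cycle, as used by the posix-timers code:
 *
 *	struct sigqueue *q = sigqueue_alloc();	// at timer_create()
 *	...
 *	send_sigqueue(q, pid, type);		// on each expiration
 *	...
 *	sigqueue_free(q);			// at timer_delete()
 */
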
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop. This is allowed to block, e.g. for faults
		 * on user stack pages. We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock. That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping. TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now. We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader. The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop. The states
		 * for the two don't interact with each other. Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		cgroup_enter_frozen();
		preempt_enable_no_resched();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here. During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back. Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

2271 static void ptrace_do_notify(int signr, int exit_code, int why)
2273 kernel_siginfo_t info;
2275 clear_siginfo(&info);
2276 info.si_signo = signr;
2277 info.si_code = exit_code;
2278 info.si_pid = task_pid_vnr(current);
2279 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2281 /* Let the debugger run. */
2282 ptrace_stop(exit_code, why, 1, &info);
2285 void ptrace_notify(int exit_code)
2287 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2288 if (unlikely(current->task_works))
2291 spin_lock_irq(&current->sighand->siglock);
2292 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2293 spin_unlock_irq(&current->sighand->siglock);
2297 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2298 * @signr: signr causing group stop if initiating
2300 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2301 * and participate in it. If already set, participate in the existing
2302 * group stop. If participated in a group stop (and thus slept), %true is
2303 * returned with siglock released.
2305 * If ptraced, this function doesn't handle stop itself. Instead,
2306 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2307 * untouched. The caller must ensure that INTERRUPT trap handling takes
2308 * place afterwards.
2311 * Must be called with @current->sighand->siglock held, which is released
2315 * %false if group stop is already cancelled or ptrace trap is scheduled.
2316 * %true if participated in group stop.
2318 static bool do_signal_stop(int signr)
2319 __releases(&current->sighand->siglock)
2321 struct signal_struct *sig = current->signal;
2323 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2324 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2325 struct task_struct *t;
2327 /* signr will be recorded in task->jobctl for retries */
2328 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2330 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2331 unlikely(signal_group_exit(sig)))
2334 * There is no group stop already in progress. We must
2337 * While ptraced, a task may be resumed while group stop is
2338 * still in effect and then receive a stop signal and
2339 * initiate another group stop. This deviates from the
2340 * usual behavior as two consecutive stop signals can't
2341 * cause two group stops when !ptraced. That is why we
2342 * also check !task_is_stopped(t) below.
2344 * The condition can be distinguished by testing whether
2345 * SIGNAL_STOP_STOPPED is already set. Don't generate
2346 * group_exit_code in such case.
2348 * This is not necessary for SIGNAL_STOP_CONTINUED because
2349 * an intervening stop signal is required to cause two
2350 * continued events regardless of ptrace.
2352 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2353 sig->group_exit_code = signr;
2355 sig->group_stop_count = 0;
2357 if (task_set_jobctl_pending(current, signr | gstop))
2358 sig->group_stop_count++;
2361 while_each_thread(current, t) {
2363 * Setting state to TASK_STOPPED for a group
2364 * stop is always done with the siglock held,
2365 * so this check has no races.
2367 if (!task_is_stopped(t) &&
2368 task_set_jobctl_pending(t, signr | gstop)) {
2369 sig->group_stop_count++;
2370 if (likely(!(t->ptrace & PT_SEIZED)))
2371 signal_wake_up(t, 0);
2373 ptrace_trap_notify(t);
2378 if (likely(!current->ptrace)) {
2382 * If there are no other threads in the group, or if there
2383 * is a group stop in progress and we are the last to stop,
2384 * report to the parent.
2386 if (task_participate_group_stop(current))
2387 notify = CLD_STOPPED;
2389 set_special_state(TASK_STOPPED);
2390 spin_unlock_irq(&current->sighand->siglock);
2393 * Notify the parent of the group stop completion. Because
2394 * we're not holding either the siglock or tasklist_lock
2395 * here, the ptracer may attach in between; however, this is for
2396 * group stop and should always be delivered to the real
2397 * parent of the group leader. The new ptracer will get
2398 * its notification when this task transitions into
2402 read_lock(&tasklist_lock);
2403 do_notify_parent_cldstop(current, false, notify);
2404 read_unlock(&tasklist_lock);
2407 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2408 cgroup_enter_frozen();
2409 freezable_schedule();
2413 * While ptraced, group stop is handled by STOP trap.
2414 * Schedule it and let the caller deal with it.
2416 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2422 * do_jobctl_trap - take care of ptrace jobctl traps
2424 * When PT_SEIZED, it's used for both group stop and explicit
2425 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2426 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2427 * the stop signal; otherwise, %SIGTRAP.
2429 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2430 * number as exit_code and no siginfo.
2433 * Must be called with @current->sighand->siglock held, which may be
2434 * released and re-acquired before returning with intervening sleep.
2436 static void do_jobctl_trap(void)
2438 struct signal_struct *signal = current->signal;
2439 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2441 if (current->ptrace & PT_SEIZED) {
2442 if (!signal->group_stop_count &&
2443 !(signal->flags & SIGNAL_STOP_STOPPED))
2445 WARN_ON_ONCE(!signr);
2446 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2449 WARN_ON_ONCE(!signr);
2450 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2451 current->exit_code = 0;
2456 * do_freezer_trap - handle the freezer jobctl trap
2458 * Puts the task into frozen state, unless the task is about to quit,
2459 * in which case it drops JOBCTL_TRAP_FREEZE instead.
2462 * Must be called with @current->sighand->siglock held,
2463 * which is always released before returning.
2465 static void do_freezer_trap(void)
2466 __releases(&current->sighand->siglock)
2469 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2470 * let's make another loop to give it a chance to be handled.
2471 * In any case, we'll return back.
2473 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2474 JOBCTL_TRAP_FREEZE) {
2475 spin_unlock_irq(&current->sighand->siglock);
2480 * Now we're sure that there is no pending fatal signal and no
2481 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2482 * immediately (if there is a non-fatal signal pending), and
2483 * put the task into sleep.
2485 __set_current_state(TASK_INTERRUPTIBLE);
2486 clear_thread_flag(TIF_SIGPENDING);
2487 spin_unlock_irq(&current->sighand->siglock);
2488 cgroup_enter_frozen();
2489 freezable_schedule();
2492 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2495 * We do not check sig_kernel_stop(signr) but set this marker
2496 * unconditionally because we do not know whether debugger will
2497 * change signr. This flag has no meaning unless we are going
2498 * to stop after return from ptrace_stop(). In this case it will
2499 * be checked in do_signal_stop(), we should only stop if it was
2500 * not cleared by SIGCONT while we were sleeping. See also the
2501 * comment in dequeue_signal().
2503 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2504 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2506 /* We're back. Did the debugger cancel the sig? */
2507 signr = current->exit_code;
2511 current->exit_code = 0;
2514 * Update the siginfo structure if the signal has
2515 * changed. If the debugger wanted something
2516 * specific in the siginfo structure then it should
2517 * have updated *info via PTRACE_SETSIGINFO.
2519 if (signr != info->si_signo) {
2520 clear_siginfo(info);
2521 info->si_signo = signr;
2523 info->si_code = SI_USER;
2525 info->si_pid = task_pid_vnr(current->parent);
2526 info->si_uid = from_kuid_munged(current_user_ns(),
2527 task_uid(current->parent));
2531 /* If the (new) signal is now blocked, requeue it. */
2532 if (sigismember(&current->blocked, signr)) {
2533 send_signal(signr, info, current, PIDTYPE_PID);
2540 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2542 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2544 case SIL_FAULT_TRAPNO:
2545 case SIL_FAULT_MCEERR:
2546 case SIL_FAULT_BNDERR:
2547 case SIL_FAULT_PKUERR:
2548 case SIL_PERF_EVENT:
2549 ksig->info.si_addr = arch_untagged_si_addr(
2550 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2562 bool get_signal(struct ksignal *ksig)
2564 struct sighand_struct *sighand = current->sighand;
2565 struct signal_struct *signal = current->signal;
2568 if (unlikely(current->task_works))
2572 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2573 * that the arch handlers don't all have to do it. If we get here
2574 * without TIF_SIGPENDING, just exit after running signal work.
2576 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2577 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2578 tracehook_notify_signal();
2579 if (!task_sigpending(current))
2583 if (unlikely(uprobe_deny_signal()))
2587 * Do this once, we can't return to user-mode if freezing() == T.
2588 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2589 * thus do not need another check after return.
2594 spin_lock_irq(&sighand->siglock);
2597 * Every stopped thread goes here after wakeup. Check to see if
2598 * we should notify the parent, prepare_signal(SIGCONT) encodes
2599 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2601 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2604 if (signal->flags & SIGNAL_CLD_CONTINUED)
2605 why = CLD_CONTINUED;
2609 signal->flags &= ~SIGNAL_CLD_MASK;
2611 spin_unlock_irq(&sighand->siglock);
2614 * Notify the parent that we're continuing. This event is
2615 * always per-process and doesn't make a whole lot of sense
2616 * for ptracers, who shouldn't consume the state via
2617 * wait(2) either, but, for backward compatibility, notify
2618 * the ptracer of the group leader too unless it's gonna be
2621 read_lock(&tasklist_lock);
2622 do_notify_parent_cldstop(current, false, why);
2624 if (ptrace_reparented(current->group_leader))
2625 do_notify_parent_cldstop(current->group_leader,
2627 read_unlock(&tasklist_lock);
2632 /* Has this task already been marked for death? */
2633 if (signal_group_exit(signal)) {
2634 ksig->info.si_signo = signr = SIGKILL;
2635 sigdelset(&current->pending.signal, SIGKILL);
2636 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2637 &sighand->action[SIGKILL - 1]);
2638 recalc_sigpending();
2643 struct k_sigaction *ka;
2645 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2649 if (unlikely(current->jobctl &
2650 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2651 if (current->jobctl & JOBCTL_TRAP_MASK) {
2653 spin_unlock_irq(&sighand->siglock);
2654 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2661 * If the task is leaving the frozen state, let's update
2662 * cgroup counters and reset the frozen bit.
2664 if (unlikely(cgroup_task_frozen(current))) {
2665 spin_unlock_irq(&sighand->siglock);
2666 cgroup_leave_frozen(false);
2671 * Signals generated by the execution of an instruction
2672 * need to be delivered before any other pending signals
2673 * so that the instruction pointer in the signal stack
2674 * frame points to the faulting instruction.
2676 signr = dequeue_synchronous_signal(&ksig->info);
2678 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2681 break; /* will return 0 */
2683 if (unlikely(current->ptrace) && signr != SIGKILL) {
2684 signr = ptrace_signal(signr, &ksig->info);
2689 ka = &sighand->action[signr-1];
2691 /* Trace actually delivered signals. */
2692 trace_signal_deliver(signr, &ksig->info, ka);
2694 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2696 if (ka->sa.sa_handler != SIG_DFL) {
2697 /* Run the handler. */
2700 if (ka->sa.sa_flags & SA_ONESHOT)
2701 ka->sa.sa_handler = SIG_DFL;
2703 break; /* will return non-zero "signr" value */
2707 * Now we are doing the default action for this signal.
2709 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2713 * Global init gets no signals it doesn't want.
2714 * Container-init gets no signals it doesn't want from same
2717 * Note that if global/container-init sees a sig_kernel_only()
2718 * signal here, the signal must have been generated internally
2719 * or must have come from an ancestor namespace. In either
2720 * case, the signal cannot be dropped.
2722 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2723 !sig_kernel_only(signr))
2726 if (sig_kernel_stop(signr)) {
2728 * The default action is to stop all threads in
2729 * the thread group. The job control signals
2730 * do nothing in an orphaned pgrp, but SIGSTOP
2731 * always works. Note that siglock needs to be
2732 * dropped during the call to is_orphaned_pgrp()
2733 * because of lock ordering with tasklist_lock.
2734 * This allows an intervening SIGCONT to be posted.
2735 * We need to check for that and bail out if necessary.
2737 if (signr != SIGSTOP) {
2738 spin_unlock_irq(&sighand->siglock);
2740 /* signals can be posted during this window */
2742 if (is_current_pgrp_orphaned())
2745 spin_lock_irq(&sighand->siglock);
2748 if (likely(do_signal_stop(ksig->info.si_signo))) {
2749 /* It released the siglock. */
2754 * We didn't actually stop, due to a race
2755 * with SIGCONT or something like that.
2761 spin_unlock_irq(&sighand->siglock);
2762 if (unlikely(cgroup_task_frozen(current)))
2763 cgroup_leave_frozen(true);
2766 * Anything else is fatal, maybe with a core dump.
2768 current->flags |= PF_SIGNALED;
2770 if (sig_kernel_coredump(signr)) {
2771 if (print_fatal_signals)
2772 print_fatal_signal(ksig->info.si_signo);
2773 proc_coredump_connector(current);
2775 * If it was able to dump core, this kills all
2776 * other threads in the group and synchronizes with
2777 * their demise. If we lost the race with another
2778 * thread getting here, it set group_exit_code
2779 * first and our do_group_exit call below will use
2780 * that value and ignore the one we pass it.
2782 do_coredump(&ksig->info);
2786 * PF_IO_WORKER threads will catch and exit on fatal signals
2787 * themselves. They have cleanup that must be performed, so
2788 * we cannot call do_exit() on their behalf.
2790 if (current->flags & PF_IO_WORKER)
2794 * Death signals, no core dump.
2796 do_group_exit(ksig->info.si_signo);
2799 spin_unlock_irq(&sighand->siglock);
2803 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2804 hide_si_addr_tag_bits(ksig);
2806 return ksig->sig > 0;
2810 * signal_delivered - finish bookkeeping after a signal has been delivered
2811 * @ksig: kernel signal struct
2812 * @stepping: nonzero if debugger single-step or block-step in use
2814 * This function should be called when a signal has successfully been
2815 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2816 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2817 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2819 static void signal_delivered(struct ksignal *ksig, int stepping)
2823 /* A signal was successfully delivered, and the
2824 saved sigmask was stored on the signal frame,
2825 and will be restored by sigreturn. So we can
2826 simply clear the restore sigmask flag. */
2827 clear_restore_sigmask();
2829 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2830 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2831 sigaddset(&blocked, ksig->sig);
2832 set_current_blocked(&blocked);
2833 if (current->sas_ss_flags & SS_AUTODISARM)
2834 sas_ss_reset(current);
2835 tracehook_signal_handler(stepping);
2838 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2841 force_sigsegv(ksig->sig);
2843 signal_delivered(ksig, stepping);
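/*
 * Usage sketch, not part of this file: how an architecture's signal
 * delivery path typically pairs get_signal() with signal_setup_done().
 * setup_rt_frame() is a placeholder name here; each architecture has its
 * own frame builder in arch/<arch>/kernel/signal.c, and TIF_SINGLESTEP
 * only exists where the arch defines it.
 *
 *	static void arch_do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			// Build the user-mode frame; nonzero means failure.
 *			int failed = setup_rt_frame(&ksig, regs);
 *
 *			signal_setup_done(failed, &ksig,
 *					  test_thread_flag(TIF_SINGLESTEP));
 *			return;
 *		}
 *		// No handler ran; put any saved sigmask back.
 *		restore_saved_sigmask();
 *	}
 */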
2847 * It could be that complete_signal() picked us to notify about the
2848 * group-wide signal. Other threads should be notified now to take
2849 * the shared signals in @which since we will not.
2851 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2854 struct task_struct *t;
2856 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2857 if (sigisemptyset(&retarget))
2861 while_each_thread(tsk, t) {
2862 if (t->flags & PF_EXITING)
2865 if (!has_pending_signals(&retarget, &t->blocked))
2867 /* Remove the signals this thread can handle. */
2868 sigandsets(&retarget, &retarget, &t->blocked);
2870 if (!task_sigpending(t))
2871 signal_wake_up(t, 0);
2873 if (sigisemptyset(&retarget))
2878 void exit_signals(struct task_struct *tsk)
2884 * @tsk is about to have PF_EXITING set - lock out users which
2885 * expect stable threadgroup.
2887 cgroup_threadgroup_change_begin(tsk);
2889 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2890 tsk->flags |= PF_EXITING;
2891 cgroup_threadgroup_change_end(tsk);
2895 spin_lock_irq(&tsk->sighand->siglock);
2897 * From now this task is not visible for group-wide signals,
2898 * see wants_signal(), do_signal_stop().
2900 tsk->flags |= PF_EXITING;
2902 cgroup_threadgroup_change_end(tsk);
2904 if (!task_sigpending(tsk))
2907 unblocked = tsk->blocked;
2908 signotset(&unblocked);
2909 retarget_shared_pending(tsk, &unblocked);
2911 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2912 task_participate_group_stop(tsk))
2913 group_stop = CLD_STOPPED;
2915 spin_unlock_irq(&tsk->sighand->siglock);
2918 * If group stop has completed, deliver the notification. This
2919 * should always go to the real parent of the group leader.
2921 if (unlikely(group_stop)) {
2922 read_lock(&tasklist_lock);
2923 do_notify_parent_cldstop(tsk, false, group_stop);
2924 read_unlock(&tasklist_lock);
2929 * System call entry points.
2933 * sys_restart_syscall - restart a system call
2935 SYSCALL_DEFINE0(restart_syscall)
2937 struct restart_block *restart = ¤t->restart_block;
2938 return restart->fn(restart);
2941 long do_no_restart_syscall(struct restart_block *param)
2946 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2948 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2949 sigset_t newblocked;
2950 /* A set of now blocked but previously unblocked signals. */
2951 sigandnsets(&newblocked, newset, &current->blocked);
2952 retarget_shared_pending(tsk, &newblocked);
2954 tsk->blocked = *newset;
2955 recalc_sigpending();
2959 * set_current_blocked - change current->blocked mask
2962 * It is wrong to change ->blocked directly; this helper should be used
2963 * to ensure the process can't miss a shared signal we are going to block.
2965 void set_current_blocked(sigset_t *newset)
2967 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2968 __set_current_blocked(newset);
2971 void __set_current_blocked(const sigset_t *newset)
2973 struct task_struct *tsk = current;
2976 * In case the signal mask hasn't changed, there is nothing we need
2977 * to do. The current->blocked shouldn't be modified by another task.
2979 if (sigequalsets(&tsk->blocked, newset))
2982 spin_lock_irq(&tsk->sighand->siglock);
2983 __set_task_blocked(tsk, newset);
2984 spin_unlock_irq(&tsk->sighand->siglock);
2988 * This is also useful for kernel threads that want to temporarily
2989 * (or permanently) block certain signals.
2991 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2992 * interface happily blocks "unblockable" signals like SIGKILL
2995 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2997 struct task_struct *tsk = current;
3000 /* Lockless, only current can change ->blocked, never from irq */
3002 *oldset = tsk->blocked;
3006 sigorsets(&newset, &tsk->blocked, set);
3009 sigandnsets(&newset, &tsk->blocked, set);
3018 __set_current_blocked(&newset);
3021 EXPORT_SYMBOL(sigprocmask);
3024 * The API helps set app-provided sigmasks.
3026 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3027 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3029 * Note that it does set_restore_sigmask() in advance, so it must always be
3030 * paired with restore_saved_sigmask_unless() before return from the syscall.
3032 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3038 if (sigsetsize != sizeof(sigset_t))
3040 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3043 set_restore_sigmask();
3044 current->saved_sigmask = current->blocked;
3045 set_current_blocked(&kmask);
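/*
 * Usage sketch, not part of this file: the pattern a ppoll()-style
 * syscall follows around this helper. do_poll_work() is a placeholder
 * for the syscall's real body.
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_poll_work();	// may return -EINTR
 *
 *	// If a signal interrupted us, the temporary mask must survive
 *	// until the handler runs; otherwise restore the old mask now.
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */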
3050 #ifdef CONFIG_COMPAT
3051 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3058 if (sigsetsize != sizeof(compat_sigset_t))
3060 if (get_compat_sigset(&kmask, umask))
3063 set_restore_sigmask();
3064 current->saved_sigmask = current->blocked;
3065 set_current_blocked(&kmask);
3072 * sys_rt_sigprocmask - change the list of currently blocked signals
3073 * @how: whether to add, remove, or set signals
3074 * @nset: new set of blocked signals, if non-null
3075 * @oset: previous value of signal mask if non-null
3076 * @sigsetsize: size of sigset_t type
3078 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3079 sigset_t __user *, oset, size_t, sigsetsize)
3081 sigset_t old_set, new_set;
3084 /* XXX: Don't preclude handling different sized sigset_t's. */
3085 if (sigsetsize != sizeof(sigset_t))
3088 old_set = current->blocked;
3091 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3093 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3095 error = sigprocmask(how, &new_set, NULL);
3101 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
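/*
 * Userspace example, not part of this file: glibc's sigprocmask()
 * wrapper funnels into this syscall. Block SIGINT around a critical
 * section, then restore the old mask; SIGKILL and SIGSTOP are silently
 * stripped from the new set, as enforced above.
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	if (sigprocmask(SIG_BLOCK, &set, &old) == -1)
 *		perror("sigprocmask");
 *	// ... critical section: SIGINT stays pending ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */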
3108 #ifdef CONFIG_COMPAT
3109 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3110 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3112 sigset_t old_set = current->blocked;
3114 /* XXX: Don't preclude handling different sized sigset_t's. */
3115 if (sigsetsize != sizeof(sigset_t))
3121 if (get_compat_sigset(&new_set, nset))
3123 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3125 error = sigprocmask(how, &new_set, NULL);
3129 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3133 static void do_sigpending(sigset_t *set)
3135 spin_lock_irq(&current->sighand->siglock);
3136 sigorsets(set, &current->pending.signal,
3137 &current->signal->shared_pending.signal);
3138 spin_unlock_irq(&current->sighand->siglock);
3140 /* Outside the lock because only this thread touches it. */
3141 sigandsets(set, &current->blocked, set);
3145 * sys_rt_sigpending - examine a pending signal that has been raised
3147 * @uset: stores pending signals
3148 * @sigsetsize: size of sigset_t type or larger
3150 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3154 if (sigsetsize > sizeof(*uset))
3157 do_sigpending(&set);
3159 if (copy_to_user(uset, &set, sigsetsize))
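/*
 * Userspace example, not part of this file: glibc's sigpending() wraps
 * this syscall. Check whether a blocked SIGTERM was raised while we
 * weren't looking; handle_deferred_sigterm() is a hypothetical helper.
 *
 *	sigset_t pending;
 *
 *	if (sigpending(&pending) == 0 && sigismember(&pending, SIGTERM))
 *		handle_deferred_sigterm();
 */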
3165 #ifdef CONFIG_COMPAT
3166 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3167 compat_size_t, sigsetsize)
3171 if (sigsetsize > sizeof(*uset))
3174 do_sigpending(&set);
3176 return put_compat_sigset(uset, &set, sigsetsize);
3180 static const struct {
3181 unsigned char limit, layout;
3183 [SIGILL] = { NSIGILL, SIL_FAULT },
3184 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3185 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3186 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3187 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3189 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3191 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3192 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3193 [SIGSYS] = { NSIGSYS, SIL_SYS },
3196 static bool known_siginfo_layout(unsigned sig, int si_code)
3198 if (si_code == SI_KERNEL)
3200 else if (si_code > SI_USER) {
3201 if (sig_specific_sicodes(sig)) {
3202 if (si_code <= sig_sicodes[sig].limit)
3205 else if (si_code <= NSIGPOLL)
3208 else if (si_code >= SI_DETHREAD)
3210 else if (si_code == SI_ASYNCNL)
3215 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3217 enum siginfo_layout layout = SIL_KILL;
3218 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3219 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3220 (si_code <= sig_sicodes[sig].limit)) {
3221 layout = sig_sicodes[sig].layout;
3222 /* Handle the exceptions */
3223 if ((sig == SIGBUS) &&
3224 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3225 layout = SIL_FAULT_MCEERR;
3226 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3227 layout = SIL_FAULT_BNDERR;
3229 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3230 layout = SIL_FAULT_PKUERR;
3232 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3233 layout = SIL_PERF_EVENT;
3234 #ifdef __ARCH_SI_TRAPNO
3235 else if (layout == SIL_FAULT)
3236 layout = SIL_FAULT_TRAPNO;
3239 else if (si_code <= NSIGPOLL)
3242 if (si_code == SI_TIMER)
3244 else if (si_code == SI_SIGIO)
3246 else if (si_code < 0)
3252 static inline char __user *si_expansion(const siginfo_t __user *info)
3254 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3257 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3259 char __user *expansion = si_expansion(to);
3260 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3262 if (clear_user(expansion, SI_EXPANSION_SIZE))
3267 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3268 const siginfo_t __user *from)
3270 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3271 char __user *expansion = si_expansion(from);
3272 char buf[SI_EXPANSION_SIZE];
3275 * An unknown si_code might need more than
3276 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3277 * extra bytes are 0. This guarantees copy_siginfo_to_user
3278 * will return this data to userspace exactly.
3280 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3282 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3290 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3291 const siginfo_t __user *from)
3293 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3295 to->si_signo = signo;
3296 return post_copy_siginfo_from_user(to, from);
3299 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3301 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3303 return post_copy_siginfo_from_user(to, from);
3306 #ifdef CONFIG_COMPAT
3308 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3309 * @to: compat siginfo destination
3310 * @from: kernel siginfo source
3312 * Note: This function does not work properly for the SIGCHLD on x32, but
3313 * fortunately it doesn't have to. The only valid callers for this function are
3314 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3315 * The latter does not care because SIGCHLD will never cause a coredump.
3317 void copy_siginfo_to_external32(struct compat_siginfo *to,
3318 const struct kernel_siginfo *from)
3320 memset(to, 0, sizeof(*to));
3322 to->si_signo = from->si_signo;
3323 to->si_errno = from->si_errno;
3324 to->si_code = from->si_code;
3325 switch (siginfo_layout(from->si_signo, from->si_code)) {
3327 to->si_pid = from->si_pid;
3328 to->si_uid = from->si_uid;
3331 to->si_tid = from->si_tid;
3332 to->si_overrun = from->si_overrun;
3333 to->si_int = from->si_int;
3336 to->si_band = from->si_band;
3337 to->si_fd = from->si_fd;
3340 to->si_addr = ptr_to_compat(from->si_addr);
3342 case SIL_FAULT_TRAPNO:
3343 to->si_addr = ptr_to_compat(from->si_addr);
3344 to->si_trapno = from->si_trapno;
3346 case SIL_FAULT_MCEERR:
3347 to->si_addr = ptr_to_compat(from->si_addr);
3348 to->si_addr_lsb = from->si_addr_lsb;
3350 case SIL_FAULT_BNDERR:
3351 to->si_addr = ptr_to_compat(from->si_addr);
3352 to->si_lower = ptr_to_compat(from->si_lower);
3353 to->si_upper = ptr_to_compat(from->si_upper);
3355 case SIL_FAULT_PKUERR:
3356 to->si_addr = ptr_to_compat(from->si_addr);
3357 to->si_pkey = from->si_pkey;
3359 case SIL_PERF_EVENT:
3360 to->si_addr = ptr_to_compat(from->si_addr);
3361 to->si_perf_data = from->si_perf_data;
3362 to->si_perf_type = from->si_perf_type;
3365 to->si_pid = from->si_pid;
3366 to->si_uid = from->si_uid;
3367 to->si_status = from->si_status;
3368 to->si_utime = from->si_utime;
3369 to->si_stime = from->si_stime;
3372 to->si_pid = from->si_pid;
3373 to->si_uid = from->si_uid;
3374 to->si_int = from->si_int;
3377 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3378 to->si_syscall = from->si_syscall;
3379 to->si_arch = from->si_arch;
3384 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3385 const struct kernel_siginfo *from)
3387 struct compat_siginfo new;
3389 copy_siginfo_to_external32(&new, from);
3390 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3395 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3396 const struct compat_siginfo *from)
3399 to->si_signo = from->si_signo;
3400 to->si_errno = from->si_errno;
3401 to->si_code = from->si_code;
3402 switch (siginfo_layout(from->si_signo, from->si_code)) {
3404 to->si_pid = from->si_pid;
3405 to->si_uid = from->si_uid;
3408 to->si_tid = from->si_tid;
3409 to->si_overrun = from->si_overrun;
3410 to->si_int = from->si_int;
3413 to->si_band = from->si_band;
3414 to->si_fd = from->si_fd;
3417 to->si_addr = compat_ptr(from->si_addr);
3419 case SIL_FAULT_TRAPNO:
3420 to->si_addr = compat_ptr(from->si_addr);
3421 to->si_trapno = from->si_trapno;
3423 case SIL_FAULT_MCEERR:
3424 to->si_addr = compat_ptr(from->si_addr);
3425 to->si_addr_lsb = from->si_addr_lsb;
3427 case SIL_FAULT_BNDERR:
3428 to->si_addr = compat_ptr(from->si_addr);
3429 to->si_lower = compat_ptr(from->si_lower);
3430 to->si_upper = compat_ptr(from->si_upper);
3432 case SIL_FAULT_PKUERR:
3433 to->si_addr = compat_ptr(from->si_addr);
3434 to->si_pkey = from->si_pkey;
3436 case SIL_PERF_EVENT:
3437 to->si_addr = compat_ptr(from->si_addr);
3438 to->si_perf_data = from->si_perf_data;
3439 to->si_perf_type = from->si_perf_type;
3442 to->si_pid = from->si_pid;
3443 to->si_uid = from->si_uid;
3444 to->si_status = from->si_status;
3445 #ifdef CONFIG_X86_X32_ABI
3446 if (in_x32_syscall()) {
3447 to->si_utime = from->_sifields._sigchld_x32._utime;
3448 to->si_stime = from->_sifields._sigchld_x32._stime;
3452 to->si_utime = from->si_utime;
3453 to->si_stime = from->si_stime;
3457 to->si_pid = from->si_pid;
3458 to->si_uid = from->si_uid;
3459 to->si_int = from->si_int;
3462 to->si_call_addr = compat_ptr(from->si_call_addr);
3463 to->si_syscall = from->si_syscall;
3464 to->si_arch = from->si_arch;
3470 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3471 const struct compat_siginfo __user *ufrom)
3473 struct compat_siginfo from;
3475 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3478 from.si_signo = signo;
3479 return post_copy_siginfo_from_user32(to, &from);
3482 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3483 const struct compat_siginfo __user *ufrom)
3485 struct compat_siginfo from;
3487 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3490 return post_copy_siginfo_from_user32(to, &from);
3492 #endif /* CONFIG_COMPAT */
3495 * do_sigtimedwait - wait for queued signals specified in @which
3496 * @which: queued signals to wait for
3497 * @info: if non-null, the signal's siginfo is returned here
3498 * @ts: upper bound on process time suspension
3500 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3501 const struct timespec64 *ts)
3503 ktime_t *to = NULL, timeout = KTIME_MAX;
3504 struct task_struct *tsk = current;
3505 sigset_t mask = *which;
3509 if (!timespec64_valid(ts))
3511 timeout = timespec64_to_ktime(*ts);
3516 * Invert the set of allowed signals to get those we want to block.
3518 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3521 spin_lock_irq(&tsk->sighand->siglock);
3522 sig = dequeue_signal(tsk, &mask, info);
3523 if (!sig && timeout) {
3525 * None ready, temporarily unblock those we're interested in
3526 * while we are sleeping, so that we'll be awakened when
3527 * they arrive. Unblocking is always fine, we can avoid
3528 * set_current_blocked().
3530 tsk->real_blocked = tsk->blocked;
3531 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3532 recalc_sigpending();
3533 spin_unlock_irq(&tsk->sighand->siglock);
3535 __set_current_state(TASK_INTERRUPTIBLE);
3536 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3538 spin_lock_irq(&tsk->sighand->siglock);
3539 __set_task_blocked(tsk, &tsk->real_blocked);
3540 sigemptyset(&tsk->real_blocked);
3541 sig = dequeue_signal(tsk, &mask, info);
3543 spin_unlock_irq(&tsk->sighand->siglock);
3547 return ret ? -EINTR : -EAGAIN;
3551 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3553 * @uthese: queued signals to wait for
3554 * @uinfo: if non-null, the signal's siginfo is returned here
3555 * @uts: upper bound on process time suspension
3556 * @sigsetsize: size of sigset_t type
3558 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3559 siginfo_t __user *, uinfo,
3560 const struct __kernel_timespec __user *, uts,
3564 struct timespec64 ts;
3565 kernel_siginfo_t info;
3568 /* XXX: Don't preclude handling different sized sigset_t's. */
3569 if (sigsetsize != sizeof(sigset_t))
3572 if (copy_from_user(&these, uthese, sizeof(these)))
3576 if (get_timespec64(&ts, uts))
3580 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3582 if (ret > 0 && uinfo) {
3583 if (copy_siginfo_to_user(uinfo, &info))
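/*
 * Userspace example, not part of this file: glibc's sigtimedwait() wraps
 * this syscall. Synchronously consume a SIGCHLD with a five second
 * timeout; the signal must be blocked first or it may be delivered
 * asynchronously instead of being dequeued here.
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGCHLD)
 *		printf("child %d changed state\n", si.si_pid);
 */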
3590 #ifdef CONFIG_COMPAT_32BIT_TIME
3591 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3592 siginfo_t __user *, uinfo,
3593 const struct old_timespec32 __user *, uts,
3597 struct timespec64 ts;
3598 kernel_siginfo_t info;
3601 if (sigsetsize != sizeof(sigset_t))
3604 if (copy_from_user(&these, uthese, sizeof(these)))
3608 if (get_old_timespec32(&ts, uts))
3612 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3614 if (ret > 0 && uinfo) {
3615 if (copy_siginfo_to_user(uinfo, &info))
3623 #ifdef CONFIG_COMPAT
3624 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3625 struct compat_siginfo __user *, uinfo,
3626 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3629 struct timespec64 t;
3630 kernel_siginfo_t info;
3633 if (sigsetsize != sizeof(sigset_t))
3636 if (get_compat_sigset(&s, uthese))
3640 if (get_timespec64(&t, uts))
3644 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3646 if (ret > 0 && uinfo) {
3647 if (copy_siginfo_to_user32(uinfo, &info))
3654 #ifdef CONFIG_COMPAT_32BIT_TIME
3655 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3656 struct compat_siginfo __user *, uinfo,
3657 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3660 struct timespec64 t;
3661 kernel_siginfo_t info;
3664 if (sigsetsize != sizeof(sigset_t))
3667 if (get_compat_sigset(&s, uthese))
3671 if (get_old_timespec32(&t, uts))
3675 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3677 if (ret > 0 && uinfo) {
3678 if (copy_siginfo_to_user32(uinfo, &info))
3687 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3689 clear_siginfo(info);
3690 info->si_signo = sig;
3692 info->si_code = SI_USER;
3693 info->si_pid = task_tgid_vnr(current);
3694 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3698 * sys_kill - send a signal to a process
3699 * @pid: the PID of the process
3700 * @sig: signal to be sent
3702 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3704 struct kernel_siginfo info;
3706 prepare_kill_siginfo(sig, &info);
3708 return kill_something_info(sig, &info, pid);
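/*
 * Userspace example, not part of this file: the pid encodings accepted
 * here, as seen through the kill(2) wrapper.
 *
 *	kill(1234, SIGTERM);	// one process
 *	kill(-5678, SIGHUP);	// every process in process group 5678
 *	kill(0, SIGHUP);	// every process in our own process group
 *	kill(1234, 0);		// existence/permission probe, nothing sent
 */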
3712 * Verify that the signaler and signalee either are in the same pid namespace
3713 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3716 static bool access_pidfd_pidns(struct pid *pid)
3718 struct pid_namespace *active = task_active_pid_ns(current);
3719 struct pid_namespace *p = ns_of_pid(pid);
3732 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3733 siginfo_t __user *info)
3735 #ifdef CONFIG_COMPAT
3737 * Avoid hooking up compat syscalls and instead handle necessary
3738 * conversions here. Note, this is a stop-gap measure and should not be
3739 * considered a generic solution.
3741 if (in_compat_syscall())
3742 return copy_siginfo_from_user32(
3743 kinfo, (struct compat_siginfo __user *)info);
3745 return copy_siginfo_from_user(kinfo, info);
3748 static struct pid *pidfd_to_pid(const struct file *file)
3752 pid = pidfd_pid(file);
3756 return tgid_pidfd_to_pid(file);
3760 * sys_pidfd_send_signal - Signal a process through a pidfd
3761 * @pidfd: file descriptor of the process
3762 * @sig: signal to send
3763 * @info: signal info
3764 * @flags: future flags
3766 * The syscall currently only signals via PIDTYPE_PID which covers
3767 * kill(<positive-pid>, <signal>). It does not signal threads or process
3769 * In order to extend the syscall to threads and process groups the @flags
3770 * argument should be used. In essence, the @flags argument will determine
3771 * what is signaled and not the file descriptor itself. Put in other words,
3772 * grouping is a property of the flags argument not a property of the file
3775 * Return: 0 on success, negative errno on failure
3777 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3778 siginfo_t __user *, info, unsigned int, flags)
3783 kernel_siginfo_t kinfo;
3785 /* Enforce flags to be 0 until we add an extension. */
3793 /* Is this a pidfd? */
3794 pid = pidfd_to_pid(f.file);
3801 if (!access_pidfd_pidns(pid))
3805 ret = copy_siginfo_from_user_any(&kinfo, info);
3810 if (unlikely(sig != kinfo.si_signo))
3813 /* Only allow sending arbitrary signals to yourself. */
3815 if ((task_pid(current) != pid) &&
3816 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3819 prepare_kill_siginfo(sig, &kinfo);
3822 ret = kill_pid_info(sig, &kinfo, pid);
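/*
 * Userspace example, not part of this file: race-free signaling through
 * a pidfd, using raw syscall(2) since libc wrappers may be missing.
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *	if (pidfd >= 0) {
 *		// NULL info: the kernel builds a kill()-style siginfo.
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */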
3830 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3832 struct task_struct *p;
3836 p = find_task_by_vpid(pid);
3837 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3838 error = check_kill_permission(sig, info, p);
3840 * The null signal is a permissions and process existence
3841 * probe. No signal is actually delivered.
3843 if (!error && sig) {
3844 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3846 * If lock_task_sighand() failed we pretend the task
3847 * dies after receiving the signal. The window is tiny,
3848 * and the signal is private anyway.
3850 if (unlikely(error == -ESRCH))
3859 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3861 struct kernel_siginfo info;
3863 clear_siginfo(&info);
3864 info.si_signo = sig;
3866 info.si_code = SI_TKILL;
3867 info.si_pid = task_tgid_vnr(current);
3868 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3870 return do_send_specific(tgid, pid, sig, &info);
3874 * sys_tgkill - send signal to one specific thread
3875 * @tgid: the thread group ID of the thread
3876 * @pid: the PID of the thread
3877 * @sig: signal to be sent
3879 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3880 * exists but no longer belongs to the target process. This
3881 * method solves the problem of threads exiting and PIDs getting reused.
3883 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3885 /* This is only valid for single tasks */
3886 if (pid <= 0 || tgid <= 0)
3889 return do_tkill(tgid, pid, sig);
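/*
 * Userspace example, not part of this file: direct a signal at one
 * specific thread of the current process. Recent glibc also provides
 * gettid() and tgkill() wrappers.
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = syscall(SYS_gettid);
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */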
3893 * sys_tkill - send signal to one specific task
3894 * @pid: the PID of the task
3895 * @sig: signal to be sent
3897 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3899 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3901 /* This is only valid for single tasks */
3905 return do_tkill(0, pid, sig);
3908 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3910 /* Not even root can pretend to send signals from the kernel.
3911 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3913 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3914 (task_pid_vnr(current) != pid))
3917 /* POSIX.1b doesn't mention process groups. */
3918 return kill_proc_info(sig, info, pid);
3922 * sys_rt_sigqueueinfo - send signal information to a process
3923 * @pid: the PID of the process
3924 * @sig: signal to be sent
3925 * @uinfo: signal info to be sent
3927 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3928 siginfo_t __user *, uinfo)
3930 kernel_siginfo_t info;
3931 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3934 return do_rt_sigqueueinfo(pid, sig, &info);
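/*
 * Userspace example, not part of this file: the usual entry point is
 * sigqueue(3), which builds an SI_QUEUE siginfo and lands here. A
 * handler installed with SA_SIGINFO receives the value in si_value.
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(target_pid, SIGRTMIN, v);	// target_pid: hypothetical
 */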
3937 #ifdef CONFIG_COMPAT
3938 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3941 struct compat_siginfo __user *, uinfo)
3943 kernel_siginfo_t info;
3944 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3947 return do_rt_sigqueueinfo(pid, sig, &info);
3951 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3953 /* This is only valid for single tasks */
3954 if (pid <= 0 || tgid <= 0)
3957 /* Not even root can pretend to send signals from the kernel.
3958 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3960 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3961 (task_pid_vnr(current) != pid))
3964 return do_send_specific(tgid, pid, sig, info);
3967 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3968 siginfo_t __user *, uinfo)
3970 kernel_siginfo_t info;
3971 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3974 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3977 #ifdef CONFIG_COMPAT
3978 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3982 struct compat_siginfo __user *, uinfo)
3984 kernel_siginfo_t info;
3985 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3988 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3993 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3995 void kernel_sigaction(int sig, __sighandler_t action)
3997 spin_lock_irq(&current->sighand->siglock);
3998 current->sighand->action[sig - 1].sa.sa_handler = action;
3999 if (action == SIG_IGN) {
4003 sigaddset(&mask, sig);
4005 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4006 flush_sigqueue_mask(&mask, &current->pending);
4007 recalc_sigpending();
4009 spin_unlock_irq(&current->sighand->siglock);
4011 EXPORT_SYMBOL(kernel_sigaction);
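/*
 * In-kernel usage sketch: kthreads normally reach kernel_sigaction()
 * through the allow_signal()/disallow_signal() wrappers rather than
 * calling it directly. A minimal loop, assuming a standard kthread:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		schedule_timeout_interruptible(HZ);
 *		if (signal_pending(current))
 *			break;	// or flush_signals(current) and carry on
 *	}
 *	disallow_signal(SIGTERM);
 */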
4013 void __weak sigaction_compat_abi(struct k_sigaction *act,
4014 struct k_sigaction *oact)
4018 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4020 struct task_struct *p = current, *t;
4021 struct k_sigaction *k;
4024 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4027 k = &p->sighand->action[sig-1];
4029 spin_lock_irq(&p->sighand->siglock);
4034 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4035 * e.g. by having an architecture use the bit in their uapi.
4037 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4040 * Clear unknown flag bits in order to allow userspace to detect missing
4041 * support for flag bits and to allow the kernel to use non-uapi bits
4045 act->sa.sa_flags &= UAPI_SA_FLAGS;
4047 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4049 sigaction_compat_abi(act, oact);
4052 sigdelsetmask(&act->sa.sa_mask,
4053 sigmask(SIGKILL) | sigmask(SIGSTOP));
4057 * "Setting a signal action to SIG_IGN for a signal that is
4058 * pending shall cause the pending signal to be discarded,
4059 * whether or not it is blocked."
4061 * "Setting a signal action to SIG_DFL for a signal that is
4062 * pending and whose default action is to ignore the signal
4063 * (for example, SIGCHLD), shall cause the pending signal to
4064 * be discarded, whether or not it is blocked"
4066 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4068 sigaddset(&mask, sig);
4069 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4070 for_each_thread(p, t)
4071 flush_sigqueue_mask(&mask, &t->pending);
4075 spin_unlock_irq(&p->sighand->siglock);
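/*
 * Userspace example, not part of this file: glibc's sigaction() funnels
 * into do_sigaction() via sys_rt_sigaction below. Install a SIGCHLD
 * handler that survives after firing and restarts interrupted syscalls.
 *
 *	static void on_chld(int sig, siginfo_t *si, void *uctx)
 *	{
 *		// si->si_pid and si->si_status describe the child
 *	}
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_sigaction = on_chld;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGCHLD, &sa, NULL);
 */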
4080 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4083 struct task_struct *t = current;
4086 memset(oss, 0, sizeof(stack_t));
4087 oss->ss_sp = (void __user *) t->sas_ss_sp;
4088 oss->ss_size = t->sas_ss_size;
4089 oss->ss_flags = sas_ss_flags(sp) |
4090 (current->sas_ss_flags & SS_FLAG_BITS);
4094 void __user *ss_sp = ss->ss_sp;
4095 size_t ss_size = ss->ss_size;
4096 unsigned ss_flags = ss->ss_flags;
4099 if (unlikely(on_sig_stack(sp)))
4102 ss_mode = ss_flags & ~SS_FLAG_BITS;
4103 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4107 if (ss_mode == SS_DISABLE) {
4111 if (unlikely(ss_size < min_ss_size))
4115 t->sas_ss_sp = (unsigned long) ss_sp;
4116 t->sas_ss_size = ss_size;
4117 t->sas_ss_flags = ss_flags;
4122 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4126 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4128 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4129 current_user_stack_pointer(),
4131 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
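/*
 * Userspace example, not part of this file: give SIGSEGV its own stack
 * so a stack-overflow fault can still run a handler; SA_ONSTACK selects
 * the stack installed here. A fixed 64KiB buffer is used because recent
 * glibc makes SIGSTKSZ non-constant; on_segv is a hypothetical handler.
 *
 *	static char altstack[64 * 1024];
 *
 *	stack_t ss = {
 *		.ss_sp = altstack,
 *		.ss_size = sizeof(altstack),
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = { .sa_flags = SA_ONSTACK | SA_SIGINFO };
 *
 *	sa.sa_sigaction = on_segv;
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */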
4136 int restore_altstack(const stack_t __user *uss)
4139 if (copy_from_user(&new, uss, sizeof(stack_t)))
4141 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4143 /* squash all but -EFAULT for now */
4147 int __save_altstack(stack_t __user *uss, unsigned long sp)
4149 struct task_struct *t = current;
4150 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4151 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4152 __put_user(t->sas_ss_size, &uss->ss_size);
4156 #ifdef CONFIG_COMPAT
4157 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4158 compat_stack_t __user *uoss_ptr)
4164 compat_stack_t uss32;
4165 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4167 uss.ss_sp = compat_ptr(uss32.ss_sp);
4168 uss.ss_flags = uss32.ss_flags;
4169 uss.ss_size = uss32.ss_size;
4171 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4172 compat_user_stack_pointer(),
4173 COMPAT_MINSIGSTKSZ);
4174 if (ret >= 0 && uoss_ptr) {
4176 memset(&old, 0, sizeof(old));
4177 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4178 old.ss_flags = uoss.ss_flags;
4179 old.ss_size = uoss.ss_size;
4180 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4186 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4187 const compat_stack_t __user *, uss_ptr,
4188 compat_stack_t __user *, uoss_ptr)
4190 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4193 int compat_restore_altstack(const compat_stack_t __user *uss)
4195 int err = do_compat_sigaltstack(uss, NULL);
4196 /* squash all but -EFAULT for now */
4197 return err == -EFAULT ? err : 0;
4200 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4203 struct task_struct *t = current;
4204 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4206 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4207 __put_user(t->sas_ss_size, &uss->ss_size);
4212 #ifdef __ARCH_WANT_SYS_SIGPENDING
4215 * sys_sigpending - examine pending signals
4216 * @uset: where the mask of pending signals is returned
4218 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4222 if (sizeof(old_sigset_t) > sizeof(*uset))
4225 do_sigpending(&set);
4227 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4233 #ifdef CONFIG_COMPAT
4234 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4238 do_sigpending(&set);
4240 return put_user(set.sig[0], set32);
4246 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4248 * sys_sigprocmask - examine and change blocked signals
4249 * @how: whether to add, remove, or set signals
4250 * @nset: signals to add or remove (if non-null)
4251 * @oset: previous value of signal mask if non-null
4253 * Some platforms have their own version with special arguments;
4254 * others support only sys_rt_sigprocmask.
4257 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4258 old_sigset_t __user *, oset)
4260 old_sigset_t old_set, new_set;
4261 sigset_t new_blocked;
4263 old_set = current->blocked.sig[0];
4266 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4269 new_blocked = current->blocked;
4273 sigaddsetmask(&new_blocked, new_set);
4276 sigdelsetmask(&new_blocked, new_set);
4279 new_blocked.sig[0] = new_set;
4285 set_current_blocked(&new_blocked);
4289 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4295 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4297 #ifndef CONFIG_ODD_RT_SIGACTION
4299 * sys_rt_sigaction - alter an action taken by a process
4300 * @sig: signal whose action is to be changed
4301 * @act: new sigaction
4302 * @oact: used to save the previous sigaction
4303 * @sigsetsize: size of sigset_t type
4305 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4306 const struct sigaction __user *, act,
4307 struct sigaction __user *, oact,
4310 struct k_sigaction new_sa, old_sa;
4313 /* XXX: Don't preclude handling different sized sigset_t's. */
4314 if (sigsetsize != sizeof(sigset_t))
4317 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4320 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4324 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4329 #ifdef CONFIG_COMPAT
4330 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4331 const struct compat_sigaction __user *, act,
4332 struct compat_sigaction __user *, oact,
4333 compat_size_t, sigsetsize)
4335 struct k_sigaction new_ka, old_ka;
4336 #ifdef __ARCH_HAS_SA_RESTORER
4337 compat_uptr_t restorer;
4341 /* XXX: Don't preclude handling different sized sigset_t's. */
4342 if (sigsetsize != sizeof(compat_sigset_t))
4346 compat_uptr_t handler;
4347 ret = get_user(handler, &act->sa_handler);
4348 new_ka.sa.sa_handler = compat_ptr(handler);
4349 #ifdef __ARCH_HAS_SA_RESTORER
4350 ret |= get_user(restorer, &act->sa_restorer);
4351 new_ka.sa.sa_restorer = compat_ptr(restorer);
4353 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4354 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4359 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4361 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4363 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4364 sizeof(oact->sa_mask));
4365 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4366 #ifdef __ARCH_HAS_SA_RESTORER
4367 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4368 &oact->sa_restorer);
4374 #endif /* !CONFIG_ODD_RT_SIGACTION */
4376 #ifdef CONFIG_OLD_SIGACTION
4377 SYSCALL_DEFINE3(sigaction, int, sig,
4378 const struct old_sigaction __user *, act,
4379 struct old_sigaction __user *, oact)
4381 struct k_sigaction new_ka, old_ka;
4386 if (!access_ok(act, sizeof(*act)) ||
4387 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4388 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4389 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4390 __get_user(mask, &act->sa_mask))
4392 #ifdef __ARCH_HAS_KA_RESTORER
4393 new_ka.ka_restorer = NULL;
4395 siginitset(&new_ka.sa.sa_mask, mask);
4398 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4401 if (!access_ok(oact, sizeof(*oact)) ||
4402 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4403 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4404 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4405 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4412 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4413 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4414 const struct compat_old_sigaction __user *, act,
4415 struct compat_old_sigaction __user *, oact)
4417 struct k_sigaction new_ka, old_ka;
4419 compat_old_sigset_t mask;
4420 compat_uptr_t handler, restorer;
4423 if (!access_ok(act, sizeof(*act)) ||
4424 __get_user(handler, &act->sa_handler) ||
4425 __get_user(restorer, &act->sa_restorer) ||
4426 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4427 __get_user(mask, &act->sa_mask))
4430 #ifdef __ARCH_HAS_KA_RESTORER
4431 new_ka.ka_restorer = NULL;
4433 new_ka.sa.sa_handler = compat_ptr(handler);
4434 new_ka.sa.sa_restorer = compat_ptr(restorer);
4435 siginitset(&new_ka.sa.sa_mask, mask);
4438 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4441 if (!access_ok(oact, sizeof(*oact)) ||
4442 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4443 &oact->sa_handler) ||
4444 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4445 &oact->sa_restorer) ||
4446 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4447 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4454 #ifdef CONFIG_SGETMASK_SYSCALL
4457 * For backwards compatibility. Functionality superseded by sigprocmask.
4459 SYSCALL_DEFINE0(sgetmask)
4462 return current->blocked.sig[0];
4465 SYSCALL_DEFINE1(ssetmask, int, newmask)
4467 int old = current->blocked.sig[0];
4470 siginitset(&newset, newmask);
4471 set_current_blocked(&newset);
4475 #endif /* CONFIG_SGETMASK_SYSCALL */
4477 #ifdef __ARCH_WANT_SYS_SIGNAL
4479 * For backwards compatibility. Functionality superseded by sigaction.
4481 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4483 struct k_sigaction new_sa, old_sa;
4486 new_sa.sa.sa_handler = handler;
4487 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4488 sigemptyset(&new_sa.sa.sa_mask);
4490 ret = do_sigaction(sig, &new_sa, &old_sa);
4492 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4494 #endif /* __ARCH_WANT_SYS_SIGNAL */
4496 #ifdef __ARCH_WANT_SYS_PAUSE
4498 SYSCALL_DEFINE0(pause)
4500 while (!signal_pending(current)) {
4501 __set_current_state(TASK_INTERRUPTIBLE);
4504 return -ERESTARTNOHAND;
4509 static int sigsuspend(sigset_t *set)
4511 current->saved_sigmask = current->blocked;
4512 set_current_blocked(set);
4514 while (!signal_pending(current)) {
4515 __set_current_state(TASK_INTERRUPTIBLE);
4518 set_restore_sigmask();
4519 return -ERESTARTNOHAND;
4523 * sys_rt_sigsuspend - replace the signal mask with the @unewset
4524 * value until a signal is received
4525 * @unewset: new signal mask value
4526 * @sigsetsize: size of sigset_t type
4528 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4532 /* XXX: Don't preclude handling different sized sigset_t's. */
4533 if (sigsetsize != sizeof(sigset_t))
4536 if (copy_from_user(&newset, unewset, sizeof(newset)))
4538 return sigsuspend(&newset);
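/*
 * Userspace example, not part of this file: glibc's sigsuspend() wraps
 * this syscall for the classic race-free wait. SIGUSR1 stays blocked
 * while the flag is tested and is atomically unblocked only for the
 * duration of the sleep; got_usr1 is a hypothetical handler-set flag.
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)
 *		sigsuspend(&old);	// old mask has SIGUSR1 unblocked
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */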
4541 #ifdef CONFIG_COMPAT
4542 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4546 /* XXX: Don't preclude handling different sized sigset_t's. */
4547 if (sigsetsize != sizeof(sigset_t))
4550 if (get_compat_sigset(&newset, unewset))
4552 return sigsuspend(&newset);
4556 #ifdef CONFIG_OLD_SIGSUSPEND
4557 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4560 siginitset(&blocked, mask);
4561 return sigsuspend(&blocked);
4564 #ifdef CONFIG_OLD_SIGSUSPEND3
4565 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4568 siginitset(&blocked, mask);
4569 return sigsuspend(&blocked);
4573 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4578 static inline void siginfo_buildtime_checks(void)
4580 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4582 /* Verify the offsets in the two siginfos match */
4583 #define CHECK_OFFSET(field) \
4584 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4587 CHECK_OFFSET(si_pid);
4588 CHECK_OFFSET(si_uid);
4591 CHECK_OFFSET(si_tid);
4592 CHECK_OFFSET(si_overrun);
4593 CHECK_OFFSET(si_value);
4596 CHECK_OFFSET(si_pid);
4597 CHECK_OFFSET(si_uid);
4598 CHECK_OFFSET(si_value);
4601 CHECK_OFFSET(si_pid);
4602 CHECK_OFFSET(si_uid);
4603 CHECK_OFFSET(si_status);
4604 CHECK_OFFSET(si_utime);
4605 CHECK_OFFSET(si_stime);
4608 CHECK_OFFSET(si_addr);
4609 CHECK_OFFSET(si_trapno);
4610 CHECK_OFFSET(si_addr_lsb);
4611 CHECK_OFFSET(si_lower);
4612 CHECK_OFFSET(si_upper);
4613 CHECK_OFFSET(si_pkey);
4614 CHECK_OFFSET(si_perf_data);
4615 CHECK_OFFSET(si_perf_type);
4618 CHECK_OFFSET(si_band);
4619 CHECK_OFFSET(si_fd);
4622 CHECK_OFFSET(si_call_addr);
4623 CHECK_OFFSET(si_syscall);
4624 CHECK_OFFSET(si_arch);
4628 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4629 offsetof(struct siginfo, si_addr));
4630 if (sizeof(int) == sizeof(void __user *)) {
4631 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4632 sizeof(void __user *));
4634 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4635 sizeof_field(struct siginfo, si_uid)) !=
4636 sizeof(void __user *));
4637 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4638 offsetof(struct siginfo, si_uid));
4640 #ifdef CONFIG_COMPAT
4641 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4642 offsetof(struct compat_siginfo, si_addr));
4643 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4644 sizeof(compat_uptr_t));
4645 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4646 sizeof_field(struct siginfo, si_pid));
4650 void __init signals_init(void)
4652 siginfo_buildtime_checks();
4654 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4657 #ifdef CONFIG_KGDB_KDB
4658 #include <linux/kdb.h>
4660 * kdb_send_sig - Allows kdb to send signals without exposing
4661 * signal internals. This function checks if the required locks are
4662 * available before calling the main signal code, to avoid kdb deadlocks.
4665 void kdb_send_sig(struct task_struct *t, int sig)
4667 static struct task_struct *kdb_prev_t;
4669 if (!spin_trylock(&t->sighand->siglock)) {
4670 kdb_printf("Can't do kill command now.\n"
4671 "The sigmask lock is held somewhere else in "
4672 "kernel, try again later\n");
4675 new_t = kdb_prev_t != t;
4677 if (!task_is_running(t) && new_t) {
4678 spin_unlock(&t->sighand->siglock);
4679 kdb_printf("Process is not RUNNING, sending a signal from "
4680 "kdb risks deadlock\n"
4681 "on the run queue locks. "
4682 "The signal has _not_ been sent.\n"
4683 "Reissue the kill command if you want to risk "
4687 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4688 spin_unlock(&t->sighand->siglock);
4690 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4693 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4695 #endif /* CONFIG_KGDB_KDB */