// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even an ignored signal unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
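
/*
 * Illustrative example (not from the original file): PENDING() is a
 * word-wise "pending & ~blocked" test. With _NSIG_WORDS == 1, roughly:
 *
 *	pending.signal.sig[0] = sigmask(SIGTERM) | sigmask(SIGUSR1);
 *	blocked.sig[0] = sigmask(SIGTERM);
 *
 *	PENDING(&pending, &blocked);	// true: SIGUSR1 is deliverable
 */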
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * may do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
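
/*
 * Illustrative example (not from the original file): if both SIGTERM and
 * SIGSEGV are pending and unblocked, next_signal() below picks SIGSEGV,
 * because synchronous (fault) signals in the first word are dequeued before
 * anything else:
 *
 *	sigaddset(&pending.signal, SIGTERM);
 *	sigaddset(&pending.signal, SIGSEGV);
 *	next_signal(&pending, &blocked);	// returns SIGSEGV, not SIGTERM
 */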
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
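
/*
 * Illustrative usage sketch (not from the original file): a kthread that
 * opts in to signals typically drains them itself, roughly:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current))
 *			flush_signals(current);
 *		do_work();
 *	}
 *
 * where do_work() stands in for the thread's real loop body.
 */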
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
		   kernel_siginfo_t *info, enum pid_type *type)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves; we don't let
	 * signalfd steal them
	 */
	*type = PIDTYPE_PID;
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
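
/*
 * Illustrative usage sketch (not from the original file), modeled on
 * get_signal(): dequeue_signal() must be called with the siglock held:
 *
 *	kernel_siginfo_t info;
 *	enum pid_type type;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info, &type);
 *	spin_unlock_irq(&current->sighand->siglock);
 */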
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
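
/*
 * Illustrative example (not from the original file): this implements the
 * classic kill(2) permission rule. A process whose uid/euid is 1000 may
 * signal a target whose real or saved uid is 1000:
 *
 *	kill(pid_same_user, SIGTERM);	// passes kill_ok_by_cred()
 *	kill(pid_other_user, SIGTERM);	// -EPERM unless CAP_KILL is held
 */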
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
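
/*
 * Illustrative example (not from the original file): legacy (non-realtime)
 * signals coalesce while realtime signals queue, so:
 *
 *	kill(pid, SIGTERM);
 *	kill(pid, SIGTERM);		// coalesced: legacy_queue() is true
 *
 *	union sigval v = { .sival_int = 1 };
 *	sigqueue(pid, SIGRTMIN, v);
 *	sigqueue(pid, SIGRTMIN, v);	// both instances are queued
 */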
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the info has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
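
/*
 * Illustrative note (not from the original file): booting with
 * "print-fatal-signals=1" enables both print_fatal_signal() and the
 * RLIMIT_SIGPENDING drop message in print_dropped_signal() above.
 */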
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * On some architectures, PREEMPT_RT has to delay sending a signal from a
 * trap since it cannot enable preemption, and the signal code's
 * spin_locks turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME
 * which will send the signal on exit of the trap.
 */
#ifdef CONFIG_RT_DELAYED_SIGNALS
static inline bool force_sig_delayed(struct kernel_siginfo *info,
				     struct task_struct *t)
{
	if (!in_atomic())
		return false;

	if (WARN_ON_ONCE(t->forced_info.si_signo))
		return true;

	if (is_si_special(info)) {
		WARN_ON_ONCE(info != SEND_SIG_PRIV);
		t->forced_info.si_signo = info->si_signo;
		t->forced_info.si_errno = 0;
		t->forced_info.si_code = SI_KERNEL;
		t->forced_info.si_pid = 0;
		t->forced_info.si_uid = 0;
	} else {
		t->forced_info = *info;
	}
	set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
	return true;
}
#else
static inline bool force_sig_delayed(struct kernel_siginfo *info,
				     struct task_struct *t)
{
	return false;
}
#endif

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
	enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	if (force_sig_delayed(info, t))
		return 0;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock; we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than they appear in a 32 bit pointer.  So userspace
 * will not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);
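
/*
 * Illustrative usage sketch (not from the original file): architecture
 * fault handlers typically deliver synchronous faults to the current
 * task with these helpers, e.g. roughly:
 *
 *	if (user_mode(regs))
 *		force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
 *	else
 *		die("kernel fault", regs);	// die() is a per-arch stand-in
 */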
void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_PERF;
	info.si_addr = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
				      force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}
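
/*
 * Illustrative sketch (not from the original file): a userspace SIGSYS
 * handler installed with SA_SIGINFO can read back the fields filled in
 * above, roughly:
 *
 *	static void sigsys_handler(int sig, siginfo_t *si, void *uc)
 *	{
 *		// si->si_syscall, si->si_arch and si->si_errno carry the
 *		// syscall number, audit arch and filter-supplied reason.
 *	}
 */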
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}
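
/*
 * Illustrative lifecycle sketch (not from the original file), matching the
 * POSIX timer usage described above:
 *
 *	struct sigqueue *q = sigqueue_alloc();	// timer_create(): may fail,
 *						// giving a clean -EAGAIN
 *	ret = send_sigqueue(q, pid, type);	// each timer expiry
 *	sigqueue_free(q);			// timer deletion
 */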
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
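
/*
 * Illustrative sketch (not from the original file): the wait_pidfd queue
 * woken here is what makes a pidfd poll()able, so userspace can roughly do:
 *
 *	int pidfd = pidfd_open(pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// returns once the process has exited
 */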
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
2225 * This must be called with current->sighand->siglock held.
2227 * This should be the path for all ptrace stops.
2228 * We always set current->last_siginfo while stopped here.
2229 * That makes it a way to test a stopped process for
2230 * being ptrace-stopped vs being job-control-stopped.
2232 * Returns the signal the ptracer requested the code resume
2233 * with. If the code did not stop because the tracer is gone,
2234 * the stop signal remains unchanged unless clear_code.
2236 static int ptrace_stop(int exit_code, int why, int clear_code,
2237 unsigned long message, kernel_siginfo_t *info)
2238 __releases(¤t->sighand->siglock)
2239 __acquires(¤t->sighand->siglock)
2241 bool gstop_done = false;
2242 bool read_code = true;
2244 if (arch_ptrace_stop_needed()) {
2246 * The arch code has something special to do before a
2247 * ptrace stop. This is allowed to block, e.g. for faults
2248 * on user stack pages. We can't keep the siglock while
2249 * calling arch_ptrace_stop, so we must release it now.
2250 * To preserve proper semantics, we must do this before
2251 * any signal bookkeeping like checking group_stop_count.
2253 spin_unlock_irq(¤t->sighand->siglock);
2255 spin_lock_irq(¤t->sighand->siglock);
2259 * schedule() will not sleep if there is a pending signal that
2260 * can awaken the task.
2262 set_special_state(TASK_TRACED);
2265 * We're committing to trapping. TRACED should be visible before
2266 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2267 * Also, transition to TRACED and updates to ->jobctl should be
2268 * atomic with respect to siglock and should be done after the arch
2269 * hook as siglock is released and regrabbed across it.
2274 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2276 * set_current_state() smp_wmb();
2278 * wait_task_stopped()
2279 * task_stopped_code()
2280 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2284 current->ptrace_message = message;
2285 current->last_siginfo = info;
2286 current->exit_code = exit_code;
2289 * If @why is CLD_STOPPED, we're trapping to participate in a group
2290 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
2291 * across siglock relocks since INTERRUPT was scheduled, PENDING
2292 * could be clear now. We act as if SIGCONT is received after
2293 * TASK_TRACED is entered - ignore it.
2295 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2296 gstop_done = task_participate_group_stop(current);
2298 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2299 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2300 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2301 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2303 /* entering a trap, clear TRAPPING */
2304 task_clear_jobctl_trapping(current);
2306 spin_unlock_irq(¤t->sighand->siglock);
2307 read_lock(&tasklist_lock);
2308 if (likely(current->ptrace)) {
2310 * Notify parents of the stop.
2312 * While ptraced, there are two parents - the ptracer and
2313 * the real_parent of the group_leader. The ptracer should
2314 * know about every stop while the real parent is only
2315 * interested in the completion of group stop. The states
2316 * for the two don't interact with each other. Notify
2317 * separately unless they're gonna be duplicates.
2319 do_notify_parent_cldstop(current, true, why);
2320 if (gstop_done && ptrace_reparented(current))
2321 do_notify_parent_cldstop(current, false, why);
2324 * Don't want to allow preemption here, because
2325 * sys_ptrace() needs this task to be inactive.
2327 * XXX: implement read_unlock_no_resched().
2330 read_unlock(&tasklist_lock);
2331 cgroup_enter_frozen();
2332 preempt_enable_no_resched();
2333 freezable_schedule();
2334 cgroup_leave_frozen(true);
2337 * By the time we got the lock, our tracer went away.
2338 * Don't drop the lock yet, another tracer may come.
2340 * If @gstop_done, the ptracer went away between group stop
2341 * completion and here. During detach, it would have set
2342 * JOBCTL_STOP_PENDING on us and we'll re-enter
2343 * TASK_STOPPED in do_signal_stop() on return, so notifying
2344 * the real parent of the group stop completion is enough.
2347 do_notify_parent_cldstop(current, false, why);
2349 /* tasklist protects us from ptrace_freeze_traced() */
2350 __set_current_state(TASK_RUNNING);
2354 read_unlock(&tasklist_lock);
2358 * We are back. Now reacquire the siglock before touching
2359 * last_siginfo, so that we are sure to have synchronized with
2360 * any signal-sending on another CPU that wants to examine it.
2362 spin_lock_irq(&current->sighand->siglock);
2364 exit_code = current->exit_code;
2365 current->last_siginfo = NULL;
2366 current->ptrace_message = 0;
2367 current->exit_code = 0;
2369 /* LISTENING can be set only during STOP traps, clear it */
2370 current->jobctl &= ~JOBCTL_LISTENING;
2373 * Queued signals ignored us while we were stopped for tracing.
2374 * So check for any that we should take before resuming user mode.
2375 * This sets TIF_SIGPENDING, but never clears it.
2377 recalc_sigpending_tsk(current);
2381 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2383 kernel_siginfo_t info;
2385 clear_siginfo(&info);
2386 info.si_signo = signr;
2387 info.si_code = exit_code;
2388 info.si_pid = task_pid_vnr(current);
2389 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2391 /* Let the debugger run. */
2392 return ptrace_stop(exit_code, why, 1, message, &info);
2395 int ptrace_notify(int exit_code, unsigned long message)
2399 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2400 if (unlikely(task_work_pending(current)))
2403 spin_lock_irq(&current->sighand->siglock);
2404 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2405 spin_unlock_irq(&current->sighand->siglock);
2410 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2411 * @signr: signr causing group stop if initiating
2413 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2414 * and participate in it. If already set, participate in the existing
2415 * group stop. If participated in a group stop (and thus slept), %true is
2416 * returned with siglock released.
2418 * If ptraced, this function doesn't handle stop itself. Instead,
2419 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2420 * untouched. The caller must ensure that INTERRUPT trap handling takes
2421 * place afterwards.
2424 * Must be called with @current->sighand->siglock held, which is released
2428 * %false if group stop is already cancelled or ptrace trap is scheduled.
2429 * %true if participated in group stop.
2431 static bool do_signal_stop(int signr)
2432 __releases(&current->sighand->siglock)
2434 struct signal_struct *sig = current->signal;
2436 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2437 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2438 struct task_struct *t;
2440 /* signr will be recorded in task->jobctl for retries */
2441 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2443 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2444 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2445 unlikely(sig->group_exec_task))
2448 * There is no group stop already in progress. We must initiate one now.
2451 * While ptraced, a task may be resumed while group stop is
2452 * still in effect and then receive a stop signal and
2453 * initiate another group stop. This deviates from the
2454 * usual behavior as two consecutive stop signals can't
2455 * cause two group stops when !ptraced. That is why we
2456 * also check !task_is_stopped(t) below.
2458 * The condition can be distinguished by testing whether
2459 * SIGNAL_STOP_STOPPED is already set. Don't generate
2460 * group_exit_code in such a case.
2462 * This is not necessary for SIGNAL_STOP_CONTINUED because
2463 * an intervening stop signal is required to cause two
2464 * continued events regardless of ptrace.
2466 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2467 sig->group_exit_code = signr;
2469 sig->group_stop_count = 0;
2471 if (task_set_jobctl_pending(current, signr | gstop))
2472 sig->group_stop_count++;
2475 while_each_thread(current, t) {
2477 * Setting state to TASK_STOPPED for a group
2478 * stop is always done with the siglock held,
2479 * so this check has no races.
2481 if (!task_is_stopped(t) &&
2482 task_set_jobctl_pending(t, signr | gstop)) {
2483 sig->group_stop_count++;
2484 if (likely(!(t->ptrace & PT_SEIZED)))
2485 signal_wake_up(t, 0);
2487 ptrace_trap_notify(t);
2492 if (likely(!current->ptrace)) {
2496 * If there are no other threads in the group, or if there
2497 * is a group stop in progress and we are the last to stop,
2498 * report to the parent.
2500 if (task_participate_group_stop(current))
2501 notify = CLD_STOPPED;
2503 set_special_state(TASK_STOPPED);
2504 spin_unlock_irq(&current->sighand->siglock);
2507 * Notify the parent of the group stop completion. Because
2508 * we're not holding either the siglock or tasklist_lock
2509 * here, a ptracer may attach in between; however, this is for
2510 * group stop and should always be delivered to the real
2511 * parent of the group leader. The new ptracer will get
2512 * its notification when this task transitions into TASK_TRACED.
2516 read_lock(&tasklist_lock);
2517 do_notify_parent_cldstop(current, false, notify);
2518 read_unlock(&tasklist_lock);
2521 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2522 cgroup_enter_frozen();
2523 freezable_schedule();
2527 * While ptraced, group stop is handled by STOP trap.
2528 * Schedule it and let the caller deal with it.
2530 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2536 * do_jobctl_trap - take care of ptrace jobctl traps
2538 * When PT_SEIZED, it's used for both group stop and explicit
2539 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2540 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2541 * the stop signal; otherwise, %SIGTRAP.
2543 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2544 * number as exit_code and no siginfo.
2547 * Must be called with @current->sighand->siglock held, which may be
2548 * released and re-acquired before returning with intervening sleep.
2550 static void do_jobctl_trap(void)
2552 struct signal_struct *signal = current->signal;
2553 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2555 if (current->ptrace & PT_SEIZED) {
2556 if (!signal->group_stop_count &&
2557 !(signal->flags & SIGNAL_STOP_STOPPED))
2559 WARN_ON_ONCE(!signr);
2560 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2563 WARN_ON_ONCE(!signr);
2564 ptrace_stop(signr, CLD_STOPPED, 0, 0, NULL);
2569 * do_freezer_trap - handle the freezer jobctl trap
2571 * Puts the task into the frozen state, unless the task is about to quit;
2572 * in that case it drops JOBCTL_TRAP_FREEZE instead.
2575 * Must be called with @current->sighand->siglock held,
2576 * which is always released before returning.
2578 static void do_freezer_trap(void)
2579 __releases(&current->sighand->siglock)
2582 * If there are trap bits pending other than JOBCTL_TRAP_FREEZE,
2583 * let's make another loop to give them a chance to be handled.
2584 * In any case, we'll come back here.
2586 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2587 JOBCTL_TRAP_FREEZE) {
2588 spin_unlock_irq(&current->sighand->siglock);
2593 * Now we're sure that there is no pending fatal signal and no
2594 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2595 * immediately (if there is a non-fatal signal pending), and
2596 * put the task into sleep.
2598 __set_current_state(TASK_INTERRUPTIBLE);
2599 clear_thread_flag(TIF_SIGPENDING);
2600 spin_unlock_irq(&current->sighand->siglock);
2601 cgroup_enter_frozen();
2602 freezable_schedule();
2605 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2608 * We do not check sig_kernel_stop(signr) but set this marker
2609 * unconditionally because we do not know whether debugger will
2610 * change signr. This flag has no meaning unless we are going
2611 * to stop after return from ptrace_stop(). In this case it will
2612 * be checked in do_signal_stop(), we should only stop if it was
2613 * not cleared by SIGCONT while we were sleeping. See also the
2614 * comment in dequeue_signal().
2616 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2617 signr = ptrace_stop(signr, CLD_TRAPPED, 0, 0, info);
2619 /* We're back. Did the debugger cancel the sig? */
2624 * Update the siginfo structure if the signal has
2625 * changed. If the debugger wanted something
2626 * specific in the siginfo structure then it should
2627 * have updated *info via PTRACE_SETSIGINFO.
2629 if (signr != info->si_signo) {
2630 clear_siginfo(info);
2631 info->si_signo = signr;
2633 info->si_code = SI_USER;
2635 info->si_pid = task_pid_vnr(current->parent);
2636 info->si_uid = from_kuid_munged(current_user_ns(),
2637 task_uid(current->parent));
2641 /* If the (new) signal is now blocked, requeue it. */
2642 if (sigismember(&current->blocked, signr) ||
2643 fatal_signal_pending(current)) {
2644 send_signal(signr, info, current, type);
2651 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2653 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2655 case SIL_FAULT_TRAPNO:
2656 case SIL_FAULT_MCEERR:
2657 case SIL_FAULT_BNDERR:
2658 case SIL_FAULT_PKUERR:
2659 case SIL_FAULT_PERF_EVENT:
2660 ksig->info.si_addr = arch_untagged_si_addr(
2661 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2673 bool get_signal(struct ksignal *ksig)
2675 struct sighand_struct *sighand = current->sighand;
2676 struct signal_struct *signal = current->signal;
2679 clear_notify_signal();
2680 if (unlikely(task_work_pending(current)))
2683 if (!task_sigpending(current))
2686 if (unlikely(uprobe_deny_signal()))
2690 * Do this once, we can't return to user-mode if freezing() == T.
2691 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2692 * thus do not need another check after return.
2697 spin_lock_irq(&sighand->siglock);
2700 * Every stopped thread goes here after wakeup. Check to see if
2701 * we should notify the parent, prepare_signal(SIGCONT) encodes
2702 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2704 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2707 if (signal->flags & SIGNAL_CLD_CONTINUED)
2708 why = CLD_CONTINUED;
2712 signal->flags &= ~SIGNAL_CLD_MASK;
2714 spin_unlock_irq(&sighand->siglock);
2717 * Notify the parent that we're continuing. This event is
2718 * always per-process and doesn't make a whole lot of sense
2719 * for ptracers, who shouldn't consume the state via
2720 * wait(2) either, but, for backward compatibility, notify
2721 * the ptracer of the group leader too unless it's gonna be a duplicate.
2724 read_lock(&tasklist_lock);
2725 do_notify_parent_cldstop(current, false, why);
2727 if (ptrace_reparented(current->group_leader))
2728 do_notify_parent_cldstop(current->group_leader,
2730 read_unlock(&tasklist_lock);
2736 struct k_sigaction *ka;
2739 /* Has this task already been marked for death? */
2740 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2741 signal->group_exec_task) {
2742 ksig->info.si_signo = signr = SIGKILL;
2743 sigdelset(&current->pending.signal, SIGKILL);
2744 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2745 &sighand->action[SIGKILL - 1]);
2746 recalc_sigpending();
2750 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2754 if (unlikely(current->jobctl &
2755 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2756 if (current->jobctl & JOBCTL_TRAP_MASK) {
2758 spin_unlock_irq(&sighand->siglock);
2759 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2766 * If the task is leaving the frozen state, let's update
2767 * cgroup counters and reset the frozen bit.
2769 if (unlikely(cgroup_task_frozen(current))) {
2770 spin_unlock_irq(&sighand->siglock);
2771 cgroup_leave_frozen(false);
2776 * Signals generated by the execution of an instruction
2777 * need to be delivered before any other pending signals
2778 * so that the instruction pointer in the signal stack
2779 * frame points to the faulting instruction.
2782 signr = dequeue_synchronous_signal(&ksig->info);
2784 signr = dequeue_signal(current, &current->blocked,
2785 &ksig->info, &type);
2788 break; /* will return 0 */
2790 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2791 !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2792 signr = ptrace_signal(signr, &ksig->info, type);
2797 ka = &sighand->action[signr-1];
2799 /* Trace actually delivered signals. */
2800 trace_signal_deliver(signr, &ksig->info, ka);
2802 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2804 if (ka->sa.sa_handler != SIG_DFL) {
2805 /* Run the handler. */
2808 if (ka->sa.sa_flags & SA_ONESHOT)
2809 ka->sa.sa_handler = SIG_DFL;
2811 break; /* will return non-zero "signr" value */
2815 * Now we are doing the default action for this signal.
2817 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2821 * Global init gets no signals it doesn't want.
2822 * Container-init gets no signals it doesn't want from the same container.
2825 * Note that if global/container-init sees a sig_kernel_only()
2826 * signal here, the signal must have been generated internally
2827 * or must have come from an ancestor namespace. In either
2828 * case, the signal cannot be dropped.
2830 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2831 !sig_kernel_only(signr))
2834 if (sig_kernel_stop(signr)) {
2836 * The default action is to stop all threads in
2837 * the thread group. The job control signals
2838 * do nothing in an orphaned pgrp, but SIGSTOP
2839 * always works. Note that siglock needs to be
2840 * dropped during the call to is_orphaned_pgrp()
2841 * because of lock ordering with tasklist_lock.
2842 * This allows an intervening SIGCONT to be posted.
2843 * We need to check for that and bail out if necessary.
2845 if (signr != SIGSTOP) {
2846 spin_unlock_irq(&sighand->siglock);
2848 /* signals can be posted during this window */
2850 if (is_current_pgrp_orphaned())
2853 spin_lock_irq(&sighand->siglock);
2856 if (likely(do_signal_stop(ksig->info.si_signo))) {
2857 /* It released the siglock. */
2862 * We didn't actually stop, due to a race
2863 * with SIGCONT or something like that.
2869 spin_unlock_irq(&sighand->siglock);
2870 if (unlikely(cgroup_task_frozen(current)))
2871 cgroup_leave_frozen(true);
2874 * Anything else is fatal, maybe with a core dump.
2876 current->flags |= PF_SIGNALED;
2878 if (sig_kernel_coredump(signr)) {
2879 if (print_fatal_signals)
2880 print_fatal_signal(ksig->info.si_signo);
2881 proc_coredump_connector(current);
2883 * If it was able to dump core, this kills all
2884 * other threads in the group and synchronizes with
2885 * their demise. If we lost the race with another
2886 * thread getting here, it set group_exit_code
2887 * first and our do_group_exit call below will use
2888 * that value and ignore the one we pass it.
2890 do_coredump(&ksig->info);
2894 * PF_IO_WORKER threads will catch and exit on fatal signals
2895 * themselves. They have cleanup that must be performed, so
2896 * we cannot call do_exit() on their behalf.
2898 if (current->flags & PF_IO_WORKER)
2902 * Death signals, no core dump.
2904 do_group_exit(ksig->info.si_signo);
2907 spin_unlock_irq(&sighand->siglock);
2911 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2912 hide_si_addr_tag_bits(ksig);
2914 return ksig->sig > 0;
2918 * signal_delivered - called after signal delivery to update blocked signals
2919 * @ksig: kernel signal struct
2920 * @stepping: nonzero if debugger single-step or block-step in use
2922 * This function should be called when a signal has successfully been
2923 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2924 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2925 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2927 static void signal_delivered(struct ksignal *ksig, int stepping)
2931 /* A signal was successfully delivered, and the
2932 saved sigmask was stored on the signal frame,
2933 and will be restored by sigreturn. So we can
2934 simply clear the restore sigmask flag. */
2935 clear_restore_sigmask();
2937 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2938 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2939 sigaddset(&blocked, ksig->sig);
2940 set_current_blocked(&blocked);
2941 if (current->sas_ss_flags & SS_AUTODISARM)
2942 sas_ss_reset(current);
2944 ptrace_notify(SIGTRAP, 0);
2947 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2950 force_sigsegv(ksig->sig);
2952 signal_delivered(ksig, stepping);
2956 * It could be that complete_signal() picked us to notify about the
2957 * group-wide signal. Other threads should be notified now to take
2958 * the shared signals in @which since we will not.
2960 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2963 struct task_struct *t;
2965 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2966 if (sigisemptyset(&retarget))
2970 while_each_thread(tsk, t) {
2971 if (t->flags & PF_EXITING)
2974 if (!has_pending_signals(&retarget, &t->blocked))
2976 /* Remove the signals this thread can handle. */
2977 sigandsets(&retarget, &retarget, &t->blocked);
2979 if (!task_sigpending(t))
2980 signal_wake_up(t, 0);
2982 if (sigisemptyset(&retarget))
2987 void exit_signals(struct task_struct *tsk)
2993 * @tsk is about to have PF_EXITING set - lock out users which
2994 * expect stable threadgroup.
2996 cgroup_threadgroup_change_begin(tsk);
2998 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2999 tsk->flags |= PF_EXITING;
3000 cgroup_threadgroup_change_end(tsk);
3004 spin_lock_irq(&tsk->sighand->siglock);
3006 * From now this task is not visible for group-wide signals,
3007 * see wants_signal(), do_signal_stop().
3009 tsk->flags |= PF_EXITING;
3011 cgroup_threadgroup_change_end(tsk);
3013 if (!task_sigpending(tsk))
3016 unblocked = tsk->blocked;
3017 signotset(&unblocked);
3018 retarget_shared_pending(tsk, &unblocked);
3020 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3021 task_participate_group_stop(tsk))
3022 group_stop = CLD_STOPPED;
3024 spin_unlock_irq(&tsk->sighand->siglock);
3027 * If group stop has completed, deliver the notification. This
3028 * should always go to the real parent of the group leader.
3030 if (unlikely(group_stop)) {
3031 read_lock(&tasklist_lock);
3032 do_notify_parent_cldstop(tsk, false, group_stop);
3033 read_unlock(&tasklist_lock);
3038 * System call entry points.
3042 * sys_restart_syscall - restart a system call
3044 SYSCALL_DEFINE0(restart_syscall)
3046 struct restart_block *restart = &current->restart_block;
3047 return restart->fn(restart);
3050 long do_no_restart_syscall(struct restart_block *param)
3055 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3057 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3058 sigset_t newblocked;
3059 /* A set of now blocked but previously unblocked signals. */
3060 sigandnsets(&newblocked, newset, &current->blocked);
3061 retarget_shared_pending(tsk, &newblocked);
3063 tsk->blocked = *newset;
3064 recalc_sigpending();
3068 * set_current_blocked - change current->blocked mask
3071 * It is wrong to change ->blocked directly, this helper should be used
3072 * to ensure the process can't miss a shared signal we are going to block.
3074 void set_current_blocked(sigset_t *newset)
3076 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3077 __set_current_blocked(newset);
3080 void __set_current_blocked(const sigset_t *newset)
3082 struct task_struct *tsk = current;
3085 * In case the signal mask hasn't changed, there is nothing we need
3086 * to do. The current->blocked shouldn't be modified by another task.
3088 if (sigequalsets(&tsk->blocked, newset))
3091 spin_lock_irq(&tsk->sighand->siglock);
3092 __set_task_blocked(tsk, newset);
3093 spin_unlock_irq(&tsk->sighand->siglock);
3097 * This is also useful for kernel threads that want to temporarily
3098 * (or permanently) block certain signals.
3100 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3101 * interface happily blocks "unblockable" signals like SIGKILL and SIGSTOP.
3104 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3106 struct task_struct *tsk = current;
3109 /* Lockless, only current can change ->blocked, never from irq */
3111 *oldset = tsk->blocked;
3115 sigorsets(&newset, &tsk->blocked, set);
3118 sigandnsets(&newset, &tsk->blocked, set);
3127 __set_current_blocked(&newset);
3130 EXPORT_SYMBOL(sigprocmask);
3133 * This API helps set app-provided sigmasks.
3135 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3136 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3138 * Note that it does set_restore_sigmask() in advance, so it must always be
3139 * paired with restore_saved_sigmask_unless() before return from syscall.
3141 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3147 if (sigsetsize != sizeof(sigset_t))
3149 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3152 set_restore_sigmask();
3153 current->saved_sigmask = current->blocked;
3154 set_current_blocked(&kmask);
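/*
 * Editor's note: an illustrative userspace sketch (not part of this file)
 * of the path above. pselect(2) hands the kernel a temporary sigmask that
 * set_user_sigmask() installs for the duration of the wait; the saved mask
 * is restored on the way back out, so a handler can run during the wait
 * without the usual unblock-then-sleep race.
 *
 *	#include <sys/select.h>
 *	#include <signal.h>
 *
 *	sigset_t empty;
 *	fd_set rfds;
 *	sigemptyset(&empty);
 *	FD_ZERO(&rfds);
 *	FD_SET(0, &rfds);
 *	// wait on stdin with all signals temporarily unblocked
 *	int n = pselect(1, &rfds, NULL, NULL, NULL, &empty);
 */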
3159 #ifdef CONFIG_COMPAT
3160 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3167 if (sigsetsize != sizeof(compat_sigset_t))
3169 if (get_compat_sigset(&kmask, umask))
3172 set_restore_sigmask();
3173 current->saved_sigmask = current->blocked;
3174 set_current_blocked(&kmask);
3181 * sys_rt_sigprocmask - change the list of currently blocked signals
3182 * @how: whether to add, remove, or set signals
3183 * @nset: new set of blocked signals, or NULL to leave the mask unchanged
3184 * @oset: previous value of signal mask if non-null
3185 * @sigsetsize: size of sigset_t type
3187 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3188 sigset_t __user *, oset, size_t, sigsetsize)
3190 sigset_t old_set, new_set;
3193 /* XXX: Don't preclude handling different sized sigset_t's. */
3194 if (sigsetsize != sizeof(sigset_t))
3197 old_set = current->blocked;
3200 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3202 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3204 error = sigprocmask(how, &new_set, NULL);
3210 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
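/*
 * Editor's note: an illustrative userspace sketch (not part of this file).
 * glibc's sigprocmask(3) lands in the syscall above; the classic pattern
 * blocks a signal around a critical region and then restores the old mask:
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// SIGINT now stays pending
 *	// ... critical region ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore previous mask
 */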
3217 #ifdef CONFIG_COMPAT
3218 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3219 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3221 sigset_t old_set = current->blocked;
3223 /* XXX: Don't preclude handling different sized sigset_t's. */
3224 if (sigsetsize != sizeof(sigset_t))
3230 if (get_compat_sigset(&new_set, nset))
3232 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3234 error = sigprocmask(how, &new_set, NULL);
3238 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3242 static void do_sigpending(sigset_t *set)
3244 spin_lock_irq(&current->sighand->siglock);
3245 sigorsets(set, &current->pending.signal,
3246 &current->signal->shared_pending.signal);
3247 spin_unlock_irq(&current->sighand->siglock);
3249 /* Outside the lock because only this thread touches it. */
3250 sigandsets(set, &current->blocked, set);
3254 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
3256 * @uset: stores pending signals
3257 * @sigsetsize: size of sigset_t type or larger
3259 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3263 if (sigsetsize > sizeof(*uset))
3266 do_sigpending(&set);
3268 if (copy_to_user(uset, &set, sigsetsize))
3274 #ifdef CONFIG_COMPAT
3275 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3276 compat_size_t, sigsetsize)
3280 if (sigsetsize > sizeof(*uset))
3283 do_sigpending(&set);
3285 return put_compat_sigset(uset, &set, sigsetsize);
3289 static const struct {
3290 unsigned char limit, layout;
} sig_sicodes[] = {
3292 [SIGILL] = { NSIGILL, SIL_FAULT },
3293 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3294 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3295 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3296 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3298 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3300 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3301 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3302 [SIGSYS] = { NSIGSYS, SIL_SYS },
3305 static bool known_siginfo_layout(unsigned sig, int si_code)
3307 if (si_code == SI_KERNEL)
3309 else if (si_code > SI_USER) {
3310 if (sig_specific_sicodes(sig)) {
3311 if (si_code <= sig_sicodes[sig].limit)
3314 else if (si_code <= NSIGPOLL)
3317 else if (si_code >= SI_DETHREAD)
3319 else if (si_code == SI_ASYNCNL)
3324 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3326 enum siginfo_layout layout = SIL_KILL;
3327 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3328 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3329 (si_code <= sig_sicodes[sig].limit)) {
3330 layout = sig_sicodes[sig].layout;
3331 /* Handle the exceptions */
3332 if ((sig == SIGBUS) &&
3333 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3334 layout = SIL_FAULT_MCEERR;
3335 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3336 layout = SIL_FAULT_BNDERR;
3338 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3339 layout = SIL_FAULT_PKUERR;
3341 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3342 layout = SIL_FAULT_PERF_EVENT;
3343 else if (IS_ENABLED(CONFIG_SPARC) &&
3344 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3345 layout = SIL_FAULT_TRAPNO;
3346 else if (IS_ENABLED(CONFIG_ALPHA) &&
3348 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3349 layout = SIL_FAULT_TRAPNO;
3351 else if (si_code <= NSIGPOLL)
3354 if (si_code == SI_TIMER)
3356 else if (si_code == SI_SIGIO)
3358 else if (si_code < 0)
3364 static inline char __user *si_expansion(const siginfo_t __user *info)
3366 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3369 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3371 char __user *expansion = si_expansion(to);
3372 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3374 if (clear_user(expansion, SI_EXPANSION_SIZE))
3379 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3380 const siginfo_t __user *from)
3382 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3383 char __user *expansion = si_expansion(from);
3384 char buf[SI_EXPANSION_SIZE];
3387 * An unknown si_code might need more than
3388 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3389 * extra bytes are 0. This guarantees copy_siginfo_to_user
3390 * will return this data to userspace exactly.
3392 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3394 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3402 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3403 const siginfo_t __user *from)
3405 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3407 to->si_signo = signo;
3408 return post_copy_siginfo_from_user(to, from);
3411 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3413 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3415 return post_copy_siginfo_from_user(to, from);
3418 #ifdef CONFIG_COMPAT
3420 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3421 * @to: compat siginfo destination
3422 * @from: kernel siginfo source
3424 * Note: This function does not work properly for SIGCHLD on x32, but
3425 * fortunately it doesn't have to. The only valid callers for this function are
3426 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3427 * The latter does not care because SIGCHLD will never cause a coredump.
3429 void copy_siginfo_to_external32(struct compat_siginfo *to,
3430 const struct kernel_siginfo *from)
3432 memset(to, 0, sizeof(*to));
3434 to->si_signo = from->si_signo;
3435 to->si_errno = from->si_errno;
3436 to->si_code = from->si_code;
3437 switch (siginfo_layout(from->si_signo, from->si_code)) {
3439 to->si_pid = from->si_pid;
3440 to->si_uid = from->si_uid;
3443 to->si_tid = from->si_tid;
3444 to->si_overrun = from->si_overrun;
3445 to->si_int = from->si_int;
3448 to->si_band = from->si_band;
3449 to->si_fd = from->si_fd;
3452 to->si_addr = ptr_to_compat(from->si_addr);
3454 case SIL_FAULT_TRAPNO:
3455 to->si_addr = ptr_to_compat(from->si_addr);
3456 to->si_trapno = from->si_trapno;
3458 case SIL_FAULT_MCEERR:
3459 to->si_addr = ptr_to_compat(from->si_addr);
3460 to->si_addr_lsb = from->si_addr_lsb;
3462 case SIL_FAULT_BNDERR:
3463 to->si_addr = ptr_to_compat(from->si_addr);
3464 to->si_lower = ptr_to_compat(from->si_lower);
3465 to->si_upper = ptr_to_compat(from->si_upper);
3467 case SIL_FAULT_PKUERR:
3468 to->si_addr = ptr_to_compat(from->si_addr);
3469 to->si_pkey = from->si_pkey;
3471 case SIL_FAULT_PERF_EVENT:
3472 to->si_addr = ptr_to_compat(from->si_addr);
3473 to->si_perf_data = from->si_perf_data;
3474 to->si_perf_type = from->si_perf_type;
3477 to->si_pid = from->si_pid;
3478 to->si_uid = from->si_uid;
3479 to->si_status = from->si_status;
3480 to->si_utime = from->si_utime;
3481 to->si_stime = from->si_stime;
3484 to->si_pid = from->si_pid;
3485 to->si_uid = from->si_uid;
3486 to->si_int = from->si_int;
3489 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3490 to->si_syscall = from->si_syscall;
3491 to->si_arch = from->si_arch;
3496 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3497 const struct kernel_siginfo *from)
3499 struct compat_siginfo new;
3501 copy_siginfo_to_external32(&new, from);
3502 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3507 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3508 const struct compat_siginfo *from)
3511 to->si_signo = from->si_signo;
3512 to->si_errno = from->si_errno;
3513 to->si_code = from->si_code;
3514 switch (siginfo_layout(from->si_signo, from->si_code)) {
3516 to->si_pid = from->si_pid;
3517 to->si_uid = from->si_uid;
3520 to->si_tid = from->si_tid;
3521 to->si_overrun = from->si_overrun;
3522 to->si_int = from->si_int;
3525 to->si_band = from->si_band;
3526 to->si_fd = from->si_fd;
3529 to->si_addr = compat_ptr(from->si_addr);
3531 case SIL_FAULT_TRAPNO:
3532 to->si_addr = compat_ptr(from->si_addr);
3533 to->si_trapno = from->si_trapno;
3535 case SIL_FAULT_MCEERR:
3536 to->si_addr = compat_ptr(from->si_addr);
3537 to->si_addr_lsb = from->si_addr_lsb;
3539 case SIL_FAULT_BNDERR:
3540 to->si_addr = compat_ptr(from->si_addr);
3541 to->si_lower = compat_ptr(from->si_lower);
3542 to->si_upper = compat_ptr(from->si_upper);
3544 case SIL_FAULT_PKUERR:
3545 to->si_addr = compat_ptr(from->si_addr);
3546 to->si_pkey = from->si_pkey;
3548 case SIL_FAULT_PERF_EVENT:
3549 to->si_addr = compat_ptr(from->si_addr);
3550 to->si_perf_data = from->si_perf_data;
3551 to->si_perf_type = from->si_perf_type;
3554 to->si_pid = from->si_pid;
3555 to->si_uid = from->si_uid;
3556 to->si_status = from->si_status;
3557 #ifdef CONFIG_X86_X32_ABI
3558 if (in_x32_syscall()) {
3559 to->si_utime = from->_sifields._sigchld_x32._utime;
3560 to->si_stime = from->_sifields._sigchld_x32._stime;
3564 to->si_utime = from->si_utime;
3565 to->si_stime = from->si_stime;
3569 to->si_pid = from->si_pid;
3570 to->si_uid = from->si_uid;
3571 to->si_int = from->si_int;
3574 to->si_call_addr = compat_ptr(from->si_call_addr);
3575 to->si_syscall = from->si_syscall;
3576 to->si_arch = from->si_arch;
3582 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3583 const struct compat_siginfo __user *ufrom)
3585 struct compat_siginfo from;
3587 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3590 from.si_signo = signo;
3591 return post_copy_siginfo_from_user32(to, &from);
3594 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3595 const struct compat_siginfo __user *ufrom)
3597 struct compat_siginfo from;
3599 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3602 return post_copy_siginfo_from_user32(to, &from);
3604 #endif /* CONFIG_COMPAT */
3607 * do_sigtimedwait - wait for queued signals specified in @which
3608 * @which: queued signals to wait for
3609 * @info: if non-null, the signal's siginfo is returned here
3610 * @ts: upper bound on process time suspension
3612 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3613 const struct timespec64 *ts)
3615 ktime_t *to = NULL, timeout = KTIME_MAX;
3616 struct task_struct *tsk = current;
3617 sigset_t mask = *which;
3622 if (!timespec64_valid(ts))
3624 timeout = timespec64_to_ktime(*ts);
3629 * Invert the set of allowed signals to get those we want to block.
3631 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3634 spin_lock_irq(&tsk->sighand->siglock);
3635 sig = dequeue_signal(tsk, &mask, info, &type);
3636 if (!sig && timeout) {
3638 * None ready, temporarily unblock those we're interested in
3639 * while we are sleeping, so that we'll be awakened when
3640 * they arrive. Unblocking is always fine, we can avoid
3641 * set_current_blocked().
3643 tsk->real_blocked = tsk->blocked;
3644 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3645 recalc_sigpending();
3646 spin_unlock_irq(&tsk->sighand->siglock);
3648 __set_current_state(TASK_INTERRUPTIBLE);
3649 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3651 spin_lock_irq(&tsk->sighand->siglock);
3652 __set_task_blocked(tsk, &tsk->real_blocked);
3653 sigemptyset(&tsk->real_blocked);
3654 sig = dequeue_signal(tsk, &mask, info, &type);
3656 spin_unlock_irq(&tsk->sighand->siglock);
3660 return ret ? -EINTR : -EAGAIN;
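/*
 * Editor's note: an illustrative userspace sketch (not part of this file)
 * of synchronous signal handling built on this helper. The signal should
 * be blocked first so it is queued rather than delivered to a handler:
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	int sig = sigtimedwait(&set, &si, &ts);	// -1 with errno EAGAIN on timeout
 */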
3664 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3666 * @uthese: queued signals to wait for
3667 * @uinfo: if non-null, the signal's siginfo is returned here
3668 * @uts: upper bound on process time suspension
3669 * @sigsetsize: size of sigset_t type
3671 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3672 siginfo_t __user *, uinfo,
3673 const struct __kernel_timespec __user *, uts,
3677 struct timespec64 ts;
3678 kernel_siginfo_t info;
3681 /* XXX: Don't preclude handling different sized sigset_t's. */
3682 if (sigsetsize != sizeof(sigset_t))
3685 if (copy_from_user(&these, uthese, sizeof(these)))
3689 if (get_timespec64(&ts, uts))
3693 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3695 if (ret > 0 && uinfo) {
3696 if (copy_siginfo_to_user(uinfo, &info))
3703 #ifdef CONFIG_COMPAT_32BIT_TIME
3704 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3705 siginfo_t __user *, uinfo,
3706 const struct old_timespec32 __user *, uts,
3710 struct timespec64 ts;
3711 kernel_siginfo_t info;
3714 if (sigsetsize != sizeof(sigset_t))
3717 if (copy_from_user(&these, uthese, sizeof(these)))
3721 if (get_old_timespec32(&ts, uts))
3725 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3727 if (ret > 0 && uinfo) {
3728 if (copy_siginfo_to_user(uinfo, &info))
3736 #ifdef CONFIG_COMPAT
3737 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3738 struct compat_siginfo __user *, uinfo,
3739 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3742 struct timespec64 t;
3743 kernel_siginfo_t info;
3746 if (sigsetsize != sizeof(sigset_t))
3749 if (get_compat_sigset(&s, uthese))
3753 if (get_timespec64(&t, uts))
3757 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3759 if (ret > 0 && uinfo) {
3760 if (copy_siginfo_to_user32(uinfo, &info))
3767 #ifdef CONFIG_COMPAT_32BIT_TIME
3768 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3769 struct compat_siginfo __user *, uinfo,
3770 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3773 struct timespec64 t;
3774 kernel_siginfo_t info;
3777 if (sigsetsize != sizeof(sigset_t))
3780 if (get_compat_sigset(&s, uthese))
3784 if (get_old_timespec32(&t, uts))
3788 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3790 if (ret > 0 && uinfo) {
3791 if (copy_siginfo_to_user32(uinfo, &info))
3800 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3802 clear_siginfo(info);
3803 info->si_signo = sig;
3805 info->si_code = SI_USER;
3806 info->si_pid = task_tgid_vnr(current);
3807 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3811 * sys_kill - send a signal to a process
3812 * @pid: the PID of the process
3813 * @sig: signal to be sent
3815 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3817 struct kernel_siginfo info;
3819 prepare_kill_siginfo(sig, &info);
3821 return kill_something_info(sig, &info, pid);
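/*
 * Editor's note: the pid argument selects the scope that
 * kill_something_info() resolves (standard kill(2) semantics):
 *
 *	kill(1234, SIGTERM);	// exactly one process
 *	kill(-1234, SIGTERM);	// every process in process group 1234
 *	kill(0, SIGTERM);	// the caller's own process group
 *	kill(-1, SIGTERM);	// all processes the caller may signal
 *	kill(1234, 0);		// existence/permission probe, nothing sent
 */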
3825 * Verify that the signaler and signalee are either in the same pid namespace
3826 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3829 static bool access_pidfd_pidns(struct pid *pid)
3831 struct pid_namespace *active = task_active_pid_ns(current);
3832 struct pid_namespace *p = ns_of_pid(pid);
3845 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3846 siginfo_t __user *info)
3848 #ifdef CONFIG_COMPAT
3850 * Avoid hooking up compat syscalls and instead handle necessary
3851 * conversions here. Note, this is a stop-gap measure and should not be
3852 * considered a generic solution.
3854 if (in_compat_syscall())
3855 return copy_siginfo_from_user32(
3856 kinfo, (struct compat_siginfo __user *)info);
3858 return copy_siginfo_from_user(kinfo, info);
3861 static struct pid *pidfd_to_pid(const struct file *file)
3865 pid = pidfd_pid(file);
3869 return tgid_pidfd_to_pid(file);
3873 * sys_pidfd_send_signal - Signal a process through a pidfd
3874 * @pidfd: file descriptor of the process
3875 * @sig: signal to send
3876 * @info: signal info
3877 * @flags: future flags
3879 * The syscall currently only signals via PIDTYPE_PID which covers
3880 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3882 * In order to extend the syscall to threads and process groups the @flags
3883 * argument should be used. In essence, the @flags argument will determine
3884 * what is signaled and not the file descriptor itself. Put in other words,
3885 * grouping is a property of the flags argument not a property of the file
3888 * Return: 0 on success, negative errno on failure
3890 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3891 siginfo_t __user *, info, unsigned int, flags)
3896 kernel_siginfo_t kinfo;
3898 /* Enforce that flags are 0 until we add an extension. */
3906 /* Is this a pidfd? */
3907 pid = pidfd_to_pid(f.file);
3914 if (!access_pidfd_pidns(pid))
3918 ret = copy_siginfo_from_user_any(&kinfo, info);
3923 if (unlikely(sig != kinfo.si_signo))
3926 /* Only allow sending arbitrary signals to yourself. */
3928 if ((task_pid(current) != pid) &&
3929 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3932 prepare_kill_siginfo(sig, &kinfo);
3935 ret = kill_pid_info(sig, &kinfo, pid);
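/*
 * Editor's note: an illustrative userspace sketch (not part of this file).
 * Signaling through a pidfd avoids PID-reuse races; shown with raw
 * syscall(2) for libcs without wrappers:
 *
 *	#include <sys/syscall.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	if (pidfd >= 0) {
 *		// NULL info: the kernel fills in SI_USER details, as kill() does
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */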
3943 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3945 struct task_struct *p;
3949 p = find_task_by_vpid(pid);
3950 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3951 error = check_kill_permission(sig, info, p);
3953 * The null signal is a permissions and process existence
3954 * probe. No signal is actually delivered.
3956 if (!error && sig) {
3957 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3959 * If lock_task_sighand() failed we pretend the task
3960 * dies after receiving the signal. The window is tiny,
3961 * and the signal is private anyway.
3963 if (unlikely(error == -ESRCH))
3972 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3974 struct kernel_siginfo info;
3976 clear_siginfo(&info);
3977 info.si_signo = sig;
3979 info.si_code = SI_TKILL;
3980 info.si_pid = task_tgid_vnr(current);
3981 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3983 return do_send_specific(tgid, pid, sig, &info);
3987 * sys_tgkill - send signal to one specific thread
3988 * @tgid: the thread group ID of the thread
3989 * @pid: the PID of the thread
3990 * @sig: signal to be sent
3992 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3993 * exists but no longer belongs to the target process. This
3994 * method solves the problem of threads exiting and PIDs getting reused.
3996 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3998 /* This is only valid for single tasks */
3999 if (pid <= 0 || tgid <= 0)
4002 return do_tkill(tgid, pid, sig);
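/*
 * Editor's note: an illustrative userspace sketch (not part of this file).
 * glibc exposes this per-thread delivery as pthread_kill(); the raw form:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = syscall(SYS_gettid);
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */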
4006 * sys_tkill - send signal to one specific task
4007 * @pid: the PID of the task
4008 * @sig: signal to be sent
4010 * Send a signal to only one task, even if it's a CLONE_THREAD task.
4012 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4014 /* This is only valid for single tasks */
4018 return do_tkill(0, pid, sig);
4021 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4023 /* Not even root can pretend to send signals from the kernel.
4024 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4026 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4027 (task_pid_vnr(current) != pid))
4030 /* POSIX.1b doesn't mention process groups. */
4031 return kill_proc_info(sig, info, pid);
4035 * sys_rt_sigqueueinfo - send signal information to a process
4036 * @pid: the PID of the thread
4037 * @sig: signal to be sent
4038 * @uinfo: signal info to be sent
4040 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4041 siginfo_t __user *, uinfo)
4043 kernel_siginfo_t info;
4044 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4047 return do_rt_sigqueueinfo(pid, sig, &info);
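/*
 * Editor's note: an illustrative userspace sketch (not part of this file).
 * glibc's sigqueue(3) builds the siginfo (si_code = SI_QUEUE) and calls
 * rt_sigqueueinfo; a SA_SIGINFO handler in the receiver sees the payload:
 *
 *	#include <signal.h>
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGUSR1, v);	// receiver: si->si_value.sival_int == 42
 */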
4050 #ifdef CONFIG_COMPAT
4051 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4054 struct compat_siginfo __user *, uinfo)
4056 kernel_siginfo_t info;
4057 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4060 return do_rt_sigqueueinfo(pid, sig, &info);
4064 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4066 /* This is only valid for single tasks */
4067 if (pid <= 0 || tgid <= 0)
4070 /* Not even root can pretend to send signals from the kernel.
4071 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4073 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4074 (task_pid_vnr(current) != pid))
4077 return do_send_specific(tgid, pid, sig, info);
4080 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4081 siginfo_t __user *, uinfo)
4083 kernel_siginfo_t info;
4084 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4087 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4090 #ifdef CONFIG_COMPAT
4091 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4095 struct compat_siginfo __user *, uinfo)
4097 kernel_siginfo_t info;
4098 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4101 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4106 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4108 void kernel_sigaction(int sig, __sighandler_t action)
4110 spin_lock_irq(&current->sighand->siglock);
4111 current->sighand->action[sig - 1].sa.sa_handler = action;
4112 if (action == SIG_IGN) {
4116 sigaddset(&mask, sig);
4118 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4119 flush_sigqueue_mask(&mask, &current->pending);
4120 recalc_sigpending();
4122 spin_unlock_irq(&current->sighand->siglock);
4124 EXPORT_SYMBOL(kernel_sigaction);
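/*
 * Editor's note: in-kernel callers normally reach this helper through the
 * allow_signal()/disallow_signal() convenience wrappers (believed to be
 * defined in <linux/signal.h> in terms of kernel_sigaction()):
 *
 *	allow_signal(SIGTERM);		// kernel_sigaction(SIGTERM, SIG_DFL)
 *	// ... kthread loop handling signal_pending() itself ...
 *	disallow_signal(SIGTERM);	// kernel_sigaction(SIGTERM, SIG_IGN)
 */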
4126 void __weak sigaction_compat_abi(struct k_sigaction *act,
4127 struct k_sigaction *oact)
4131 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4133 struct task_struct *p = current, *t;
4134 struct k_sigaction *k;
4137 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4140 k = &p->sighand->action[sig-1];
4142 spin_lock_irq(&p->sighand->siglock);
4143 if (k->sa.sa_flags & SA_IMMUTABLE) {
4144 spin_unlock_irq(&p->sighand->siglock);
4151 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4152 * e.g. by having an architecture use the bit in their uapi.
4154 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4157 * Clear unknown flag bits in order to allow userspace to detect missing
4158 * support for flag bits and to allow the kernel to use non-uapi bits internally.
4162 act->sa.sa_flags &= UAPI_SA_FLAGS;
4164 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4166 sigaction_compat_abi(act, oact);
4169 sigdelsetmask(&act->sa.sa_mask,
4170 sigmask(SIGKILL) | sigmask(SIGSTOP));
4174 * "Setting a signal action to SIG_IGN for a signal that is
4175 * pending shall cause the pending signal to be discarded,
4176 * whether or not it is blocked."
4178 * "Setting a signal action to SIG_DFL for a signal that is
4179 * pending and whose default action is to ignore the signal
4180 * (for example, SIGCHLD), shall cause the pending signal to
4181 * be discarded, whether or not it is blocked"
4183 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4185 sigaddset(&mask, sig);
4186 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4187 for_each_thread(p, t)
4188 flush_sigqueue_mask(&mask, &t->pending);
4192 spin_unlock_irq(&p->sighand->siglock);
4196 #ifdef CONFIG_DYNAMIC_SIGFRAME
4197 static inline void sigaltstack_lock(void)
4198 __acquires(&current->sighand->siglock)
4200 spin_lock_irq(&current->sighand->siglock);
4203 static inline void sigaltstack_unlock(void)
4204 __releases(&current->sighand->siglock)
4206 spin_unlock_irq(&current->sighand->siglock);
4209 static inline void sigaltstack_lock(void) { }
4210 static inline void sigaltstack_unlock(void) { }
4214 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4217 struct task_struct *t = current;
4221 memset(oss, 0, sizeof(stack_t));
4222 oss->ss_sp = (void __user *) t->sas_ss_sp;
4223 oss->ss_size = t->sas_ss_size;
4224 oss->ss_flags = sas_ss_flags(sp) |
4225 (current->sas_ss_flags & SS_FLAG_BITS);
4229 void __user *ss_sp = ss->ss_sp;
4230 size_t ss_size = ss->ss_size;
4231 unsigned ss_flags = ss->ss_flags;
4234 if (unlikely(on_sig_stack(sp)))
4237 ss_mode = ss_flags & ~SS_FLAG_BITS;
4238 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4243 * Return before taking any locks if no actual
4244 * sigaltstack changes were requested.
4246 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4247 t->sas_ss_size == ss_size &&
4248 t->sas_ss_flags == ss_flags)
4252 if (ss_mode == SS_DISABLE) {
4256 if (unlikely(ss_size < min_ss_size))
4258 if (!sigaltstack_size_valid(ss_size))
4262 t->sas_ss_sp = (unsigned long) ss_sp;
4263 t->sas_ss_size = ss_size;
4264 t->sas_ss_flags = ss_flags;
4266 sigaltstack_unlock();
4271 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4275 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4277 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4278 current_user_stack_pointer(),
4280 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
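/*
 * Editor's note: an illustrative userspace sketch (not part of this file).
 * An alternate stack lets a SIGSEGV handler run even after the main stack
 * overflows; pair it with a SA_ONSTACK sigaction:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *	// then: sigaction(SIGSEGV, &sa, NULL) with sa.sa_flags |= SA_ONSTACK
 */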
4285 int restore_altstack(const stack_t __user *uss)
4288 if (copy_from_user(&new, uss, sizeof(stack_t)))
4290 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4292 /* squash all but EFAULT for now */
4296 int __save_altstack(stack_t __user *uss, unsigned long sp)
4298 struct task_struct *t = current;
4299 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4300 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4301 __put_user(t->sas_ss_size, &uss->ss_size);
4305 #ifdef CONFIG_COMPAT
4306 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4307 compat_stack_t __user *uoss_ptr)
4313 compat_stack_t uss32;
4314 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4316 uss.ss_sp = compat_ptr(uss32.ss_sp);
4317 uss.ss_flags = uss32.ss_flags;
4318 uss.ss_size = uss32.ss_size;
4320 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4321 compat_user_stack_pointer(),
4322 COMPAT_MINSIGSTKSZ);
4323 if (ret >= 0 && uoss_ptr) {
4325 memset(&old, 0, sizeof(old));
4326 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4327 old.ss_flags = uoss.ss_flags;
4328 old.ss_size = uoss.ss_size;
4329 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4335 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4336 const compat_stack_t __user *, uss_ptr,
4337 compat_stack_t __user *, uoss_ptr)
4339 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4342 int compat_restore_altstack(const compat_stack_t __user *uss)
4344 int err = do_compat_sigaltstack(uss, NULL);
4345 /* squash all but -EFAULT for now */
4346 return err == -EFAULT ? err : 0;
4349 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4352 struct task_struct *t = current;
4353 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4355 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4356 __put_user(t->sas_ss_size, &uss->ss_size);
4361 #ifdef __ARCH_WANT_SYS_SIGPENDING
4364 * sys_sigpending - examine pending signals
4365 * @uset: where the mask of pending signals is returned
4367 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4371 if (sizeof(old_sigset_t) > sizeof(*uset))
4374 do_sigpending(&set);
4376 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4382 #ifdef CONFIG_COMPAT
4383 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4387 do_sigpending(&set);
4389 return put_user(set.sig[0], set32);
4395 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4397 * sys_sigprocmask - examine and change blocked signals
4398 * @how: whether to add, remove, or set signals
4399 * @nset: signals to add or remove (if non-null)
4400 * @oset: previous value of signal mask if non-null
4402 * Some platforms have their own version with special arguments;
4403 * others support only sys_rt_sigprocmask.
4406 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4407 old_sigset_t __user *, oset)
4409 old_sigset_t old_set, new_set;
4410 sigset_t new_blocked;
4412 old_set = current->blocked.sig[0];
4415 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4418 new_blocked = current->blocked;
4422 sigaddsetmask(&new_blocked, new_set);
4425 sigdelsetmask(&new_blocked, new_set);
4428 new_blocked.sig[0] = new_set;
4434 set_current_blocked(&new_blocked);
4438 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4444 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4446 #ifndef CONFIG_ODD_RT_SIGACTION
4448 * sys_rt_sigaction - alter an action taken by a process
4449 * @sig: signal whose action is to be changed
4450 * @act: new sigaction
4451 * @oact: used to save the previous sigaction
4452 * @sigsetsize: size of sigset_t type
4454 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4455 const struct sigaction __user *, act,
4456 struct sigaction __user *, oact,
4459 struct k_sigaction new_sa, old_sa;
4462 /* XXX: Don't preclude handling different sized sigset_t's. */
4463 if (sigsetsize != sizeof(sigset_t))
4466 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4469 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4473 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
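/*
 * Editor's note: an illustrative userspace sketch (not part of this file).
 * glibc's sigaction(3) wrapper reaches this syscall; installing a
 * SA_SIGINFO handler looks like:
 *
 *	#include <signal.h>
 *
 *	static void on_sig(int sig, siginfo_t *si, void *uctx) { }
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_sigaction = on_sig;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */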
4478 #ifdef CONFIG_COMPAT
4479 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4480 const struct compat_sigaction __user *, act,
4481 struct compat_sigaction __user *, oact,
4482 compat_size_t, sigsetsize)
4484 struct k_sigaction new_ka, old_ka;
4485 #ifdef __ARCH_HAS_SA_RESTORER
4486 compat_uptr_t restorer;
4490 /* XXX: Don't preclude handling different sized sigset_t's. */
4491 if (sigsetsize != sizeof(compat_sigset_t))
4495 compat_uptr_t handler;
4496 ret = get_user(handler, &act->sa_handler);
4497 new_ka.sa.sa_handler = compat_ptr(handler);
4498 #ifdef __ARCH_HAS_SA_RESTORER
4499 ret |= get_user(restorer, &act->sa_restorer);
4500 new_ka.sa.sa_restorer = compat_ptr(restorer);
4502 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4503 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4508 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4510 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4512 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4513 sizeof(oact->sa_mask));
4514 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4515 #ifdef __ARCH_HAS_SA_RESTORER
4516 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4517 &oact->sa_restorer);
4523 #endif /* !CONFIG_ODD_RT_SIGACTION */
4525 #ifdef CONFIG_OLD_SIGACTION
4526 SYSCALL_DEFINE3(sigaction, int, sig,
4527 const struct old_sigaction __user *, act,
4528 struct old_sigaction __user *, oact)
4530 struct k_sigaction new_ka, old_ka;
4535 if (!access_ok(act, sizeof(*act)) ||
4536 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4537 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4538 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4539 __get_user(mask, &act->sa_mask))
4541 #ifdef __ARCH_HAS_KA_RESTORER
4542 new_ka.ka_restorer = NULL;
4544 siginitset(&new_ka.sa.sa_mask, mask);
4547 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4550 if (!access_ok(oact, sizeof(*oact)) ||
4551 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4552 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4553 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4554 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4561 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4562 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4563 const struct compat_old_sigaction __user *, act,
4564 struct compat_old_sigaction __user *, oact)
4566 struct k_sigaction new_ka, old_ka;
4568 compat_old_sigset_t mask;
4569 compat_uptr_t handler, restorer;
4572 if (!access_ok(act, sizeof(*act)) ||
4573 __get_user(handler, &act->sa_handler) ||
4574 __get_user(restorer, &act->sa_restorer) ||
4575 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4576 __get_user(mask, &act->sa_mask))
4579 #ifdef __ARCH_HAS_KA_RESTORER
4580 new_ka.ka_restorer = NULL;
4582 new_ka.sa.sa_handler = compat_ptr(handler);
4583 new_ka.sa.sa_restorer = compat_ptr(restorer);
4584 siginitset(&new_ka.sa.sa_mask, mask);
4587 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4590 if (!access_ok(oact, sizeof(*oact)) ||
4591 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4592 &oact->sa_handler) ||
4593 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4594 &oact->sa_restorer) ||
4595 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4596 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4603 #ifdef CONFIG_SGETMASK_SYSCALL
4606 * For backwards compatibility. Functionality superseded by sigprocmask.
4608 SYSCALL_DEFINE0(sgetmask)
4611 return current->blocked.sig[0];
4614 SYSCALL_DEFINE1(ssetmask, int, newmask)
4616 int old = current->blocked.sig[0];
4619 siginitset(&newset, newmask);
4620 set_current_blocked(&newset);
4624 #endif /* CONFIG_SGETMASK_SYSCALL */
4626 #ifdef __ARCH_WANT_SYS_SIGNAL
4628 * For backwards compatibility. Functionality superseded by sigaction.
4630 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4632 struct k_sigaction new_sa, old_sa;
4635 new_sa.sa.sa_handler = handler;
4636 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4637 sigemptyset(&new_sa.sa.sa_mask);
4639 ret = do_sigaction(sig, &new_sa, &old_sa);
4641 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4643 #endif /* __ARCH_WANT_SYS_SIGNAL */
4645 #ifdef __ARCH_WANT_SYS_PAUSE
4647 SYSCALL_DEFINE0(pause)
4649 while (!signal_pending(current)) {
4650 __set_current_state(TASK_INTERRUPTIBLE);
4653 return -ERESTARTNOHAND;
4658 static int sigsuspend(sigset_t *set)
4660 current->saved_sigmask = current->blocked;
4661 set_current_blocked(set);
4663 while (!signal_pending(current)) {
4664 __set_current_state(TASK_INTERRUPTIBLE);
4667 set_restore_sigmask();
4668 return -ERESTARTNOHAND;
4672 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4673 * until a signal is received
4674 * @unewset: new signal mask value
4675 * @sigsetsize: size of sigset_t type
4677 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4681 /* XXX: Don't preclude handling different sized sigset_t's. */
4682 if (sigsetsize != sizeof(sigset_t))
4685 if (copy_from_user(&newset, unewset, sizeof(newset)))
4687 return sigsuspend(&newset);
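/*
 * Editor's note: an illustrative userspace sketch (not part of this file)
 * of the classic race-free wait: block the signal, test the condition,
 * then atomically open the mask and sleep in one step. Assumes a handler
 * for SIGUSR1 sets a volatile sig_atomic_t flag:
 *
 *	sigset_t block, wait_mask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &wait_mask);
 *	sigdelset(&wait_mask, SIGUSR1);
 *	while (!flag)
 *		sigsuspend(&wait_mask);	// returns -1 with errno == EINTR
 */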
4690 #ifdef CONFIG_COMPAT
4691 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4695 /* XXX: Don't preclude handling different sized sigset_t's. */
4696 if (sigsetsize != sizeof(sigset_t))
4699 if (get_compat_sigset(&newset, unewset))
4701 return sigsuspend(&newset);
4705 #ifdef CONFIG_OLD_SIGSUSPEND
4706 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4709 siginitset(&blocked, mask);
4710 return sigsuspend(&blocked);
4713 #ifdef CONFIG_OLD_SIGSUSPEND3
4714 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4717 siginitset(&blocked, mask);
4718 return sigsuspend(&blocked);
4722 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4727 static inline void siginfo_buildtime_checks(void)
4729 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4731 /* Verify the offsets in the two siginfos match */
4732 #define CHECK_OFFSET(field) \
4733 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4736 CHECK_OFFSET(si_pid);
4737 CHECK_OFFSET(si_uid);
4740 CHECK_OFFSET(si_tid);
4741 CHECK_OFFSET(si_overrun);
4742 CHECK_OFFSET(si_value);
4745 CHECK_OFFSET(si_pid);
4746 CHECK_OFFSET(si_uid);
4747 CHECK_OFFSET(si_value);
4750 CHECK_OFFSET(si_pid);
4751 CHECK_OFFSET(si_uid);
4752 CHECK_OFFSET(si_status);
4753 CHECK_OFFSET(si_utime);
4754 CHECK_OFFSET(si_stime);
4757 CHECK_OFFSET(si_addr);
4758 CHECK_OFFSET(si_trapno);
4759 CHECK_OFFSET(si_addr_lsb);
4760 CHECK_OFFSET(si_lower);
4761 CHECK_OFFSET(si_upper);
4762 CHECK_OFFSET(si_pkey);
4763 CHECK_OFFSET(si_perf_data);
4764 CHECK_OFFSET(si_perf_type);
4767 CHECK_OFFSET(si_band);
4768 CHECK_OFFSET(si_fd);
4771 CHECK_OFFSET(si_call_addr);
4772 CHECK_OFFSET(si_syscall);
4773 CHECK_OFFSET(si_arch);
4777 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4778 offsetof(struct siginfo, si_addr));
4779 if (sizeof(int) == sizeof(void __user *)) {
4780 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4781 sizeof(void __user *));
4783 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4784 sizeof_field(struct siginfo, si_uid)) !=
4785 sizeof(void __user *));
4786 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4787 offsetof(struct siginfo, si_uid));
4789 #ifdef CONFIG_COMPAT
4790 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4791 offsetof(struct compat_siginfo, si_addr));
4792 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4793 sizeof(compat_uptr_t));
4794 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4795 sizeof_field(struct siginfo, si_pid));
4799 void __init signals_init(void)
4801 siginfo_buildtime_checks();
4803 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4806 #ifdef CONFIG_KGDB_KDB
4807 #include <linux/kdb.h>
4809 * kdb_send_sig - Allows kdb to send signals without exposing
4810 * signal internals. This function checks if the required locks are
4811 * available before calling the main signal code, to avoid kdb
4814 void kdb_send_sig(struct task_struct *t, int sig)
4816 static struct task_struct *kdb_prev_t;
4818 if (!spin_trylock(&t->sighand->siglock)) {
4819 kdb_printf("Can't do kill command now.\n"
4820 "The sigmask lock is held somewhere else in "
4821 "kernel, try again later\n");
4824 new_t = kdb_prev_t != t;
4826 if (!task_is_running(t) && new_t) {
4827 spin_unlock(&t->sighand->siglock);
4828 kdb_printf("Process is not RUNNING, sending a signal from "
4829 "kdb risks deadlock\n"
4830 "on the run queue locks. "
4831 "The signal has _not_ been sent.\n"
4832 "Reissue the kill command if you want to risk "
4836 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4837 spin_unlock(&t->sighand->siglock);
4839 kdb_printf("Failed to deliver signal %d to process %d.\n",
4842 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4844 #endif /* CONFIG_KGDB_KDB */