// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

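/*
 * Worked example (illustration only, not kernel code): with
 * _NSIG_WORDS == 1, a task with SIGINT pending and unblocked and
 * SIGTERM pending but blocked evaluates
 *
 *	ready = signal->sig[0] & ~blocked->sig[0];
 *
 * The SIGTERM bit is masked off, the SIGINT bit survives, so
 * has_pending_signals() returns true and the callers below set
 * TIF_SIGPENDING.
 */
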
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only the callers who know they
	 * should clear it do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

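/*
 * Sketch of a typical caller (illustration only): code that edits
 * current->blocked by hand must hold ->siglock and recompute the
 * pending state afterwards, roughly:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	sigaddset(&current->blocked, SIGUSR1);	(hypothetical edit)
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * set_current_blocked() is the preferred interface and does this
 * internally.
 */
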
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

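/*
 * Worked example (illustration only): on x86, if both SIGUSR1 (10)
 * and SIGSEGV (11) are pending and unblocked in the first word, the
 * plain scan would return the lower-numbered SIGUSR1, but:
 *
 *	x = *s & ~*m;			both bits set
 *	x &= SYNCHRONOUS_MASK;		only the SIGSEGV bit remains
 *	sig = ffz(~x) + 1;		== SIGSEGV
 *
 * so fault-style synchronous signals jump the queue ahead of ordinary
 * asynchronous signals.
 */
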
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
	switch (sigpending) {
	case 1:
		if (likely(get_ucounts(ucounts)))
			break;
		fallthrough;
	case LONG_MAX:
		/*
		 * we need to decrease the ucount in the userns tree on any
		 * failure to avoid counts leaking.
		 */
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
		rcu_read_unlock();
		return NULL;
	}
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
			put_ucounts(ucounts);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
		put_ucounts(q->ucounts);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

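/*
 * Sketch of the intended kthread usage (illustration only, assuming a
 * kthread that opted in to a signal with allow_signal()):
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			flush_signals(current);
 *	}
 *
 * User-space tasks never take this path; their pending signals are
 * delivered, not flushed.
 */
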
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

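/*
 * Sketch of a caller (illustration only; get_signal() and the
 * signalfd code are the real users): the siglock must be held across
 * the call, and a zero return means nothing was dequeued:
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	signr = dequeue_signal(tsk, &tsk->blocked, &info);
 *	spin_unlock_irq(&tsk->sighand->siglock);
 */
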
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

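/*
 * The two common entry points are thin wrappers (sketch, matching the
 * helpers in <linux/sched/signal.h>):
 *
 *	signal_wake_up(t, resume)
 *		-> signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
 *	ptrace_signal_wake_up(t, resume)
 *		-> signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
 *
 * so a fatal wakeup also kicks stopped/traced/killable sleepers.
 */
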
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

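/*
 * Worked example (illustration only): a shell running with uid ==
 * euid == 1000 may signal a process whose real or saved uid is 1000,
 * even if that process has switched its effective uid (e.g. a setuid
 * binary it started), but it needs CAP_KILL in the target's user
 * namespace for anything else.
 */
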
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and the
 * ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, the STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

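/*
 * Example of the resulting semantics (illustration only): two
 * back-to-back kill(pid, SIGCHLD) calls can collapse into one pending
 * SIGCHLD, because legacy (< SIGRTMIN) signals are a bitmask plus at
 * most one queued siginfo, while two sigqueue() calls with SIGRTMIN
 * always queue two separate entries.
 */
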
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  The 32bit address will be encoded in the low
 * 32bits of the pointer, and those low 32bits will be stored at a
 * higher address than a 32bit pointer expects.  So userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

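/*
 * Sketch of the calling convention described above (illustration only,
 * with a hypothetical 32-bit user address uaddr32): a caller that
 * knows the recipient is a 32-bit process stores the pointer bits in
 * sival_int rather than sival_ptr:
 *
 *	sigval_t addr;
 *
 *	addr.sival_int = (int)uaddr32;
 *	kill_pid_usb_asyncio(sig, err, addr, pid, cred);
 */
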
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

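/*
 * Usage sketch (illustration only): the @priv flag selects one of the
 * special "no siginfo" markers, so
 *
 *	send_sig(SIGKILL, tsk, 1);	(SEND_SIG_PRIV, from the kernel)
 *	send_sig(SIGHUP, tsk, 0);	(SEND_SIG_NOINFO, as if from a user)
 *
 * differ in whether si_code ends up SI_KERNEL or SI_USER and in the
 * force/namespace handling applied in send_signal() above.
 */
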
void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_PERF;
	info.si_addr = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

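/*
 * Lifecycle sketch for the preallocated case (illustration only,
 * mirroring how the POSIX timer code uses these helpers):
 *
 *	q = sigqueue_alloc();		timer_create(), may fail w/ EAGAIN
 *	send_sigqueue(q, pid, type);	on every timer expiry
 *	sigqueue_free(q);		timer_delete(), never fails
 *
 * Because the queue entry already exists, the expiry path can never
 * lose the notification to an allocation failure.
 */
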
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent, which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock and pointless because our tracer is dead, so
	 * don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL.  Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED.  But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

2174 * This must be called with current->sighand->siglock held.
2176 * This should be the path for all ptrace stops.
2177 * We always set current->last_siginfo while stopped here.
2178 * That makes it a way to test a stopped process for
2179 * being ptrace-stopped vs being job-control-stopped.
2181 * If we actually decide not to stop at all because the tracer
2182 * is gone, we keep current->exit_code unless clear_code.
2184 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2185 __releases(¤t->sighand->siglock)
2186 __acquires(¤t->sighand->siglock)
2188 bool gstop_done = false;
2190 if (arch_ptrace_stop_needed(exit_code, info)) {
2192 * The arch code has something special to do before a
2193 * ptrace stop. This is allowed to block, e.g. for faults
2194 * on user stack pages. We can't keep the siglock while
2195 * calling arch_ptrace_stop, so we must release it now.
2196 * To preserve proper semantics, we must do this before
2197 * any signal bookkeeping like checking group_stop_count.
2198 * Meanwhile, a SIGKILL could come in before we retake the
2199 * siglock. That must prevent us from sleeping in TASK_TRACED.
2200 * So after regaining the lock, we must check for SIGKILL.
2202 spin_unlock_irq(¤t->sighand->siglock);
2203 arch_ptrace_stop(exit_code, info);
2204 spin_lock_irq(¤t->sighand->siglock);
2205 if (sigkill_pending(current))
2209 set_special_state(TASK_TRACED);
2212 * We're committing to trapping. TRACED should be visible before
2213 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2214 * Also, transition to TRACED and updates to ->jobctl should be
2215 * atomic with respect to siglock and should be done after the arch
2216 * hook as siglock is released and regrabbed across it.
2221 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2223 * set_current_state() smp_wmb();
2225 * wait_task_stopped()
2226 * task_stopped_code()
2227 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2231 current->last_siginfo = info;
2232 current->exit_code = exit_code;
2235 * If @why is CLD_STOPPED, we're trapping to participate in a group
2236 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
2237 * across siglock relocks since INTERRUPT was scheduled, PENDING
2238 * could be clear now. We act as if SIGCONT is received after
2239 * TASK_TRACED is entered - ignore it.
2241 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2242 gstop_done = task_participate_group_stop(current);
2244 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2245 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2246 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2247 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2249 /* entering a trap, clear TRAPPING */
2250 task_clear_jobctl_trapping(current);
2252 spin_unlock_irq(¤t->sighand->siglock);
2253 read_lock(&tasklist_lock);
2254 if (may_ptrace_stop()) {
2256 * Notify parents of the stop.
2258 * While ptraced, there are two parents - the ptracer and
2259 * the real_parent of the group_leader. The ptracer should
2260 * know about every stop while the real parent is only
2261 * interested in the completion of group stop. The states
2262 * for the two don't interact with each other. Notify
2263 * separately unless they're gonna be duplicates.
2265 do_notify_parent_cldstop(current, true, why);
2266 if (gstop_done && ptrace_reparented(current))
2267 do_notify_parent_cldstop(current, false, why);
2270 * Don't want to allow preemption here, because
2271 * sys_ptrace() needs this task to be inactive.
2273 * XXX: implement read_unlock_no_resched().
2276 read_unlock(&tasklist_lock);
2277 cgroup_enter_frozen();
2278 preempt_enable_no_resched();
2279 freezable_schedule();
2280 cgroup_leave_frozen(true);
2283 * By the time we got the lock, our tracer went away.
2284 * Don't drop the lock yet, another tracer may come.
2286 * If @gstop_done, the ptracer went away between group stop
2287 * completion and here. During detach, it would have set
2288 * JOBCTL_STOP_PENDING on us and we'll re-enter
2289 * TASK_STOPPED in do_signal_stop() on return, so notifying
2290 * the real parent of the group stop completion is enough.
2293 do_notify_parent_cldstop(current, false, why);
2295 /* tasklist protects us from ptrace_freeze_traced() */
2296 __set_current_state(TASK_RUNNING);
2298 current->exit_code = 0;
2299 read_unlock(&tasklist_lock);
2303 * We are back. Now reacquire the siglock before touching
2304 * last_siginfo, so that we are sure to have synchronized with
2305 * any signal-sending on another CPU that wants to examine it.
2307 spin_lock_irq(&current->sighand->siglock);
2308 current->last_siginfo = NULL;
2310 /* LISTENING can be set only during STOP traps, clear it */
2311 current->jobctl &= ~JOBCTL_LISTENING;
2314 * Queued signals ignored us while we were stopped for tracing.
2315 * So check for any that we should take before resuming user mode.
2316 * This sets TIF_SIGPENDING, but never clears it.
2318 recalc_sigpending_tsk(current);
2321 static void ptrace_do_notify(int signr, int exit_code, int why)
2323 kernel_siginfo_t info;
2325 clear_siginfo(&info);
2326 info.si_signo = signr;
2327 info.si_code = exit_code;
2328 info.si_pid = task_pid_vnr(current);
2329 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2331 /* Let the debugger run. */
2332 ptrace_stop(exit_code, why, 1, &info);
2335 void ptrace_notify(int exit_code)
2337 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2338 if (unlikely(current->task_works))
2341 spin_lock_irq(&current->sighand->siglock);
2342 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2343 spin_unlock_irq(&current->sighand->siglock);
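/*
 * Tracer-side sketch (illustrative userspace, not kernel code): the stops
 * reported through ptrace_stop()/ptrace_do_notify() above are what a
 * debugger observes via waitpid(2). A minimal sketch, assuming a valid
 * tracee @pid:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);		// tracee has entered TASK_TRACED
 *	if (WIFSTOPPED(status))
 *		ptrace(PTRACE_CONT, pid, NULL, 0);	// resume, inject no signal
 */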
2347 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2348 * @signr: signr causing group stop if initiating
2350 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2351 * and participate in it. If already set, participate in the existing
2352 * group stop. If participated in a group stop (and thus slept), %true is
2353 * returned with siglock released.
2355 * If ptraced, this function doesn't handle stop itself. Instead,
2356 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2357 * untouched. The caller must ensure that INTERRUPT trap handling takes
2358 * place afterwards.
2361 * Must be called with @current->sighand->siglock held, which is released
2365 * %false if group stop is already cancelled or ptrace trap is scheduled.
2366 * %true if participated in group stop.
2368 static bool do_signal_stop(int signr)
2369 __releases(&current->sighand->siglock)
2371 struct signal_struct *sig = current->signal;
2373 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2374 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2375 struct task_struct *t;
2377 /* signr will be recorded in task->jobctl for retries */
2378 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2380 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2381 unlikely(signal_group_exit(sig)))
2384 * There is no group stop already in progress. We must
2387 * While ptraced, a task may be resumed while group stop is
2388 * still in effect and then receive a stop signal and
2389 * initiate another group stop. This deviates from the
2390 * usual behavior as two consecutive stop signals can't
2391 * cause two group stops when !ptraced. That is why we
2392 * also check !task_is_stopped(t) below.
2394 * The condition can be distinguished by testing whether
2395 * SIGNAL_STOP_STOPPED is already set. Don't generate
2396 * group_exit_code in such case.
2398 * This is not necessary for SIGNAL_STOP_CONTINUED because
2399 * an intervening stop signal is required to cause two
2400 * continued events regardless of ptrace.
2402 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2403 sig->group_exit_code = signr;
2405 sig->group_stop_count = 0;
2407 if (task_set_jobctl_pending(current, signr | gstop))
2408 sig->group_stop_count++;
2411 while_each_thread(current, t) {
2413 * Setting state to TASK_STOPPED for a group
2414 * stop is always done with the siglock held,
2415 * so this check has no races.
2417 if (!task_is_stopped(t) &&
2418 task_set_jobctl_pending(t, signr | gstop)) {
2419 sig->group_stop_count++;
2420 if (likely(!(t->ptrace & PT_SEIZED)))
2421 signal_wake_up(t, 0);
2423 ptrace_trap_notify(t);
2428 if (likely(!current->ptrace)) {
2432 * If there are no other threads in the group, or if there
2433 * is a group stop in progress and we are the last to stop,
2434 * report to the parent.
2436 if (task_participate_group_stop(current))
2437 notify = CLD_STOPPED;
2439 set_special_state(TASK_STOPPED);
2440 spin_unlock_irq(&current->sighand->siglock);
2443 * Notify the parent of the group stop completion. Because
2444 * we're not holding either the siglock or tasklist_lock
2445 * here, a ptracer may attach in between; however, this is for
2446 * group stop and should always be delivered to the real
2447 * parent of the group leader. The new ptracer will get
2448 * its notification when this task transitions into
2452 read_lock(&tasklist_lock);
2453 do_notify_parent_cldstop(current, false, notify);
2454 read_unlock(&tasklist_lock);
2457 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2458 cgroup_enter_frozen();
2459 freezable_schedule();
2463 * While ptraced, group stop is handled by STOP trap.
2464 * Schedule it and let the caller deal with it.
2466 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
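/*
 * Illustrative userspace sketch (not kernel code): the group stop handled
 * above is what job control performs on an entire process group; @pgid is
 * a hypothetical process group ID:
 *
 *	kill(-pgid, SIGSTOP);	// every member participates in the stop
 *	...
 *	kill(-pgid, SIGCONT);	// wakes the group; CLD_CONTINUED is reported
 */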
2472 * do_jobctl_trap - take care of ptrace jobctl traps
2474 * When PT_SEIZED, it's used for both group stop and explicit
2475 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2476 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2477 * the stop signal; otherwise, %SIGTRAP.
2479 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2480 * number as exit_code and no siginfo.
2483 * Must be called with @current->sighand->siglock held, which may be
2484 * released and re-acquired before returning with intervening sleep.
2486 static void do_jobctl_trap(void)
2488 struct signal_struct *signal = current->signal;
2489 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2491 if (current->ptrace & PT_SEIZED) {
2492 if (!signal->group_stop_count &&
2493 !(signal->flags & SIGNAL_STOP_STOPPED))
2495 WARN_ON_ONCE(!signr);
2496 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2499 WARN_ON_ONCE(!signr);
2500 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2501 current->exit_code = 0;
2506 * do_freezer_trap - handle the freezer jobctl trap
2508 * Puts the task into the frozen state, unless the task is about to quit;
2509 * in that case it drops JOBCTL_TRAP_FREEZE instead.
2512 * Must be called with @current->sighand->siglock held,
2513 * which is always released before returning.
2515 static void do_freezer_trap(void)
2516 __releases(&current->sighand->siglock)
2519 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2520 * let's make another loop to give it a chance to be handled.
2521 * In any case, we'll return.
2523 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2524 JOBCTL_TRAP_FREEZE) {
2525 spin_unlock_irq(&current->sighand->siglock);
2530 * Now we're sure that there is no pending fatal signal and no
2531 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2532 * immediately (if there is a non-fatal signal pending), and
2533 * put the task to sleep.
2535 __set_current_state(TASK_INTERRUPTIBLE);
2536 clear_thread_flag(TIF_SIGPENDING);
2537 spin_unlock_irq(&current->sighand->siglock);
2538 cgroup_enter_frozen();
2539 freezable_schedule();
2542 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2545 * We do not check sig_kernel_stop(signr) but set this marker
2546 * unconditionally because we do not know whether debugger will
2547 * change signr. This flag has no meaning unless we are going
2548 * to stop after return from ptrace_stop(). In this case it will
2549 * be checked in do_signal_stop(), we should only stop if it was
2550 * not cleared by SIGCONT while we were sleeping. See also the
2551 * comment in dequeue_signal().
2553 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2554 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2556 /* We're back. Did the debugger cancel the sig? */
2557 signr = current->exit_code;
2561 current->exit_code = 0;
2564 * Update the siginfo structure if the signal has
2565 * changed. If the debugger wanted something
2566 * specific in the siginfo structure then it should
2567 * have updated *info via PTRACE_SETSIGINFO.
2569 if (signr != info->si_signo) {
2570 clear_siginfo(info);
2571 info->si_signo = signr;
2573 info->si_code = SI_USER;
2575 info->si_pid = task_pid_vnr(current->parent);
2576 info->si_uid = from_kuid_munged(current_user_ns(),
2577 task_uid(current->parent));
2581 /* If the (new) signal is now blocked, requeue it. */
2582 if (sigismember(&current->blocked, signr)) {
2583 send_signal(signr, info, current, PIDTYPE_PID);
2590 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2592 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2594 case SIL_FAULT_TRAPNO:
2595 case SIL_FAULT_MCEERR:
2596 case SIL_FAULT_BNDERR:
2597 case SIL_FAULT_PKUERR:
2598 case SIL_FAULT_PERF_EVENT:
2599 ksig->info.si_addr = arch_untagged_si_addr(
2600 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2612 bool get_signal(struct ksignal *ksig)
2614 struct sighand_struct *sighand = current->sighand;
2615 struct signal_struct *signal = current->signal;
2618 if (unlikely(current->task_works))
2622 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2623 * that the arch handlers don't all have to do it. If we get here
2624 * without TIF_SIGPENDING, just exit after running signal work.
2626 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2627 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2628 tracehook_notify_signal();
2629 if (!task_sigpending(current))
2633 if (unlikely(uprobe_deny_signal()))
2637 * Do this once, we can't return to user-mode if freezing() == T.
2638 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2639 * thus do not need another check after return.
2644 spin_lock_irq(&sighand->siglock);
2647 * Every stopped thread goes here after wakeup. Check to see if
2648 * we should notify the parent, prepare_signal(SIGCONT) encodes
2649 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2651 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2654 if (signal->flags & SIGNAL_CLD_CONTINUED)
2655 why = CLD_CONTINUED;
2659 signal->flags &= ~SIGNAL_CLD_MASK;
2661 spin_unlock_irq(&sighand->siglock);
2664 * Notify the parent that we're continuing. This event is
2665 * always per-process and doesn't make a whole lot of sense
2666 * for ptracers, who shouldn't consume the state via
2667 * wait(2) either, but, for backward compatibility, notify
2668 * the ptracer of the group leader too unless it's gonna be
2669 * a duplicate.
2671 read_lock(&tasklist_lock);
2672 do_notify_parent_cldstop(current, false, why);
2674 if (ptrace_reparented(current->group_leader))
2675 do_notify_parent_cldstop(current->group_leader,
2677 read_unlock(&tasklist_lock);
2682 /* Has this task already been marked for death? */
2683 if (signal_group_exit(signal)) {
2684 ksig->info.si_signo = signr = SIGKILL;
2685 sigdelset(&current->pending.signal, SIGKILL);
2686 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2687 &sighand->action[SIGKILL - 1]);
2688 recalc_sigpending();
2693 struct k_sigaction *ka;
2695 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2699 if (unlikely(current->jobctl &
2700 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2701 if (current->jobctl & JOBCTL_TRAP_MASK) {
2703 spin_unlock_irq(&sighand->siglock);
2704 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2711 * If the task is leaving the frozen state, let's update
2712 * cgroup counters and reset the frozen bit.
2714 if (unlikely(cgroup_task_frozen(current))) {
2715 spin_unlock_irq(&sighand->siglock);
2716 cgroup_leave_frozen(false);
2721 * Signals generated by the execution of an instruction
2722 * need to be delivered before any other pending signals
2723 * so that the instruction pointer in the signal stack
2724 * frame points to the faulting instruction.
2726 signr = dequeue_synchronous_signal(&ksig->info);
2728 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2731 break; /* will return 0 */
2733 if (unlikely(current->ptrace) && signr != SIGKILL) {
2734 signr = ptrace_signal(signr, &ksig->info);
2739 ka = &sighand->action[signr-1];
2741 /* Trace actually delivered signals. */
2742 trace_signal_deliver(signr, &ksig->info, ka);
2744 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2746 if (ka->sa.sa_handler != SIG_DFL) {
2747 /* Run the handler. */
2750 if (ka->sa.sa_flags & SA_ONESHOT)
2751 ka->sa.sa_handler = SIG_DFL;
2753 break; /* will return non-zero "signr" value */
2757 * Now we are doing the default action for this signal.
2759 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2763 * Global init gets no signals it doesn't want.
2764 * Container-init gets no signals it doesn't want from same
2765 * container.
2767 * Note that if global/container-init sees a sig_kernel_only()
2768 * signal here, the signal must have been generated internally
2769 * or must have come from an ancestor namespace. In either
2770 * case, the signal cannot be dropped.
2772 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2773 !sig_kernel_only(signr))
2776 if (sig_kernel_stop(signr)) {
2778 * The default action is to stop all threads in
2779 * the thread group. The job control signals
2780 * do nothing in an orphaned pgrp, but SIGSTOP
2781 * always works. Note that siglock needs to be
2782 * dropped during the call to is_orphaned_pgrp()
2783 * because of lock ordering with tasklist_lock.
2784 * This allows an intervening SIGCONT to be posted.
2785 * We need to check for that and bail out if necessary.
2787 if (signr != SIGSTOP) {
2788 spin_unlock_irq(&sighand->siglock);
2790 /* signals can be posted during this window */
2792 if (is_current_pgrp_orphaned())
2795 spin_lock_irq(&sighand->siglock);
2798 if (likely(do_signal_stop(ksig->info.si_signo))) {
2799 /* It released the siglock. */
2804 * We didn't actually stop, due to a race
2805 * with SIGCONT or something like that.
2811 spin_unlock_irq(&sighand->siglock);
2812 if (unlikely(cgroup_task_frozen(current)))
2813 cgroup_leave_frozen(true);
2816 * Anything else is fatal, maybe with a core dump.
2818 current->flags |= PF_SIGNALED;
2820 if (sig_kernel_coredump(signr)) {
2821 if (print_fatal_signals)
2822 print_fatal_signal(ksig->info.si_signo);
2823 proc_coredump_connector(current);
2825 * If it was able to dump core, this kills all
2826 * other threads in the group and synchronizes with
2827 * their demise. If we lost the race with another
2828 * thread getting here, it set group_exit_code
2829 * first and our do_group_exit call below will use
2830 * that value and ignore the one we pass it.
2832 do_coredump(&ksig->info);
2836 * PF_IO_WORKER threads will catch and exit on fatal signals
2837 * themselves. They have cleanup that must be performed, so
2838 * we cannot call do_exit() on their behalf.
2840 if (current->flags & PF_IO_WORKER)
2844 * Death signals, no core dump.
2846 do_group_exit(ksig->info.si_signo);
2849 spin_unlock_irq(&sighand->siglock);
2853 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2854 hide_si_addr_tag_bits(ksig);
2856 return ksig->sig > 0;
2860 * signal_delivered - update state after successful signal delivery
2861 * @ksig: kernel signal struct
2862 * @stepping: nonzero if debugger single-step or block-step in use
2864 * This function should be called when a signal has successfully been
2865 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2866 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2867 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2869 static void signal_delivered(struct ksignal *ksig, int stepping)
2873 /* A signal was successfully delivered, and the
2874 saved sigmask was stored on the signal frame,
2875 and will be restored by sigreturn. So we can
2876 simply clear the restore sigmask flag. */
2877 clear_restore_sigmask();
2879 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2880 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2881 sigaddset(&blocked, ksig->sig);
2882 set_current_blocked(&blocked);
2883 if (current->sas_ss_flags & SS_AUTODISARM)
2884 sas_ss_reset(current);
2885 tracehook_signal_handler(stepping);
2888 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2891 force_sigsegv(ksig->sig);
2893 signal_delivered(ksig, stepping);
2897 * It could be that complete_signal() picked us to notify about the
2898 * group-wide signal. Other threads should be notified now to take
2899 * the shared signals in @which since we will not.
2901 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2904 struct task_struct *t;
2906 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2907 if (sigisemptyset(&retarget))
2911 while_each_thread(tsk, t) {
2912 if (t->flags & PF_EXITING)
2915 if (!has_pending_signals(&retarget, &t->blocked))
2917 /* Remove the signals this thread can handle. */
2918 sigandsets(&retarget, &retarget, &t->blocked);
2920 if (!task_sigpending(t))
2921 signal_wake_up(t, 0);
2923 if (sigisemptyset(&retarget))
2928 void exit_signals(struct task_struct *tsk)
2934 * @tsk is about to have PF_EXITING set - lock out users which
2935 * expect stable threadgroup.
2937 cgroup_threadgroup_change_begin(tsk);
2939 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2940 tsk->flags |= PF_EXITING;
2941 cgroup_threadgroup_change_end(tsk);
2945 spin_lock_irq(&tsk->sighand->siglock);
2947 * From now this task is not visible for group-wide signals,
2948 * see wants_signal(), do_signal_stop().
2950 tsk->flags |= PF_EXITING;
2952 cgroup_threadgroup_change_end(tsk);
2954 if (!task_sigpending(tsk))
2957 unblocked = tsk->blocked;
2958 signotset(&unblocked);
2959 retarget_shared_pending(tsk, &unblocked);
2961 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2962 task_participate_group_stop(tsk))
2963 group_stop = CLD_STOPPED;
2965 spin_unlock_irq(&tsk->sighand->siglock);
2968 * If group stop has completed, deliver the notification. This
2969 * should always go to the real parent of the group leader.
2971 if (unlikely(group_stop)) {
2972 read_lock(&tasklist_lock);
2973 do_notify_parent_cldstop(tsk, false, group_stop);
2974 read_unlock(&tasklist_lock);
2979 * System call entry points.
2983 * sys_restart_syscall - restart a system call
2985 SYSCALL_DEFINE0(restart_syscall)
2987 struct restart_block *restart = &current->restart_block;
2988 return restart->fn(restart);
2991 long do_no_restart_syscall(struct restart_block *param)
2996 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2998 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2999 sigset_t newblocked;
3000 /* A set of now blocked but previously unblocked signals. */
3001 sigandnsets(&newblocked, newset, &current->blocked);
3002 retarget_shared_pending(tsk, &newblocked);
3004 tsk->blocked = *newset;
3005 recalc_sigpending();
3009 * set_current_blocked - change current->blocked mask
3012 * It is wrong to change ->blocked directly, this helper should be used
3013 * to ensure the process can't miss a shared signal we are going to block.
3015 void set_current_blocked(sigset_t *newset)
3017 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3018 __set_current_blocked(newset);
3021 void __set_current_blocked(const sigset_t *newset)
3023 struct task_struct *tsk = current;
3026 * In case the signal mask hasn't changed, there is nothing we need
3027 * to do. The current->blocked shouldn't be modified by other task.
3029 if (sigequalsets(&tsk->blocked, newset))
3032 spin_lock_irq(&tsk->sighand->siglock);
3033 __set_task_blocked(tsk, newset);
3034 spin_unlock_irq(&tsk->sighand->siglock);
3038 * This is also useful for kernel threads that want to temporarily
3039 * (or permanently) block certain signals.
3041 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3042 * interface happily blocks "unblockable" signals like SIGKILL
3045 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3047 struct task_struct *tsk = current;
3050 /* Lockless, only current can change ->blocked, never from irq */
3052 *oldset = tsk->blocked;
3056 sigorsets(&newset, &tsk->blocked, set);
3059 sigandnsets(&newset, &tsk->blocked, set);
3068 __set_current_blocked(&newset);
3071 EXPORT_SYMBOL(sigprocmask);
3074 * The API helps set app-provided sigmasks.
3076 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3077 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3079 * Note that it does set_restore_sigmask() in advance, so it must always be
3080 * paired with restore_saved_sigmask_unless() before return from syscall.
3082 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3088 if (sigsetsize != sizeof(sigset_t))
3090 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3093 set_restore_sigmask();
3094 current->saved_sigmask = current->blocked;
3095 set_current_blocked(&kmask);
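/*
 * Illustrative userspace sketch (not kernel code): pselect(2) passes the
 * sigmask that arrives here, letting a program keep a signal blocked
 * except during the wait and so close the classic select()+sigprocmask()
 * race window. @nfds and @rfds are assumed to be set up elsewhere:
 *
 *	sigset_t block, orig;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &orig);
 *	...inspect state changed by the SIGCHLD handler...
 *	pselect(nfds, &rfds, NULL, NULL, NULL, &orig);	// @orig live only here
 */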
3100 #ifdef CONFIG_COMPAT
3101 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3108 if (sigsetsize != sizeof(compat_sigset_t))
3110 if (get_compat_sigset(&kmask, umask))
3113 set_restore_sigmask();
3114 current->saved_sigmask = current->blocked;
3115 set_current_blocked(&kmask);
3122 * sys_rt_sigprocmask - change the list of currently blocked signals
3123 * @how: whether to add, remove, or set signals
3124 * @nset: new signal mask (if non-null)
3125 * @oset: previous value of signal mask if non-null
3126 * @sigsetsize: size of sigset_t type
3128 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3129 sigset_t __user *, oset, size_t, sigsetsize)
3131 sigset_t old_set, new_set;
3134 /* XXX: Don't preclude handling different sized sigset_t's. */
3135 if (sigsetsize != sizeof(sigset_t))
3138 old_set = current->blocked;
3141 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3143 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3145 error = sigprocmask(how, &new_set, NULL);
3151 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
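/*
 * Illustrative userspace sketch (not kernel code): glibc's sigprocmask(3)
 * reaches the syscall above. Blocking SIGINT around a critical region:
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// SIG_BLOCK ORs into ->blocked
 *	...region that must not be interrupted...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore the saved mask
 */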
3158 #ifdef CONFIG_COMPAT
3159 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3160 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3162 sigset_t old_set = current->blocked;
3164 /* XXX: Don't preclude handling different sized sigset_t's. */
3165 if (sigsetsize != sizeof(sigset_t))
3171 if (get_compat_sigset(&new_set, nset))
3173 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3175 error = sigprocmask(how, &new_set, NULL);
3179 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3183 static void do_sigpending(sigset_t *set)
3185 spin_lock_irq(&current->sighand->siglock);
3186 sigorsets(set, &current->pending.signal,
3187 &current->signal->shared_pending.signal);
3188 spin_unlock_irq(&current->sighand->siglock);
3190 /* Outside the lock because only this thread touches it. */
3191 sigandsets(set, &current->blocked, set);
3195 * sys_rt_sigpending - examine a pending signal that has been raised
3197 * @uset: stores pending signals
3198 * @sigsetsize: size of sigset_t type or larger
3200 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3204 if (sigsetsize > sizeof(*uset))
3207 do_sigpending(&set);
3209 if (copy_to_user(uset, &set, sigsetsize))
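/*
 * Illustrative userspace sketch (not kernel code): sigpending(2) reports
 * the union computed by do_sigpending() above without dequeueing anything:
 *
 *	sigset_t pending;
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGTERM))
 *		...a blocked SIGTERM has been raised and is waiting...
 */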
3215 #ifdef CONFIG_COMPAT
3216 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3217 compat_size_t, sigsetsize)
3221 if (sigsetsize > sizeof(*uset))
3224 do_sigpending(&set);
3226 return put_compat_sigset(uset, &set, sigsetsize);
3230 static const struct {
3231 unsigned char limit, layout;
3232 } sig_sicodes[] = {
3233 [SIGILL] = { NSIGILL, SIL_FAULT },
3234 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3235 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3236 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3237 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3239 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3241 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3242 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3243 [SIGSYS] = { NSIGSYS, SIL_SYS },
3246 static bool known_siginfo_layout(unsigned sig, int si_code)
3248 if (si_code == SI_KERNEL)
3250 else if ((si_code > SI_USER)) {
3251 if (sig_specific_sicodes(sig)) {
3252 if (si_code <= sig_sicodes[sig].limit)
3255 else if (si_code <= NSIGPOLL)
3258 else if (si_code >= SI_DETHREAD)
3260 else if (si_code == SI_ASYNCNL)
3265 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3267 enum siginfo_layout layout = SIL_KILL;
3268 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3269 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3270 (si_code <= sig_sicodes[sig].limit)) {
3271 layout = sig_sicodes[sig].layout;
3272 /* Handle the exceptions */
3273 if ((sig == SIGBUS) &&
3274 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3275 layout = SIL_FAULT_MCEERR;
3276 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3277 layout = SIL_FAULT_BNDERR;
3279 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3280 layout = SIL_FAULT_PKUERR;
3282 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3283 layout = SIL_FAULT_PERF_EVENT;
3284 else if (IS_ENABLED(CONFIG_SPARC) &&
3285 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3286 layout = SIL_FAULT_TRAPNO;
3287 else if (IS_ENABLED(CONFIG_ALPHA) &&
3289 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3290 layout = SIL_FAULT_TRAPNO;
3292 else if (si_code <= NSIGPOLL)
3295 if (si_code == SI_TIMER)
3297 else if (si_code == SI_SIGIO)
3299 else if (si_code < 0)
3305 static inline char __user *si_expansion(const siginfo_t __user *info)
3307 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3310 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3312 char __user *expansion = si_expansion(to);
3313 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3315 if (clear_user(expansion, SI_EXPANSION_SIZE))
3320 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3321 const siginfo_t __user *from)
3323 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3324 char __user *expansion = si_expansion(from);
3325 char buf[SI_EXPANSION_SIZE];
3328 * An unknown si_code might need more than
3329 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3330 * extra bytes are 0. This guarantees copy_siginfo_to_user
3331 * will return this data to userspace exactly.
3333 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3335 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3343 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3344 const siginfo_t __user *from)
3346 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3348 to->si_signo = signo;
3349 return post_copy_siginfo_from_user(to, from);
3352 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3354 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3356 return post_copy_siginfo_from_user(to, from);
3359 #ifdef CONFIG_COMPAT
3361 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3362 * @to: compat siginfo destination
3363 * @from: kernel siginfo source
3365 * Note: This function does not work properly for the SIGCHLD on x32, but
3366 * fortunately it doesn't have to. The only valid callers for this function are
3367 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3368 * The latter does not care because SIGCHLD will never cause a coredump.
3370 void copy_siginfo_to_external32(struct compat_siginfo *to,
3371 const struct kernel_siginfo *from)
3373 memset(to, 0, sizeof(*to));
3375 to->si_signo = from->si_signo;
3376 to->si_errno = from->si_errno;
3377 to->si_code = from->si_code;
3378 switch(siginfo_layout(from->si_signo, from->si_code)) {
3380 to->si_pid = from->si_pid;
3381 to->si_uid = from->si_uid;
3384 to->si_tid = from->si_tid;
3385 to->si_overrun = from->si_overrun;
3386 to->si_int = from->si_int;
3389 to->si_band = from->si_band;
3390 to->si_fd = from->si_fd;
3393 to->si_addr = ptr_to_compat(from->si_addr);
3395 case SIL_FAULT_TRAPNO:
3396 to->si_addr = ptr_to_compat(from->si_addr);
3397 to->si_trapno = from->si_trapno;
3399 case SIL_FAULT_MCEERR:
3400 to->si_addr = ptr_to_compat(from->si_addr);
3401 to->si_addr_lsb = from->si_addr_lsb;
3403 case SIL_FAULT_BNDERR:
3404 to->si_addr = ptr_to_compat(from->si_addr);
3405 to->si_lower = ptr_to_compat(from->si_lower);
3406 to->si_upper = ptr_to_compat(from->si_upper);
3408 case SIL_FAULT_PKUERR:
3409 to->si_addr = ptr_to_compat(from->si_addr);
3410 to->si_pkey = from->si_pkey;
3412 case SIL_FAULT_PERF_EVENT:
3413 to->si_addr = ptr_to_compat(from->si_addr);
3414 to->si_perf_data = from->si_perf_data;
3415 to->si_perf_type = from->si_perf_type;
3418 to->si_pid = from->si_pid;
3419 to->si_uid = from->si_uid;
3420 to->si_status = from->si_status;
3421 to->si_utime = from->si_utime;
3422 to->si_stime = from->si_stime;
3425 to->si_pid = from->si_pid;
3426 to->si_uid = from->si_uid;
3427 to->si_int = from->si_int;
3430 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3431 to->si_syscall = from->si_syscall;
3432 to->si_arch = from->si_arch;
3437 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3438 const struct kernel_siginfo *from)
3440 struct compat_siginfo new;
3442 copy_siginfo_to_external32(&new, from);
3443 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3448 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3449 const struct compat_siginfo *from)
3452 to->si_signo = from->si_signo;
3453 to->si_errno = from->si_errno;
3454 to->si_code = from->si_code;
3455 switch(siginfo_layout(from->si_signo, from->si_code)) {
3457 to->si_pid = from->si_pid;
3458 to->si_uid = from->si_uid;
3461 to->si_tid = from->si_tid;
3462 to->si_overrun = from->si_overrun;
3463 to->si_int = from->si_int;
3466 to->si_band = from->si_band;
3467 to->si_fd = from->si_fd;
3470 to->si_addr = compat_ptr(from->si_addr);
3472 case SIL_FAULT_TRAPNO:
3473 to->si_addr = compat_ptr(from->si_addr);
3474 to->si_trapno = from->si_trapno;
3476 case SIL_FAULT_MCEERR:
3477 to->si_addr = compat_ptr(from->si_addr);
3478 to->si_addr_lsb = from->si_addr_lsb;
3480 case SIL_FAULT_BNDERR:
3481 to->si_addr = compat_ptr(from->si_addr);
3482 to->si_lower = compat_ptr(from->si_lower);
3483 to->si_upper = compat_ptr(from->si_upper);
3485 case SIL_FAULT_PKUERR:
3486 to->si_addr = compat_ptr(from->si_addr);
3487 to->si_pkey = from->si_pkey;
3489 case SIL_FAULT_PERF_EVENT:
3490 to->si_addr = compat_ptr(from->si_addr);
3491 to->si_perf_data = from->si_perf_data;
3492 to->si_perf_type = from->si_perf_type;
3495 to->si_pid = from->si_pid;
3496 to->si_uid = from->si_uid;
3497 to->si_status = from->si_status;
3498 #ifdef CONFIG_X86_X32_ABI
3499 if (in_x32_syscall()) {
3500 to->si_utime = from->_sifields._sigchld_x32._utime;
3501 to->si_stime = from->_sifields._sigchld_x32._stime;
3505 to->si_utime = from->si_utime;
3506 to->si_stime = from->si_stime;
3510 to->si_pid = from->si_pid;
3511 to->si_uid = from->si_uid;
3512 to->si_int = from->si_int;
3515 to->si_call_addr = compat_ptr(from->si_call_addr);
3516 to->si_syscall = from->si_syscall;
3517 to->si_arch = from->si_arch;
3523 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3524 const struct compat_siginfo __user *ufrom)
3526 struct compat_siginfo from;
3528 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3531 from.si_signo = signo;
3532 return post_copy_siginfo_from_user32(to, &from);
3535 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3536 const struct compat_siginfo __user *ufrom)
3538 struct compat_siginfo from;
3540 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3543 return post_copy_siginfo_from_user32(to, &from);
3545 #endif /* CONFIG_COMPAT */
3548 * do_sigtimedwait - wait for queued signals specified in @which
3549 * @which: queued signals to wait for
3550 * @info: if non-null, the signal's siginfo is returned here
3551 * @ts: upper bound on process time suspension
3553 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3554 const struct timespec64 *ts)
3556 ktime_t *to = NULL, timeout = KTIME_MAX;
3557 struct task_struct *tsk = current;
3558 sigset_t mask = *which;
3562 if (!timespec64_valid(ts))
3564 timeout = timespec64_to_ktime(*ts);
3569 * Invert the set of allowed signals to get those we want to block.
3571 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3574 spin_lock_irq(&tsk->sighand->siglock);
3575 sig = dequeue_signal(tsk, &mask, info);
3576 if (!sig && timeout) {
3578 * None ready, temporarily unblock those we're interested
3579 * in while we are sleeping, so that we'll be awakened when
3580 * they arrive. Unblocking is always fine, we can avoid
3581 * set_current_blocked().
3583 tsk->real_blocked = tsk->blocked;
3584 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3585 recalc_sigpending();
3586 spin_unlock_irq(&tsk->sighand->siglock);
3588 __set_current_state(TASK_INTERRUPTIBLE);
3589 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3591 spin_lock_irq(&tsk->sighand->siglock);
3592 __set_task_blocked(tsk, &tsk->real_blocked);
3593 sigemptyset(&tsk->real_blocked);
3594 sig = dequeue_signal(tsk, &mask, info);
3596 spin_unlock_irq(&tsk->sighand->siglock);
3600 return ret ? -EINTR : -EAGAIN;
3604 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3605 * in @uthese
3606 * @uthese: queued signals to wait for
3607 * @uinfo: if non-null, the signal's siginfo is returned here
3608 * @uts: upper bound on process time suspension
3609 * @sigsetsize: size of sigset_t type
3611 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3612 siginfo_t __user *, uinfo,
3613 const struct __kernel_timespec __user *, uts,
3617 struct timespec64 ts;
3618 kernel_siginfo_t info;
3621 /* XXX: Don't preclude handling different sized sigset_t's. */
3622 if (sigsetsize != sizeof(sigset_t))
3625 if (copy_from_user(&these, uthese, sizeof(these)))
3629 if (get_timespec64(&ts, uts))
3633 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3635 if (ret > 0 && uinfo) {
3636 if (copy_siginfo_to_user(uinfo, &info))
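/*
 * Illustrative userspace sketch (not kernel code): synchronous signal
 * handling with sigtimedwait(2). The signal is blocked first so that it
 * stays queued for dequeue_signal() instead of being delivered
 * asynchronously:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		...si.si_pid identifies the sender...
 */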
3643 #ifdef CONFIG_COMPAT_32BIT_TIME
3644 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3645 siginfo_t __user *, uinfo,
3646 const struct old_timespec32 __user *, uts,
3650 struct timespec64 ts;
3651 kernel_siginfo_t info;
3654 if (sigsetsize != sizeof(sigset_t))
3657 if (copy_from_user(&these, uthese, sizeof(these)))
3661 if (get_old_timespec32(&ts, uts))
3665 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3667 if (ret > 0 && uinfo) {
3668 if (copy_siginfo_to_user(uinfo, &info))
3676 #ifdef CONFIG_COMPAT
3677 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3678 struct compat_siginfo __user *, uinfo,
3679 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3682 struct timespec64 t;
3683 kernel_siginfo_t info;
3686 if (sigsetsize != sizeof(sigset_t))
3689 if (get_compat_sigset(&s, uthese))
3693 if (get_timespec64(&t, uts))
3697 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3699 if (ret > 0 && uinfo) {
3700 if (copy_siginfo_to_user32(uinfo, &info))
3707 #ifdef CONFIG_COMPAT_32BIT_TIME
3708 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3709 struct compat_siginfo __user *, uinfo,
3710 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3713 struct timespec64 t;
3714 kernel_siginfo_t info;
3717 if (sigsetsize != sizeof(sigset_t))
3720 if (get_compat_sigset(&s, uthese))
3724 if (get_old_timespec32(&t, uts))
3728 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3730 if (ret > 0 && uinfo) {
3731 if (copy_siginfo_to_user32(uinfo, &info))
3740 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3742 clear_siginfo(info);
3743 info->si_signo = sig;
3745 info->si_code = SI_USER;
3746 info->si_pid = task_tgid_vnr(current);
3747 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3751 * sys_kill - send a signal to a process
3752 * @pid: the PID of the process
3753 * @sig: signal to be sent
3755 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3757 struct kernel_siginfo info;
3759 prepare_kill_siginfo(sig, &info);
3761 return kill_something_info(sig, &info, pid);
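/*
 * Illustrative userspace sketch (not kernel code): the null signal is a
 * pure existence/permission probe (see the note in do_send_specific()
 * below); nothing is delivered. @pid is a hypothetical target:
 *
 *	if (kill(pid, 0) == 0)
 *		...process exists and we may signal it...
 *	else if (errno == ESRCH)
 *		...no such process...
 */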
3765 * Verify that the signaler and signalee either are in the same pid namespace
3766 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3767 * namespace.
3769 static bool access_pidfd_pidns(struct pid *pid)
3771 struct pid_namespace *active = task_active_pid_ns(current);
3772 struct pid_namespace *p = ns_of_pid(pid);
3785 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3786 siginfo_t __user *info)
3788 #ifdef CONFIG_COMPAT
3790 * Avoid hooking up compat syscalls and instead handle necessary
3791 * conversions here. Note, this is a stop-gap measure and should not be
3792 * considered a generic solution.
3794 if (in_compat_syscall())
3795 return copy_siginfo_from_user32(
3796 kinfo, (struct compat_siginfo __user *)info);
3798 return copy_siginfo_from_user(kinfo, info);
3801 static struct pid *pidfd_to_pid(const struct file *file)
3805 pid = pidfd_pid(file);
3809 return tgid_pidfd_to_pid(file);
3813 * sys_pidfd_send_signal - Signal a process through a pidfd
3814 * @pidfd: file descriptor of the process
3815 * @sig: signal to send
3816 * @info: signal info
3817 * @flags: future flags
3819 * The syscall currently only signals via PIDTYPE_PID which covers
3820 * kill(<positive-pid>, <signal>). It does not signal threads or process
3821 * groups.
3822 * In order to extend the syscall to threads and process groups the @flags
3823 * argument should be used. In essence, the @flags argument will determine
3824 * what is signaled and not the file descriptor itself. Put in other words,
3825 * grouping is a property of the flags argument not a property of the file
3826 * descriptor.
3828 * Return: 0 on success, negative errno on failure
3830 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3831 siginfo_t __user *, info, unsigned int, flags)
3836 kernel_siginfo_t kinfo;
3838 /* Enforce flags be set to 0 until we add an extension. */
3846 /* Is this a pidfd? */
3847 pid = pidfd_to_pid(f.file);
3854 if (!access_pidfd_pidns(pid))
3858 ret = copy_siginfo_from_user_any(&kinfo, info);
3863 if (unlikely(sig != kinfo.si_signo))
3866 /* Only allow sending arbitrary signals to yourself. */
3868 if ((task_pid(current) != pid) &&
3869 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3872 prepare_kill_siginfo(sig, &kinfo);
3875 ret = kill_pid_info(sig, &kinfo, pid);
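/*
 * Illustrative userspace sketch (not kernel code): no libc wrapper is
 * assumed here, so the raw syscall numbers from <sys/syscall.h> are used.
 * A pidfd is a stable handle, so the target cannot be confused with a
 * recycled PID:
 *
 *	#include <sys/syscall.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	if (pidfd >= 0)
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM,
 *			NULL, 0);	// NULL info; flags must be 0 (see above)
 */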
3883 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3885 struct task_struct *p;
3889 p = find_task_by_vpid(pid);
3890 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3891 error = check_kill_permission(sig, info, p);
3893 * The null signal is a permissions and process existence
3894 * probe. No signal is actually delivered.
3896 if (!error && sig) {
3897 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3899 * If lock_task_sighand() failed we pretend the task
3900 * dies after receiving the signal. The window is tiny,
3901 * and the signal is private anyway.
3903 if (unlikely(error == -ESRCH))
3912 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3914 struct kernel_siginfo info;
3916 clear_siginfo(&info);
3917 info.si_signo = sig;
3919 info.si_code = SI_TKILL;
3920 info.si_pid = task_tgid_vnr(current);
3921 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3923 return do_send_specific(tgid, pid, sig, &info);
3927 * sys_tgkill - send signal to one specific thread
3928 * @tgid: the thread group ID of the thread
3929 * @pid: the PID of the thread
3930 * @sig: signal to be sent
3932 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3933 * exists but no longer belongs to the target process. This
3934 * method solves the problem of threads exiting and PIDs getting reused.
3936 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3938 /* This is only valid for single tasks */
3939 if (pid <= 0 || tgid <= 0)
3942 return do_tkill(tgid, pid, sig);
3946 * sys_tkill - send signal to one specific task
3947 * @pid: the PID of the task
3948 * @sig: signal to be sent
3950 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3952 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3954 /* This is only valid for single tasks */
3958 return do_tkill(0, pid, sig);
3961 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3963 /* Not even root can pretend to send signals from the kernel.
3964 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3966 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3967 (task_pid_vnr(current) != pid))
3970 /* POSIX.1b doesn't mention process groups. */
3971 return kill_proc_info(sig, info, pid);
3975 * sys_rt_sigqueueinfo - send signal information to a process
3976 * @pid: the PID of the thread
3977 * @sig: signal to be sent
3978 * @uinfo: signal info to be sent
3980 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3981 siginfo_t __user *, uinfo)
3983 kernel_siginfo_t info;
3984 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3987 return do_rt_sigqueueinfo(pid, sig, &info);
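/*
 * Illustrative userspace sketch (not kernel code): glibc's sigqueue(3)
 * fills the siginfo (si_code == SI_QUEUE) and ends up in the syscall
 * above; an SA_SIGINFO handler on the receiving side reads the value back:
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGUSR1, v);			// sender
 *
 *	static void handler(int sig, siginfo_t *si, void *uctx)
 *	{
 *		int data = si->si_value.sival_int;	// receiver sees 42
 *	}
 */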
3990 #ifdef CONFIG_COMPAT
3991 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3994 struct compat_siginfo __user *, uinfo)
3996 kernel_siginfo_t info;
3997 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4000 return do_rt_sigqueueinfo(pid, sig, &info);
4004 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4006 /* This is only valid for single tasks */
4007 if (pid <= 0 || tgid <= 0)
4010 /* Not even root can pretend to send signals from the kernel.
4011 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4013 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4014 (task_pid_vnr(current) != pid))
4017 return do_send_specific(tgid, pid, sig, info);
4020 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4021 siginfo_t __user *, uinfo)
4023 kernel_siginfo_t info;
4024 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4027 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4030 #ifdef CONFIG_COMPAT
4031 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4035 struct compat_siginfo __user *, uinfo)
4037 kernel_siginfo_t info;
4038 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4041 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4046 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4048 void kernel_sigaction(int sig, __sighandler_t action)
4050 spin_lock_irq(&current->sighand->siglock);
4051 current->sighand->action[sig - 1].sa.sa_handler = action;
4052 if (action == SIG_IGN) {
4056 sigaddset(&mask, sig);
4058 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4059 flush_sigqueue_mask(&mask, &current->pending);
4060 recalc_sigpending();
4062 spin_unlock_irq(&current->sighand->siglock);
4064 EXPORT_SYMBOL(kernel_sigaction);
4066 void __weak sigaction_compat_abi(struct k_sigaction *act,
4067 struct k_sigaction *oact)
4071 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4073 struct task_struct *p = current, *t;
4074 struct k_sigaction *k;
4077 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4080 k = &p->sighand->action[sig-1];
4082 spin_lock_irq(&p->sighand->siglock);
4087 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4088 * e.g. by having an architecture use the bit in their uapi.
4090 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4093 * Clear unknown flag bits in order to allow userspace to detect missing
4094 * support for flag bits and to allow the kernel to use non-uapi bits
4095 * internally.
4098 act->sa.sa_flags &= UAPI_SA_FLAGS;
4100 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4102 sigaction_compat_abi(act, oact);
4105 sigdelsetmask(&act->sa.sa_mask,
4106 sigmask(SIGKILL) | sigmask(SIGSTOP));
4110 * "Setting a signal action to SIG_IGN for a signal that is
4111 * pending shall cause the pending signal to be discarded,
4112 * whether or not it is blocked."
4114 * "Setting a signal action to SIG_DFL for a signal that is
4115 * pending and whose default action is to ignore the signal
4116 * (for example, SIGCHLD), shall cause the pending signal to
4117 * be discarded, whether or not it is blocked"
4119 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4121 sigaddset(&mask, sig);
4122 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4123 for_each_thread(p, t)
4124 flush_sigqueue_mask(&mask, &t->pending);
4128 spin_unlock_irq(&p->sighand->siglock);
4133 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4136 struct task_struct *t = current;
4139 memset(oss, 0, sizeof(stack_t));
4140 oss->ss_sp = (void __user *) t->sas_ss_sp;
4141 oss->ss_size = t->sas_ss_size;
4142 oss->ss_flags = sas_ss_flags(sp) |
4143 (current->sas_ss_flags & SS_FLAG_BITS);
4147 void __user *ss_sp = ss->ss_sp;
4148 size_t ss_size = ss->ss_size;
4149 unsigned ss_flags = ss->ss_flags;
4152 if (unlikely(on_sig_stack(sp)))
4155 ss_mode = ss_flags & ~SS_FLAG_BITS;
4156 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4160 if (ss_mode == SS_DISABLE) {
4164 if (unlikely(ss_size < min_ss_size))
4168 t->sas_ss_sp = (unsigned long) ss_sp;
4169 t->sas_ss_size = ss_size;
4170 t->sas_ss_flags = ss_flags;
4175 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4179 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4181 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4182 current_user_stack_pointer(),
4184 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
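/*
 * Illustrative userspace sketch (not kernel code): a handler that must
 * survive stack overflow runs on an alternate stack established here and
 * selected per-handler with SA_ONSTACK; @segv_handler is a hypothetical
 * handler:
 *
 *	stack_t ss = {
 *		.ss_sp		= malloc(SIGSTKSZ),
 *		.ss_size	= SIGSTKSZ,
 *		.ss_flags	= 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = {
 *		.sa_sigaction	= segv_handler,
 *		.sa_flags	= SA_SIGINFO | SA_ONSTACK,
 *	};
 *	sigaction(SIGSEGV, &sa, NULL);
 */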
4189 int restore_altstack(const stack_t __user *uss)
4192 if (copy_from_user(&new, uss, sizeof(stack_t)))
4194 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4196 /* squash all but EFAULT for now */
4200 int __save_altstack(stack_t __user *uss, unsigned long sp)
4202 struct task_struct *t = current;
4203 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4204 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4205 __put_user(t->sas_ss_size, &uss->ss_size);
4209 #ifdef CONFIG_COMPAT
4210 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4211 compat_stack_t __user *uoss_ptr)
4217 compat_stack_t uss32;
4218 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4220 uss.ss_sp = compat_ptr(uss32.ss_sp);
4221 uss.ss_flags = uss32.ss_flags;
4222 uss.ss_size = uss32.ss_size;
4224 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4225 compat_user_stack_pointer(),
4226 COMPAT_MINSIGSTKSZ);
4227 if (ret >= 0 && uoss_ptr) {
4229 memset(&old, 0, sizeof(old));
4230 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4231 old.ss_flags = uoss.ss_flags;
4232 old.ss_size = uoss.ss_size;
4233 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4239 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4240 const compat_stack_t __user *, uss_ptr,
4241 compat_stack_t __user *, uoss_ptr)
4243 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4246 int compat_restore_altstack(const compat_stack_t __user *uss)
4248 int err = do_compat_sigaltstack(uss, NULL);
4249 /* squash all but -EFAULT for now */
4250 return err == -EFAULT ? err : 0;
4253 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4256 struct task_struct *t = current;
4257 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4259 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4260 __put_user(t->sas_ss_size, &uss->ss_size);
4265 #ifdef __ARCH_WANT_SYS_SIGPENDING
4268 * sys_sigpending - examine pending signals
4269 * @uset: where mask of pending signals is returned
4271 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4275 if (sizeof(old_sigset_t) > sizeof(*uset))
4278 do_sigpending(&set);
4280 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4286 #ifdef CONFIG_COMPAT
4287 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4291 do_sigpending(&set);
4293 return put_user(set.sig[0], set32);
4299 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4301 * sys_sigprocmask - examine and change blocked signals
4302 * @how: whether to add, remove, or set signals
4303 * @nset: signals to add or remove (if non-null)
4304 * @oset: previous value of signal mask if non-null
4306 * Some platforms have their own version with special arguments;
4307 * others support only sys_rt_sigprocmask.
4310 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4311 old_sigset_t __user *, oset)
4313 old_sigset_t old_set, new_set;
4314 sigset_t new_blocked;
4316 old_set = current->blocked.sig[0];
4319 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4322 new_blocked = current->blocked;
4326 sigaddsetmask(&new_blocked, new_set);
4329 sigdelsetmask(&new_blocked, new_set);
4332 new_blocked.sig[0] = new_set;
4338 set_current_blocked(&new_blocked);
4342 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4348 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4350 #ifndef CONFIG_ODD_RT_SIGACTION
4352 * sys_rt_sigaction - alter an action taken by a process
4353 * @sig: signal to be sent
4354 * @act: new sigaction
4355 * @oact: used to save the previous sigaction
4356 * @sigsetsize: size of sigset_t type
4358 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4359 const struct sigaction __user *, act,
4360 struct sigaction __user *, oact,
4363 struct k_sigaction new_sa, old_sa;
4366 /* XXX: Don't preclude handling different sized sigset_t's. */
4367 if (sigsetsize != sizeof(sigset_t))
4370 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4373 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4377 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
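/*
 * Illustrative userspace sketch (not kernel code): passing a NULL @act
 * makes do_sigaction() a pure query, which is how userspace inspects a
 * disposition without changing it:
 *
 *	struct sigaction old;
 *	sigaction(SIGTERM, NULL, &old);		// query only
 *	if (old.sa_handler == SIG_IGN)
 *		...SIGTERM is currently ignored...
 */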
4382 #ifdef CONFIG_COMPAT
4383 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4384 const struct compat_sigaction __user *, act,
4385 struct compat_sigaction __user *, oact,
4386 compat_size_t, sigsetsize)
4388 struct k_sigaction new_ka, old_ka;
4389 #ifdef __ARCH_HAS_SA_RESTORER
4390 compat_uptr_t restorer;
4394 /* XXX: Don't preclude handling different sized sigset_t's. */
4395 if (sigsetsize != sizeof(compat_sigset_t))
4399 compat_uptr_t handler;
4400 ret = get_user(handler, &act->sa_handler);
4401 new_ka.sa.sa_handler = compat_ptr(handler);
4402 #ifdef __ARCH_HAS_SA_RESTORER
4403 ret |= get_user(restorer, &act->sa_restorer);
4404 new_ka.sa.sa_restorer = compat_ptr(restorer);
4406 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4407 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4412 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4414 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4416 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4417 sizeof(oact->sa_mask));
4418 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4419 #ifdef __ARCH_HAS_SA_RESTORER
4420 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4421 &oact->sa_restorer);
4427 #endif /* !CONFIG_ODD_RT_SIGACTION */
4429 #ifdef CONFIG_OLD_SIGACTION
4430 SYSCALL_DEFINE3(sigaction, int, sig,
4431 const struct old_sigaction __user *, act,
4432 struct old_sigaction __user *, oact)
4434 struct k_sigaction new_ka, old_ka;
4439 if (!access_ok(act, sizeof(*act)) ||
4440 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4441 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4442 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4443 __get_user(mask, &act->sa_mask))
4445 #ifdef __ARCH_HAS_KA_RESTORER
4446 new_ka.ka_restorer = NULL;
4448 siginitset(&new_ka.sa.sa_mask, mask);
4451 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4454 if (!access_ok(oact, sizeof(*oact)) ||
4455 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4456 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4457 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4458 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4465 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4466 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4467 const struct compat_old_sigaction __user *, act,
4468 struct compat_old_sigaction __user *, oact)
4470 struct k_sigaction new_ka, old_ka;
4472 compat_old_sigset_t mask;
4473 compat_uptr_t handler, restorer;
4476 if (!access_ok(act, sizeof(*act)) ||
4477 __get_user(handler, &act->sa_handler) ||
4478 __get_user(restorer, &act->sa_restorer) ||
4479 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4480 __get_user(mask, &act->sa_mask))
4483 #ifdef __ARCH_HAS_KA_RESTORER
4484 new_ka.ka_restorer = NULL;
4486 new_ka.sa.sa_handler = compat_ptr(handler);
4487 new_ka.sa.sa_restorer = compat_ptr(restorer);
4488 siginitset(&new_ka.sa.sa_mask, mask);
4491 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4494 if (!access_ok(oact, sizeof(*oact)) ||
4495 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4496 &oact->sa_handler) ||
4497 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4498 &oact->sa_restorer) ||
4499 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4500 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4507 #ifdef CONFIG_SGETMASK_SYSCALL
4510 * For backwards compatibility. Functionality superseded by sigprocmask.
4512 SYSCALL_DEFINE0(sgetmask)
4515 return current->blocked.sig[0];
4518 SYSCALL_DEFINE1(ssetmask, int, newmask)
4520 int old = current->blocked.sig[0];
4523 siginitset(&newset, newmask);
4524 set_current_blocked(&newset);
4528 #endif /* CONFIG_SGETMASK_SYSCALL */
4530 #ifdef __ARCH_WANT_SYS_SIGNAL
4532 * For backwards compatibility. Functionality superseded by sigaction.
4534 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4536 struct k_sigaction new_sa, old_sa;
4539 new_sa.sa.sa_handler = handler;
4540 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4541 sigemptyset(&new_sa.sa.sa_mask);
4543 ret = do_sigaction(sig, &new_sa, &old_sa);
4545 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4547 #endif /* __ARCH_WANT_SYS_SIGNAL */
4549 #ifdef __ARCH_WANT_SYS_PAUSE
4551 SYSCALL_DEFINE0(pause)
4553 while (!signal_pending(current)) {
4554 __set_current_state(TASK_INTERRUPTIBLE);
4557 return -ERESTARTNOHAND;
4562 static int sigsuspend(sigset_t *set)
4564 current->saved_sigmask = current->blocked;
4565 set_current_blocked(set);
4567 while (!signal_pending(current)) {
4568 __set_current_state(TASK_INTERRUPTIBLE);
4571 set_restore_sigmask();
4572 return -ERESTARTNOHAND;
4576 * sys_rt_sigsuspend - replace the signal mask with the @unewset
4577 * value until a signal is received
4578 * @unewset: new signal mask value
4579 * @sigsetsize: size of sigset_t type
4581 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4585 /* XXX: Don't preclude handling different sized sigset_t's. */
4586 if (sigsetsize != sizeof(sigset_t))
4589 if (copy_from_user(&newset, unewset, sizeof(newset)))
4591 return sigsuspend(&newset);
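/*
 * Illustrative userspace sketch (not kernel code): the canonical
 * race-free wait. Because sigsuspend(2) swaps the mask and sleeps
 * atomically, a wakeup cannot slip in between the flag test and the
 * sleep; @got_usr1 is a hypothetical volatile sig_atomic_t flag set by
 * the handler:
 *
 *	sigset_t block, orig;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &orig);
 *	while (!got_usr1)
 *		sigsuspend(&orig);	// unblock + sleep in one step
 *	sigprocmask(SIG_SETMASK, &orig, NULL);
 */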
4594 #ifdef CONFIG_COMPAT
4595 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4599 /* XXX: Don't preclude handling different sized sigset_t's. */
4600 if (sigsetsize != sizeof(sigset_t))
4603 if (get_compat_sigset(&newset, unewset))
4605 return sigsuspend(&newset);
4609 #ifdef CONFIG_OLD_SIGSUSPEND
4610 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4613 siginitset(&blocked, mask);
4614 return sigsuspend(&blocked);
4617 #ifdef CONFIG_OLD_SIGSUSPEND3
4618 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4621 siginitset(&blocked, mask);
4622 return sigsuspend(&blocked);
4626 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4631 static inline void siginfo_buildtime_checks(void)
4633 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4635 /* Verify the offsets in the two siginfos match */
4636 #define CHECK_OFFSET(field) \
4637 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4640 CHECK_OFFSET(si_pid);
4641 CHECK_OFFSET(si_uid);
4644 CHECK_OFFSET(si_tid);
4645 CHECK_OFFSET(si_overrun);
4646 CHECK_OFFSET(si_value);
4649 CHECK_OFFSET(si_pid);
4650 CHECK_OFFSET(si_uid);
4651 CHECK_OFFSET(si_value);
4654 CHECK_OFFSET(si_pid);
4655 CHECK_OFFSET(si_uid);
4656 CHECK_OFFSET(si_status);
4657 CHECK_OFFSET(si_utime);
4658 CHECK_OFFSET(si_stime);
4661 CHECK_OFFSET(si_addr);
4662 CHECK_OFFSET(si_trapno);
4663 CHECK_OFFSET(si_addr_lsb);
4664 CHECK_OFFSET(si_lower);
4665 CHECK_OFFSET(si_upper);
4666 CHECK_OFFSET(si_pkey);
4667 CHECK_OFFSET(si_perf_data);
4668 CHECK_OFFSET(si_perf_type);
4671 CHECK_OFFSET(si_band);
4672 CHECK_OFFSET(si_fd);
4675 CHECK_OFFSET(si_call_addr);
4676 CHECK_OFFSET(si_syscall);
4677 CHECK_OFFSET(si_arch);
4681 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4682 offsetof(struct siginfo, si_addr));
4683 if (sizeof(int) == sizeof(void __user *)) {
4684 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4685 sizeof(void __user *));
4687 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4688 sizeof_field(struct siginfo, si_uid)) !=
4689 sizeof(void __user *));
4690 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4691 offsetof(struct siginfo, si_uid));
4693 #ifdef CONFIG_COMPAT
4694 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4695 offsetof(struct compat_siginfo, si_addr));
4696 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4697 sizeof(compat_uptr_t));
4698 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4699 sizeof_field(struct siginfo, si_pid));
4703 void __init signals_init(void)
4705 siginfo_buildtime_checks();
4707 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4710 #ifdef CONFIG_KGDB_KDB
4711 #include <linux/kdb.h>
4713 * kdb_send_sig - Allows kdb to send signals without exposing
4714 * signal internals. This function checks if the required locks are
4715 * available before calling the main signal code, to avoid kdb
4716 * deadlocks.
4718 void kdb_send_sig(struct task_struct *t, int sig)
4720 static struct task_struct *kdb_prev_t;
4722 if (!spin_trylock(&t->sighand->siglock)) {
4723 kdb_printf("Can't do kill command now.\n"
4724 "The sigmask lock is held somewhere else in "
4725 "kernel, try again later\n");
4728 new_t = kdb_prev_t != t;
4730 if (!task_is_running(t) && new_t) {
4731 spin_unlock(&t->sighand->siglock);
4732 kdb_printf("Process is not RUNNING, sending a signal from "
4733 "kdb risks deadlock\n"
4734 "on the run queue locks. "
4735 "The signal has _not_ been sent.\n"
4736 "Reissue the kill command if you want to risk "
4740 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4741 spin_unlock(&t->sighand->siglock);
4743 kdb_printf("Failed to deliver signal %d to process %d.\n",
4746 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4748 #endif /* CONFIG_KGDB_KDB */