// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even an ignored signal, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);
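
/*
 * Illustrative sketch (not part of the original file): the canonical caller
 * pattern. Code that changes current->blocked must re-evaluate
 * TIF_SIGPENDING itself, under siglock, and recalc_sigpending() never
 * clears the flag of another task. "example_block_signal" is a
 * hypothetical helper mirroring what set_current_blocked() does internally.
 */
static void example_block_signal(int sig)
{
	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}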
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
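
/*
 * Worked example (not in the original file): with both SIGUSR1 (10) and
 * SIGSEGV (11) pending and unblocked, the SYNCHRONOUS_MASK filtering above
 * makes next_signal() report SIGSEGV first, even though SIGUSR1 has the
 * lower signal number; asynchronous signals in the first word are only
 * considered once no synchronous ones remain.
 */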
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
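
/*
 * Illustrative sketch (not part of the original file): the classic kernel
 * thread pattern this helper exists for. Assumes <linux/kthread.h>;
 * "example_kthread" is a hypothetical thread function.
 */
static int example_kthread(void *unused)
{
	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(HZ);
		/* kthreads that opted in to signals drop stragglers here */
		if (signal_pending(current))
			flush_signals(current);
	}
	return 0;
}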
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
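
/*
 * Illustrative sketch (not part of the original file): a dequeue in the
 * style of signalfd's reader. @mask names the signals to *leave alone*,
 * as with t->blocked in get_signal(). "example_dequeue_one" is a
 * hypothetical helper; the locking pattern is the required one.
 */
static int example_dequeue_one(kernel_siginfo_t *info)
{
	sigset_t mask;
	int signr;

	sigemptyset(&mask);
	sigaddset(&mask, SIGKILL);	/* never steal SIGKILL */

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &mask, info);
	spin_unlock_irq(&current->sighand->siglock);

	return signr;	/* 0 means nothing was pending */
}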
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queueing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
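
/*
 * Usage note (not in the original file): boot with "print-fatal-signals=1"
 * to enable the diagnostics above, or toggle them at runtime through
 * /proc/sys/kernel/print-fatal-signals.
 */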
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}
int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example;
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  The 32bit address will be encoded in the low
 * 32bits of the pointer, and those low 32bits will be stored at a
 * higher address than they would appear in a 32bit pointer.  So
 * userspace will not see the address it was expecting for its
 * completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto err;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto err_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto err_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto err_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
err_unlock:
	rcu_read_unlock();
err:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
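
/*
 * Illustrative sketch (not part of the original file): a caller honouring
 * the convention described above. "example_complete_urb" and its "compat"
 * flag are hypothetical; the point is that a 32-bit user address goes in
 * sival_int, not sival_ptr.
 */
static void example_complete_urb(struct pid *pid, const struct cred *cred,
				 void __user *uaddr, bool compat)
{
	sigval_t addr = {};

	if (compat)
		addr.sival_int = (int)(unsigned long)uaddr;	/* low 32 bits */
	else
		addr.sival_ptr = uaddr;

	kill_pid_usb_asyncio(SIGIO, 0, addr, pid, cred);
}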
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
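
/*
 * Illustrative note and sketch (not part of the original file): @priv picks
 * the shorthand above. priv == 0 sends SEND_SIG_NOINFO, which looks like a
 * user-generated signal; priv != 0 sends SEND_SIG_PRIV, which the
 * generation path treats as kernel-generated (see send_signal() above).
 * "example_task" is a hypothetical pointer the caller already holds.
 */
static void example_send_term(struct task_struct *example_task)
{
	send_sig(SIGTERM, example_task, 1);	/* kernel-generated SIGTERM */
}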
void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}
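
/*
 * Illustrative sketch (not part of the original file): the typical caller is
 * an architecture fault handler reporting a bad user access. On
 * architectures without __ARCH_SI_TRAPNO/__ia64__ the ___ARCH_SI_* wrappers
 * expand to nothing, so the call takes just three arguments;
 * "example_report_bad_access" is hypothetical.
 */
static void example_report_bad_access(void __user *fault_addr)
{
	force_sig_fault(SIGSEGV, SEGV_MAPERR, fault_addr);
}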
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}
#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif
int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
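
/*
 * Illustrative sketch (not part of the original file): resolving a numeric
 * pid in the caller's namespace and signalling it. find_vpid() is only
 * valid under rcu_read_lock(); "nr" is a hypothetical pid number.
 */
static int example_kill_by_nr(pid_t nr)
{
	int ret;

	rcu_read_lock();
	ret = kill_pid(find_vpid(nr), SIGTERM, 1);
	rcu_read_unlock();
	return ret;
}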
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
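
/*
 * Illustrative sketch (not part of the original file): the POSIX-timer
 * style lifecycle. The sigqueue is allocated once at setup so that later
 * sends can never fail with ENOMEM; "example_pid" and the stashed queue
 * pointer are hypothetical.
 */
static int example_timer_event(struct sigqueue **qp, struct pid *example_pid)
{
	if (!*qp) {
		*qp = sigqueue_alloc();		/* at timer_create() time */
		if (!*qp)
			return -EAGAIN;
		clear_siginfo(&(*qp)->info);
		(*qp)->info.si_signo = SIGALRM;
		(*qp)->info.si_code = SI_TIMER;
	}
	/* at expiry: resend the same entry; send_sigqueue() never allocates */
	return send_sigqueue(*qp, example_pid, PIDTYPE_TGID);
}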
static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}
/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		cgroup_enter_frozen();
		preempt_enable_no_resched();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
2270 static void ptrace_do_notify(int signr, int exit_code, int why)
2272 kernel_siginfo_t info;
2274 clear_siginfo(&info);
2275 info.si_signo = signr;
2276 info.si_code = exit_code;
2277 info.si_pid = task_pid_vnr(current);
2278 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2280 /* Let the debugger run. */
2281 ptrace_stop(exit_code, why, 1, &info);
2284 void ptrace_notify(int exit_code)
2286 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2287 if (unlikely(current->task_works))
2290 spin_lock_irq(&current->sighand->siglock);
2291 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2292 spin_unlock_irq(&current->sighand->siglock);
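/*
 * Example (editor's sketch, not part of the original source): ptrace_notify()
 * reports kernel events as a SIGTRAP stop with the PTRACE_EVENT_* code packed
 * into the upper bits of exit_code.  A userspace tracer that set
 * PTRACE_O_TRACEEXEC could decode the resulting stop roughly like this;
 * `child` is a hypothetical traced pid:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void wait_for_exec_event(pid_t child)
 *	{
 *		int status;
 *
 *		waitpid(child, &status, 0);
 *		if (WIFSTOPPED(status) &&
 *		    (status >> 8) == (SIGTRAP | (PTRACE_EVENT_EXEC << 8)))
 *			printf("tracee stopped at exec\n");
 *		ptrace(PTRACE_CONT, child, 0, 0);	// resume the tracee
 *	}
 */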
2296 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2297 * @signr: signr causing group stop if initiating
2299 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2300 * and participate in it. If already set, participate in the existing
2301 * group stop. If participated in a group stop (and thus slept), %true is
2302 * returned with siglock released.
2304 * If ptraced, this function doesn't handle stop itself. Instead,
2305 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2306 * untouched. The caller must ensure that INTERRUPT trap handling takes
2307 * place afterwards.
2310 * Must be called with @current->sighand->siglock held, which is released on %true return.
2314 * %false if group stop is already cancelled or ptrace trap is scheduled.
2315 * %true if participated in group stop.
2317 static bool do_signal_stop(int signr)
2318 __releases(&current->sighand->siglock)
2320 struct signal_struct *sig = current->signal;
2322 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2323 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2324 struct task_struct *t;
2326 /* signr will be recorded in task->jobctl for retries */
2327 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2329 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2330 unlikely(signal_group_exit(sig)))
2333 * There is no group stop already in progress. We must initiate one now.
2336 * While ptraced, a task may be resumed while group stop is
2337 * still in effect and then receive a stop signal and
2338 * initiate another group stop. This deviates from the
2339 * usual behavior as two consecutive stop signals can't
2340 * cause two group stops when !ptraced. That is why we
2341 * also check !task_is_stopped(t) below.
2343 * The condition can be distinguished by testing whether
2344 * SIGNAL_STOP_STOPPED is already set. Don't generate
2345 * group_exit_code in such case.
2347 * This is not necessary for SIGNAL_STOP_CONTINUED because
2348 * an intervening stop signal is required to cause two
2349 * continued events regardless of ptrace.
2351 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2352 sig->group_exit_code = signr;
2354 sig->group_stop_count = 0;
2356 if (task_set_jobctl_pending(current, signr | gstop))
2357 sig->group_stop_count++;
2360 while_each_thread(current, t) {
2362 * Setting state to TASK_STOPPED for a group
2363 * stop is always done with the siglock held,
2364 * so this check has no races.
2366 if (!task_is_stopped(t) &&
2367 task_set_jobctl_pending(t, signr | gstop)) {
2368 sig->group_stop_count++;
2369 if (likely(!(t->ptrace & PT_SEIZED)))
2370 signal_wake_up(t, 0);
2372 ptrace_trap_notify(t);
2377 if (likely(!current->ptrace)) {
2381 * If there are no other threads in the group, or if there
2382 * is a group stop in progress and we are the last to stop,
2383 * report to the parent.
2385 if (task_participate_group_stop(current))
2386 notify = CLD_STOPPED;
2388 set_special_state(TASK_STOPPED);
2389 spin_unlock_irq(&current->sighand->siglock);
2392 * Notify the parent of the group stop completion. Because
2393 * we're not holding either the siglock or tasklist_lock
2394 * here, ptracer may attach in between; however, this is for
2395 * group stop and should always be delivered to the real
2396 * parent of the group leader. The new ptracer will get
2397 * its notification when this task transitions into TASK_TRACED.
2401 read_lock(&tasklist_lock);
2402 do_notify_parent_cldstop(current, false, notify);
2403 read_unlock(&tasklist_lock);
2406 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2407 cgroup_enter_frozen();
2408 freezable_schedule();
2412 * While ptraced, group stop is handled by STOP trap.
2413 * Schedule it and let the caller deal with it.
2415 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
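/*
 * Example (editor's sketch, not part of the original source): the group stop
 * set up above is what a parent observes through waitpid(2) with WUNTRACED.
 * `child` is a hypothetical child pid:
 *
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void stop_and_resume(pid_t child)
 *	{
 *		int status;
 *
 *		kill(child, SIGSTOP);
 *		waitpid(child, &status, WUNTRACED);
 *		if (WIFSTOPPED(status))		// the CLD_STOPPED notification
 *			printf("stopped by signal %d\n", WSTOPSIG(status));
 *		kill(child, SIGCONT);		// wakes the stopped group
 *	}
 */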
2421 * do_jobctl_trap - take care of ptrace jobctl traps
2423 * When PT_SEIZED, it's used for both group stop and explicit
2424 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2425 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2426 * the stop signal; otherwise, %SIGTRAP.
2428 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2429 * number as exit_code and no siginfo.
2432 * Must be called with @current->sighand->siglock held, which may be
2433 * released and re-acquired before returning with intervening sleep.
2435 static void do_jobctl_trap(void)
2437 struct signal_struct *signal = current->signal;
2438 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2440 if (current->ptrace & PT_SEIZED) {
2441 if (!signal->group_stop_count &&
2442 !(signal->flags & SIGNAL_STOP_STOPPED))
2444 WARN_ON_ONCE(!signr);
2445 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2448 WARN_ON_ONCE(!signr);
2449 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2450 current->exit_code = 0;
2455 * do_freezer_trap - handle the freezer jobctl trap
2457 * Puts the task into the frozen state, unless the task is about to quit;
2458 * in that case it drops JOBCTL_TRAP_FREEZE instead.
2461 * Must be called with @current->sighand->siglock held,
2462 * which is always released before returning.
2464 static void do_freezer_trap(void)
2465 __releases(&current->sighand->siglock)
2468 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2469 * let's make another loop to give it a chance to be handled.
2470 * In any case, we'll return back.
2472 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2473 JOBCTL_TRAP_FREEZE) {
2474 spin_unlock_irq(&current->sighand->siglock);
2479 * Now we're sure that there is no pending fatal signal and no
2480 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2481 * immediately (if there is a non-fatal signal pending), and
2482 * put the task to sleep.
2484 __set_current_state(TASK_INTERRUPTIBLE);
2485 clear_thread_flag(TIF_SIGPENDING);
2486 spin_unlock_irq(&current->sighand->siglock);
2487 cgroup_enter_frozen();
2488 freezable_schedule();
2491 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2494 * We do not check sig_kernel_stop(signr) but set this marker
2495 * unconditionally because we do not know whether debugger will
2496 * change signr. This flag has no meaning unless we are going
2497 * to stop after return from ptrace_stop(). In this case it will
2498 * be checked in do_signal_stop(), we should only stop if it was
2499 * not cleared by SIGCONT while we were sleeping. See also the
2500 * comment in dequeue_signal().
2502 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2503 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2505 /* We're back. Did the debugger cancel the sig? */
2506 signr = current->exit_code;
2510 current->exit_code = 0;
2513 * Update the siginfo structure if the signal has
2514 * changed. If the debugger wanted something
2515 * specific in the siginfo structure then it should
2516 * have updated *info via PTRACE_SETSIGINFO.
2518 if (signr != info->si_signo) {
2519 clear_siginfo(info);
2520 info->si_signo = signr;
2522 info->si_code = SI_USER;
2524 info->si_pid = task_pid_vnr(current->parent);
2525 info->si_uid = from_kuid_munged(current_user_ns(),
2526 task_uid(current->parent));
2530 /* If the (new) signal is now blocked, requeue it. */
2531 if (sigismember(&current->blocked, signr)) {
2532 send_signal(signr, info, current, PIDTYPE_PID);
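/*
 * Example (editor's sketch, not part of the original source): the "did the
 * debugger cancel the sig?" check above corresponds to the data argument of
 * PTRACE_CONT in userspace - 0 suppresses the dequeued signal, any other
 * value replaces it.  `child` is a hypothetical attached tracee:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *
 *	static void filter_one_signal(pid_t child)
 *	{
 *		int status;
 *
 *		waitpid(child, &status, 0);
 *		if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGUSR1)
 *			ptrace(PTRACE_CONT, child, 0, 0);	// swallow it
 *		else
 *			ptrace(PTRACE_CONT, child, 0, WSTOPSIG(status));
 *	}
 */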
2539 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2541 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2543 case SIL_FAULT_TRAPNO:
2544 case SIL_FAULT_MCEERR:
2545 case SIL_FAULT_BNDERR:
2546 case SIL_FAULT_PKUERR:
2547 case SIL_PERF_EVENT:
2548 ksig->info.si_addr = arch_untagged_si_addr(
2549 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2561 bool get_signal(struct ksignal *ksig)
2563 struct sighand_struct *sighand = current->sighand;
2564 struct signal_struct *signal = current->signal;
2567 if (unlikely(current->task_works))
2571 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2572 * that the arch handlers don't all have to do it. If we get here
2573 * without TIF_SIGPENDING, just exit after running signal work.
2575 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2576 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2577 tracehook_notify_signal();
2578 if (!task_sigpending(current))
2582 if (unlikely(uprobe_deny_signal()))
2586 * Do this once, we can't return to user-mode if freezing() == T.
2587 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2588 * thus do not need another check after return.
2593 spin_lock_irq(&sighand->siglock);
2596 * Every stopped thread goes here after wakeup. Check to see if
2597 * we should notify the parent, prepare_signal(SIGCONT) encodes
2598 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2600 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2603 if (signal->flags & SIGNAL_CLD_CONTINUED)
2604 why = CLD_CONTINUED;
2608 signal->flags &= ~SIGNAL_CLD_MASK;
2610 spin_unlock_irq(&sighand->siglock);
2613 * Notify the parent that we're continuing. This event is
2614 * always per-process and doesn't make a whole lot of sense
2615 * for ptracers, who shouldn't consume the state via
2616 * wait(2) either, but, for backward compatibility, notify
2617 * the ptracer of the group leader too unless it's gonna be a duplicate.
2620 read_lock(&tasklist_lock);
2621 do_notify_parent_cldstop(current, false, why);
2623 if (ptrace_reparented(current->group_leader))
2624 do_notify_parent_cldstop(current->group_leader,
2626 read_unlock(&tasklist_lock);
2631 /* Has this task already been marked for death? */
2632 if (signal_group_exit(signal)) {
2633 ksig->info.si_signo = signr = SIGKILL;
2634 sigdelset(&current->pending.signal, SIGKILL);
2635 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2636 &sighand->action[SIGKILL - 1]);
2637 recalc_sigpending();
2642 struct k_sigaction *ka;
2644 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2648 if (unlikely(current->jobctl &
2649 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2650 if (current->jobctl & JOBCTL_TRAP_MASK) {
2652 spin_unlock_irq(&sighand->siglock);
2653 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2660 * If the task is leaving the frozen state, let's update
2661 * cgroup counters and reset the frozen bit.
2663 if (unlikely(cgroup_task_frozen(current))) {
2664 spin_unlock_irq(&sighand->siglock);
2665 cgroup_leave_frozen(false);
2670 * Signals generated by the execution of an instruction
2671 * need to be delivered before any other pending signals
2672 * so that the instruction pointer in the signal stack
2673 * frame points to the faulting instruction.
2675 signr = dequeue_synchronous_signal(&ksig->info);
2677 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2680 break; /* will return 0 */
2682 if (unlikely(current->ptrace) && signr != SIGKILL) {
2683 signr = ptrace_signal(signr, &ksig->info);
2688 ka = &sighand->action[signr-1];
2690 /* Trace actually delivered signals. */
2691 trace_signal_deliver(signr, &ksig->info, ka);
2693 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2695 if (ka->sa.sa_handler != SIG_DFL) {
2696 /* Run the handler. */
2699 if (ka->sa.sa_flags & SA_ONESHOT)
2700 ka->sa.sa_handler = SIG_DFL;
2702 break; /* will return non-zero "signr" value */
2706 * Now we are doing the default action for this signal.
2708 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2712 * Global init gets no signals it doesn't want.
2713 * Container-init gets no signals it doesn't want from the same container.
2716 * Note that if global/container-init sees a sig_kernel_only()
2717 * signal here, the signal must have been generated internally
2718 * or must have come from an ancestor namespace. In either
2719 * case, the signal cannot be dropped.
2721 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2722 !sig_kernel_only(signr))
2725 if (sig_kernel_stop(signr)) {
2727 * The default action is to stop all threads in
2728 * the thread group. The job control signals
2729 * do nothing in an orphaned pgrp, but SIGSTOP
2730 * always works. Note that siglock needs to be
2731 * dropped during the call to is_orphaned_pgrp()
2732 * because of lock ordering with tasklist_lock.
2733 * This allows an intervening SIGCONT to be posted.
2734 * We need to check for that and bail out if necessary.
2736 if (signr != SIGSTOP) {
2737 spin_unlock_irq(&sighand->siglock);
2739 /* signals can be posted during this window */
2741 if (is_current_pgrp_orphaned())
2744 spin_lock_irq(&sighand->siglock);
2747 if (likely(do_signal_stop(ksig->info.si_signo))) {
2748 /* It released the siglock. */
2753 * We didn't actually stop, due to a race
2754 * with SIGCONT or something like that.
2760 spin_unlock_irq(&sighand->siglock);
2761 if (unlikely(cgroup_task_frozen(current)))
2762 cgroup_leave_frozen(true);
2765 * Anything else is fatal, maybe with a core dump.
2767 current->flags |= PF_SIGNALED;
2769 if (sig_kernel_coredump(signr)) {
2770 if (print_fatal_signals)
2771 print_fatal_signal(ksig->info.si_signo);
2772 proc_coredump_connector(current);
2774 * If it was able to dump core, this kills all
2775 * other threads in the group and synchronizes with
2776 * their demise. If we lost the race with another
2777 * thread getting here, it set group_exit_code
2778 * first and our do_group_exit call below will use
2779 * that value and ignore the one we pass it.
2781 do_coredump(&ksig->info);
2785 * PF_IO_WORKER threads will catch and exit on fatal signals
2786 * themselves. They have cleanup that must be performed, so
2787 * we cannot call do_exit() on their behalf.
2789 if (current->flags & PF_IO_WORKER)
2793 * Death signals, no core dump.
2795 do_group_exit(ksig->info.si_signo);
2798 spin_unlock_irq(&sighand->siglock);
2802 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2803 hide_si_addr_tag_bits(ksig);
2805 return ksig->sig > 0;
2809 * signal_delivered - update state after successful signal delivery
2810 * @ksig: kernel signal struct
2811 * @stepping: nonzero if debugger single-step or block-step in use
2813 * This function should be called when a signal has successfully been
2814 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2815 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2816 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2818 static void signal_delivered(struct ksignal *ksig, int stepping)
2822 /* A signal was successfully delivered, and the
2823 saved sigmask was stored on the signal frame,
2824 and will be restored by sigreturn. So we can
2825 simply clear the restore sigmask flag. */
2826 clear_restore_sigmask();
2828 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2829 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2830 sigaddset(&blocked, ksig->sig);
2831 set_current_blocked(&blocked);
2832 tracehook_signal_handler(stepping);
2835 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2838 force_sigsegv(ksig->sig);
2840 signal_delivered(ksig, stepping);
2844 * It could be that complete_signal() picked us to notify about the
2845 * group-wide signal. Other threads should be notified now to take
2846 * the shared signals in @which since we will not.
2848 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2851 struct task_struct *t;
2853 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2854 if (sigisemptyset(&retarget))
2858 while_each_thread(tsk, t) {
2859 if (t->flags & PF_EXITING)
2862 if (!has_pending_signals(&retarget, &t->blocked))
2864 /* Remove the signals this thread can handle. */
2865 sigandsets(&retarget, &retarget, &t->blocked);
2867 if (!task_sigpending(t))
2868 signal_wake_up(t, 0);
2870 if (sigisemptyset(&retarget))
2875 void exit_signals(struct task_struct *tsk)
2881 * @tsk is about to have PF_EXITING set - lock out users which
2882 * expect stable threadgroup.
2884 cgroup_threadgroup_change_begin(tsk);
2886 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2887 tsk->flags |= PF_EXITING;
2888 cgroup_threadgroup_change_end(tsk);
2892 spin_lock_irq(&tsk->sighand->siglock);
2894 * From now this task is not visible for group-wide signals,
2895 * see wants_signal(), do_signal_stop().
2897 tsk->flags |= PF_EXITING;
2899 cgroup_threadgroup_change_end(tsk);
2901 if (!task_sigpending(tsk))
2904 unblocked = tsk->blocked;
2905 signotset(&unblocked);
2906 retarget_shared_pending(tsk, &unblocked);
2908 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2909 task_participate_group_stop(tsk))
2910 group_stop = CLD_STOPPED;
2912 spin_unlock_irq(&tsk->sighand->siglock);
2915 * If group stop has completed, deliver the notification. This
2916 * should always go to the real parent of the group leader.
2918 if (unlikely(group_stop)) {
2919 read_lock(&tasklist_lock);
2920 do_notify_parent_cldstop(tsk, false, group_stop);
2921 read_unlock(&tasklist_lock);
2926 * System call entry points.
2930 * sys_restart_syscall - restart a system call
2932 SYSCALL_DEFINE0(restart_syscall)
2934 struct restart_block *restart = &current->restart_block;
2935 return restart->fn(restart);
2938 long do_no_restart_syscall(struct restart_block *param)
2943 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2945 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2946 sigset_t newblocked;
2947 /* A set of now blocked but previously unblocked signals. */
2948 sigandnsets(&newblocked, newset, &current->blocked);
2949 retarget_shared_pending(tsk, &newblocked);
2951 tsk->blocked = *newset;
2952 recalc_sigpending();
2956 * set_current_blocked - change current->blocked mask
 * @newset: new mask
2959 * It is wrong to change ->blocked directly, this helper should be used
2960 * to ensure the process can't miss a shared signal we are going to block.
2962 void set_current_blocked(sigset_t *newset)
2964 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2965 __set_current_blocked(newset);
2968 void __set_current_blocked(const sigset_t *newset)
2970 struct task_struct *tsk = current;
2973 * In case the signal mask hasn't changed, there is nothing we need
2974 * to do. current->blocked shouldn't be modified by any other task.
2976 if (sigequalsets(&tsk->blocked, newset))
2979 spin_lock_irq(&tsk->sighand->siglock);
2980 __set_task_blocked(tsk, newset);
2981 spin_unlock_irq(&tsk->sighand->siglock);
2985 * This is also useful for kernel threads that want to temporarily
2986 * (or permanently) block certain signals.
2988 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2989 * interface happily blocks "unblockable" signals like SIGKILL and friends.
2992 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2994 struct task_struct *tsk = current;
2997 /* Lockless, only current can change ->blocked, never from irq */
2999 *oldset = tsk->blocked;
3003 sigorsets(&newset, &tsk->blocked, set);
3006 sigandnsets(&newset, &tsk->blocked, set);
3015 __set_current_blocked(&newset);
3018 EXPORT_SYMBOL(sigprocmask);
3021 * The API helps set app-provided sigmasks.
3023 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3024 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3026 * Note that it does set_restore_sigmask() in advance, so it must always be
3027 * paired with restore_saved_sigmask_unless() before return from syscall.
3029 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3035 if (sigsetsize != sizeof(sigset_t))
3037 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3040 set_restore_sigmask();
3041 current->saved_sigmask = current->blocked;
3042 set_current_blocked(&kmask);
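/*
 * Example (editor's sketch, not part of the original source): the pairing
 * contract described above, roughly as a caller such as ppoll(2) follows it.
 * do_poll_work() is a hypothetical stand-in for the syscall's real body:
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_poll_work();	// hypothetical; may return -EINTR
 *
 *	// Keep the temporary mask installed until the interrupted syscall
 *	// returns to user mode, so the handler runs with it; otherwise put
 *	// ->saved_sigmask back right away.
 *	restore_saved_sigmask_unless(ret == -EINTR);
 */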
3047 #ifdef CONFIG_COMPAT
3048 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3055 if (sigsetsize != sizeof(compat_sigset_t))
3057 if (get_compat_sigset(&kmask, umask))
3060 set_restore_sigmask();
3061 current->saved_sigmask = current->blocked;
3062 set_current_blocked(&kmask);
3069 * sys_rt_sigprocmask - change the list of currently blocked signals
3070 * @how: whether to add, remove, or set signals
3071 * @nset: new signal set to apply according to @how, if non-null
3072 * @oset: previous value of signal mask if non-null
3073 * @sigsetsize: size of sigset_t type
3075 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3076 sigset_t __user *, oset, size_t, sigsetsize)
3078 sigset_t old_set, new_set;
3081 /* XXX: Don't preclude handling different sized sigset_t's. */
3082 if (sigsetsize != sizeof(sigset_t))
3085 old_set = current->blocked;
3088 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3090 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3092 error = sigprocmask(how, &new_set, NULL);
3098 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
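/*
 * Example (editor's sketch, not part of the original source): typical
 * userspace use of this syscall through the sigprocmask(2) wrapper, blocking
 * SIGINT around a critical section:
 *
 *	#include <signal.h>
 *
 *	sigset_t set, oldset;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &oldset);		// add SIGINT to the mask
 *	// ... critical section: a SIGINT stays pending here ...
 *	sigprocmask(SIG_SETMASK, &oldset, NULL);	// restore the old mask
 */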
3105 #ifdef CONFIG_COMPAT
3106 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3107 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3109 sigset_t old_set = current->blocked;
3111 /* XXX: Don't preclude handling different sized sigset_t's. */
3112 if (sigsetsize != sizeof(sigset_t))
3118 if (get_compat_sigset(&new_set, nset))
3120 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3122 error = sigprocmask(how, &new_set, NULL);
3126 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3130 static void do_sigpending(sigset_t *set)
3132 spin_lock_irq(&current->sighand->siglock);
3133 sigorsets(set, &current->pending.signal,
3134 &current->signal->shared_pending.signal);
3135 spin_unlock_irq(&current->sighand->siglock);
3137 /* Outside the lock because only this thread touches it. */
3138 sigandsets(set, &current->blocked, set);
3142 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
3144 * @uset: stores pending signals
3145 * @sigsetsize: size of sigset_t type or larger
3147 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3151 if (sigsetsize > sizeof(*uset))
3154 do_sigpending(&set);
3156 if (copy_to_user(uset, &set, sigsetsize))
3162 #ifdef CONFIG_COMPAT
3163 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3164 compat_size_t, sigsetsize)
3168 if (sigsetsize > sizeof(*uset))
3171 do_sigpending(&set);
3173 return put_compat_sigset(uset, &set, sigsetsize);
3177 static const struct {
3178 unsigned char limit, layout;
3180 [SIGILL] = { NSIGILL, SIL_FAULT },
3181 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3182 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3183 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3184 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3186 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3188 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3189 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3190 [SIGSYS] = { NSIGSYS, SIL_SYS },
3193 static bool known_siginfo_layout(unsigned sig, int si_code)
3195 if (si_code == SI_KERNEL)
3197 else if ((si_code > SI_USER)) {
3198 if (sig_specific_sicodes(sig)) {
3199 if (si_code <= sig_sicodes[sig].limit)
3202 else if (si_code <= NSIGPOLL)
3205 else if (si_code >= SI_DETHREAD)
3207 else if (si_code == SI_ASYNCNL)
3212 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3214 enum siginfo_layout layout = SIL_KILL;
3215 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3216 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3217 (si_code <= sig_sicodes[sig].limit)) {
3218 layout = sig_sicodes[sig].layout;
3219 /* Handle the exceptions */
3220 if ((sig == SIGBUS) &&
3221 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3222 layout = SIL_FAULT_MCEERR;
3223 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3224 layout = SIL_FAULT_BNDERR;
3226 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3227 layout = SIL_FAULT_PKUERR;
3229 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3230 layout = SIL_PERF_EVENT;
3231 #ifdef __ARCH_SI_TRAPNO
3232 else if (layout == SIL_FAULT)
3233 layout = SIL_FAULT_TRAPNO;
3236 else if (si_code <= NSIGPOLL)
3239 if (si_code == SI_TIMER)
3241 else if (si_code == SI_SIGIO)
3243 else if (si_code < 0)
3249 static inline char __user *si_expansion(const siginfo_t __user *info)
3251 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3254 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3256 char __user *expansion = si_expansion(to);
3257 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3259 if (clear_user(expansion, SI_EXPANSION_SIZE))
3264 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3265 const siginfo_t __user *from)
3267 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3268 char __user *expansion = si_expansion(from);
3269 char buf[SI_EXPANSION_SIZE];
3272 * An unknown si_code might need more than
3273 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3274 * extra bytes are 0. This guarantees copy_siginfo_to_user
3275 * will return this data to userspace exactly.
3277 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3279 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3287 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3288 const siginfo_t __user *from)
3290 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3292 to->si_signo = signo;
3293 return post_copy_siginfo_from_user(to, from);
3296 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3298 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3300 return post_copy_siginfo_from_user(to, from);
3303 #ifdef CONFIG_COMPAT
3305 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3306 * @to: compat siginfo destination
3307 * @from: kernel siginfo source
3309 * Note: This function does not work properly for the SIGCHLD on x32, but
3310 * fortunately it doesn't have to. The only valid callers for this function are
3311 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3312 * The latter does not care because SIGCHLD will never cause a coredump.
3314 void copy_siginfo_to_external32(struct compat_siginfo *to,
3315 const struct kernel_siginfo *from)
3317 memset(to, 0, sizeof(*to));
3319 to->si_signo = from->si_signo;
3320 to->si_errno = from->si_errno;
3321 to->si_code = from->si_code;
3322 switch(siginfo_layout(from->si_signo, from->si_code)) {
3324 to->si_pid = from->si_pid;
3325 to->si_uid = from->si_uid;
3328 to->si_tid = from->si_tid;
3329 to->si_overrun = from->si_overrun;
3330 to->si_int = from->si_int;
3333 to->si_band = from->si_band;
3334 to->si_fd = from->si_fd;
3337 to->si_addr = ptr_to_compat(from->si_addr);
3339 case SIL_FAULT_TRAPNO:
3340 to->si_addr = ptr_to_compat(from->si_addr);
3341 to->si_trapno = from->si_trapno;
3343 case SIL_FAULT_MCEERR:
3344 to->si_addr = ptr_to_compat(from->si_addr);
3345 to->si_addr_lsb = from->si_addr_lsb;
3347 case SIL_FAULT_BNDERR:
3348 to->si_addr = ptr_to_compat(from->si_addr);
3349 to->si_lower = ptr_to_compat(from->si_lower);
3350 to->si_upper = ptr_to_compat(from->si_upper);
3352 case SIL_FAULT_PKUERR:
3353 to->si_addr = ptr_to_compat(from->si_addr);
3354 to->si_pkey = from->si_pkey;
3356 case SIL_PERF_EVENT:
3357 to->si_addr = ptr_to_compat(from->si_addr);
3358 to->si_perf_data = from->si_perf_data;
3359 to->si_perf_type = from->si_perf_type;
3362 to->si_pid = from->si_pid;
3363 to->si_uid = from->si_uid;
3364 to->si_status = from->si_status;
3365 to->si_utime = from->si_utime;
3366 to->si_stime = from->si_stime;
3369 to->si_pid = from->si_pid;
3370 to->si_uid = from->si_uid;
3371 to->si_int = from->si_int;
3374 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3375 to->si_syscall = from->si_syscall;
3376 to->si_arch = from->si_arch;
3381 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3382 const struct kernel_siginfo *from)
3384 struct compat_siginfo new;
3386 copy_siginfo_to_external32(&new, from);
3387 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3392 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3393 const struct compat_siginfo *from)
3396 to->si_signo = from->si_signo;
3397 to->si_errno = from->si_errno;
3398 to->si_code = from->si_code;
3399 switch(siginfo_layout(from->si_signo, from->si_code)) {
3401 to->si_pid = from->si_pid;
3402 to->si_uid = from->si_uid;
3405 to->si_tid = from->si_tid;
3406 to->si_overrun = from->si_overrun;
3407 to->si_int = from->si_int;
3410 to->si_band = from->si_band;
3411 to->si_fd = from->si_fd;
3414 to->si_addr = compat_ptr(from->si_addr);
3416 case SIL_FAULT_TRAPNO:
3417 to->si_addr = compat_ptr(from->si_addr);
3418 to->si_trapno = from->si_trapno;
3420 case SIL_FAULT_MCEERR:
3421 to->si_addr = compat_ptr(from->si_addr);
3422 to->si_addr_lsb = from->si_addr_lsb;
3424 case SIL_FAULT_BNDERR:
3425 to->si_addr = compat_ptr(from->si_addr);
3426 to->si_lower = compat_ptr(from->si_lower);
3427 to->si_upper = compat_ptr(from->si_upper);
3429 case SIL_FAULT_PKUERR:
3430 to->si_addr = compat_ptr(from->si_addr);
3431 to->si_pkey = from->si_pkey;
3433 case SIL_PERF_EVENT:
3434 to->si_addr = compat_ptr(from->si_addr);
3435 to->si_perf_data = from->si_perf_data;
3436 to->si_perf_type = from->si_perf_type;
3439 to->si_pid = from->si_pid;
3440 to->si_uid = from->si_uid;
3441 to->si_status = from->si_status;
3442 #ifdef CONFIG_X86_X32_ABI
3443 if (in_x32_syscall()) {
3444 to->si_utime = from->_sifields._sigchld_x32._utime;
3445 to->si_stime = from->_sifields._sigchld_x32._stime;
3449 to->si_utime = from->si_utime;
3450 to->si_stime = from->si_stime;
3454 to->si_pid = from->si_pid;
3455 to->si_uid = from->si_uid;
3456 to->si_int = from->si_int;
3459 to->si_call_addr = compat_ptr(from->si_call_addr);
3460 to->si_syscall = from->si_syscall;
3461 to->si_arch = from->si_arch;
3467 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3468 const struct compat_siginfo __user *ufrom)
3470 struct compat_siginfo from;
3472 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3475 from.si_signo = signo;
3476 return post_copy_siginfo_from_user32(to, &from);
3479 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3480 const struct compat_siginfo __user *ufrom)
3482 struct compat_siginfo from;
3484 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3487 return post_copy_siginfo_from_user32(to, &from);
3489 #endif /* CONFIG_COMPAT */
3492 * do_sigtimedwait - wait for queued signals specified in @which
3493 * @which: queued signals to wait for
3494 * @info: if non-null, the signal's siginfo is returned here
3495 * @ts: upper bound on process time suspension
3497 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3498 const struct timespec64 *ts)
3500 ktime_t *to = NULL, timeout = KTIME_MAX;
3501 struct task_struct *tsk = current;
3502 sigset_t mask = *which;
3506 if (!timespec64_valid(ts))
3508 timeout = timespec64_to_ktime(*ts);
3513 * Invert the set of allowed signals to get those we want to block.
3515 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3518 spin_lock_irq(&tsk->sighand->siglock);
3519 sig = dequeue_signal(tsk, &mask, info);
3520 if (!sig && timeout) {
3522 * None ready, temporarily unblock those we're interested
3523 * in while we are sleeping, so that we'll be awakened when
3524 * they arrive. Unblocking is always fine, we can avoid
3525 * set_current_blocked().
3527 tsk->real_blocked = tsk->blocked;
3528 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3529 recalc_sigpending();
3530 spin_unlock_irq(&tsk->sighand->siglock);
3532 __set_current_state(TASK_INTERRUPTIBLE);
3533 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3535 spin_lock_irq(&tsk->sighand->siglock);
3536 __set_task_blocked(tsk, &tsk->real_blocked);
3537 sigemptyset(&tsk->real_blocked);
3538 sig = dequeue_signal(tsk, &mask, info);
3540 spin_unlock_irq(&tsk->sighand->siglock);
3544 return ret ? -EINTR : -EAGAIN;
3548 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3550 * @uthese: queued signals to wait for
3551 * @uinfo: if non-null, the signal's siginfo is returned here
3552 * @uts: upper bound on process time suspension
3553 * @sigsetsize: size of sigset_t type
3555 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3556 siginfo_t __user *, uinfo,
3557 const struct __kernel_timespec __user *, uts,
3561 struct timespec64 ts;
3562 kernel_siginfo_t info;
3565 /* XXX: Don't preclude handling different sized sigset_t's. */
3566 if (sigsetsize != sizeof(sigset_t))
3569 if (copy_from_user(&these, uthese, sizeof(these)))
3573 if (get_timespec64(&ts, uts))
3577 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3579 if (ret > 0 && uinfo) {
3580 if (copy_siginfo_to_user(uinfo, &info))
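/*
 * Example (editor's sketch, not part of the original source): waiting
 * synchronously for SIGUSR1 with a timeout via the sigtimedwait(2) wrapper.
 * The signal is blocked first so it stays queued instead of being delivered
 * to a handler:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)	// -1/EAGAIN on timeout
 *		printf("sent by pid %d\n", (int)info.si_pid);
 */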
3587 #ifdef CONFIG_COMPAT_32BIT_TIME
3588 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3589 siginfo_t __user *, uinfo,
3590 const struct old_timespec32 __user *, uts,
3594 struct timespec64 ts;
3595 kernel_siginfo_t info;
3598 if (sigsetsize != sizeof(sigset_t))
3601 if (copy_from_user(&these, uthese, sizeof(these)))
3605 if (get_old_timespec32(&ts, uts))
3609 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3611 if (ret > 0 && uinfo) {
3612 if (copy_siginfo_to_user(uinfo, &info))
3620 #ifdef CONFIG_COMPAT
3621 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3622 struct compat_siginfo __user *, uinfo,
3623 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3626 struct timespec64 t;
3627 kernel_siginfo_t info;
3630 if (sigsetsize != sizeof(sigset_t))
3633 if (get_compat_sigset(&s, uthese))
3637 if (get_timespec64(&t, uts))
3641 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3643 if (ret > 0 && uinfo) {
3644 if (copy_siginfo_to_user32(uinfo, &info))
3651 #ifdef CONFIG_COMPAT_32BIT_TIME
3652 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3653 struct compat_siginfo __user *, uinfo,
3654 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3657 struct timespec64 t;
3658 kernel_siginfo_t info;
3661 if (sigsetsize != sizeof(sigset_t))
3664 if (get_compat_sigset(&s, uthese))
3668 if (get_old_timespec32(&t, uts))
3672 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3674 if (ret > 0 && uinfo) {
3675 if (copy_siginfo_to_user32(uinfo, &info))
3684 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3686 clear_siginfo(info);
3687 info->si_signo = sig;
3689 info->si_code = SI_USER;
3690 info->si_pid = task_tgid_vnr(current);
3691 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3695 * sys_kill - send a signal to a process
3696 * @pid: the PID of the process
3697 * @sig: signal to be sent
3699 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3701 struct kernel_siginfo info;
3703 prepare_kill_siginfo(sig, &info);
3705 return kill_something_info(sig, &info, pid);
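/*
 * Example (editor's sketch, not part of the original source): sig == 0 is the
 * null-signal probe described at do_send_specific() below - permissions and
 * existence are checked but nothing is delivered.  `pid` is a hypothetical
 * target:
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	if (kill(pid, 0) == 0)
 *		;	// process exists and we may signal it
 *	else if (errno == ESRCH)
 *		;	// no such process
 *	else if (errno == EPERM)
 *		;	// it exists, but we lack permission
 */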
3709 * Verify that the signaler and signalee either are in the same pid namespace
3710 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3713 static bool access_pidfd_pidns(struct pid *pid)
3715 struct pid_namespace *active = task_active_pid_ns(current);
3716 struct pid_namespace *p = ns_of_pid(pid);
3729 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3730 siginfo_t __user *info)
3732 #ifdef CONFIG_COMPAT
3734 * Avoid hooking up compat syscalls and instead handle necessary
3735 * conversions here. Note, this is a stop-gap measure and should not be
3736 * considered a generic solution.
3738 if (in_compat_syscall())
3739 return copy_siginfo_from_user32(
3740 kinfo, (struct compat_siginfo __user *)info);
3742 return copy_siginfo_from_user(kinfo, info);
3745 static struct pid *pidfd_to_pid(const struct file *file)
3749 pid = pidfd_pid(file);
3753 return tgid_pidfd_to_pid(file);
3757 * sys_pidfd_send_signal - Signal a process through a pidfd
3758 * @pidfd: file descriptor of the process
3759 * @sig: signal to send
3760 * @info: signal info
3761 * @flags: future flags
3763 * The syscall currently only signals via PIDTYPE_PID which covers
3764 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3766 * In order to extend the syscall to threads and process groups the @flags
3767 * argument should be used. In essence, the @flags argument will determine
3768 * what is signaled and not the file descriptor itself. Put in other words,
3769 * grouping is a property of the flags argument, not a property of the file descriptor.
3772 * Return: 0 on success, negative errno on failure
3774 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3775 siginfo_t __user *, info, unsigned int, flags)
3780 kernel_siginfo_t kinfo;
3782 /* Enforce flags be set to 0 until we add an extension. */
3790 /* Is this a pidfd? */
3791 pid = pidfd_to_pid(f.file);
3798 if (!access_pidfd_pidns(pid))
3802 ret = copy_siginfo_from_user_any(&kinfo, info);
3807 if (unlikely(sig != kinfo.si_signo))
3810 /* Only allow sending arbitrary signals to yourself. */
3812 if ((task_pid(current) != pid) &&
3813 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3816 prepare_kill_siginfo(sig, &kinfo);
3819 ret = kill_pid_info(sig, &kinfo, pid);
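/*
 * Example (editor's sketch, not part of the original source): invoking this
 * syscall from userspace.  Older libcs have no wrapper, so syscall(2) is used
 * directly; the pidfd comes from pidfd_open(2).  `target_pid` is a
 * hypothetical pid:
 *
 *	#include <sys/syscall.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *
 *	if (pidfd >= 0) {
 *		// NULL info: the kernel fills in SI_USER siginfo, as above
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */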
3827 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3829 struct task_struct *p;
3833 p = find_task_by_vpid(pid);
3834 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3835 error = check_kill_permission(sig, info, p);
3837 * The null signal is a permissions and process existence
3838 * probe. No signal is actually delivered.
3840 if (!error && sig) {
3841 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3843 * If lock_task_sighand() failed we pretend the task
3844 * dies after receiving the signal. The window is tiny,
3845 * and the signal is private anyway.
3847 if (unlikely(error == -ESRCH))
3856 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3858 struct kernel_siginfo info;
3860 clear_siginfo(&info);
3861 info.si_signo = sig;
3863 info.si_code = SI_TKILL;
3864 info.si_pid = task_tgid_vnr(current);
3865 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3867 return do_send_specific(tgid, pid, sig, &info);
3871 * sys_tgkill - send signal to one specific thread
3872 * @tgid: the thread group ID of the thread
3873 * @pid: the PID of the thread
3874 * @sig: signal to be sent
3876 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3877 * exists but no longer belongs to the target process. This
3878 * method solves the problem of threads exiting and PIDs getting reused.
3880 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3882 /* This is only valid for single tasks */
3883 if (pid <= 0 || tgid <= 0)
3886 return do_tkill(tgid, pid, sig);
3890 * sys_tkill - send signal to one specific task
3891 * @pid: the PID of the task
3892 * @sig: signal to be sent
3894 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3896 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3898 /* This is only valid for single tasks */
3902 return do_tkill(0, pid, sig);
3905 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3907 /* Not even root can pretend to send signals from the kernel.
3908 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3910 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3911 (task_pid_vnr(current) != pid))
3914 /* POSIX.1b doesn't mention process groups. */
3915 return kill_proc_info(sig, info, pid);
3919 * sys_rt_sigqueueinfo - send signal information to a process
3920 * @pid: the PID of the thread
3921 * @sig: signal to be sent
3922 * @uinfo: signal info to be sent
3924 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3925 siginfo_t __user *, uinfo)
3927 kernel_siginfo_t info;
3928 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3931 return do_rt_sigqueueinfo(pid, sig, &info);
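/*
 * Example (editor's sketch, not part of the original source): userspace
 * normally reaches this path through sigqueue(3), which builds a SI_QUEUE
 * siginfo and therefore passes the si_code check above.  `pid` is a
 * hypothetical target:
 *
 *	#include <signal.h>
 *
 *	union sigval value = { .sival_int = 42 };
 *
 *	// Queue SIGUSR1 with data attached; an SA_SIGINFO handler in the
 *	// receiver reads it back from info->si_value.sival_int.
 *	sigqueue(pid, SIGUSR1, value);
 */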
3934 #ifdef CONFIG_COMPAT
3935 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3938 struct compat_siginfo __user *, uinfo)
3940 kernel_siginfo_t info;
3941 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3944 return do_rt_sigqueueinfo(pid, sig, &info);
3948 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3950 /* This is only valid for single tasks */
3951 if (pid <= 0 || tgid <= 0)
3954 /* Not even root can pretend to send signals from the kernel.
3955 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3957 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3958 (task_pid_vnr(current) != pid))
3961 return do_send_specific(tgid, pid, sig, info);
3964 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3965 siginfo_t __user *, uinfo)
3967 kernel_siginfo_t info;
3968 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3971 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3974 #ifdef CONFIG_COMPAT
3975 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3979 struct compat_siginfo __user *, uinfo)
3981 kernel_siginfo_t info;
3982 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3985 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3990 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3992 void kernel_sigaction(int sig, __sighandler_t action)
3994 spin_lock_irq(&current->sighand->siglock);
3995 current->sighand->action[sig - 1].sa.sa_handler = action;
3996 if (action == SIG_IGN) {
4000 sigaddset(&mask, sig);
4002 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4003 flush_sigqueue_mask(&mask, &current->pending);
4004 recalc_sigpending();
4006 spin_unlock_irq(&current->sighand->siglock);
4008 EXPORT_SYMBOL(kernel_sigaction);
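/*
 * Example (editor's sketch, not part of the original source): kthreads
 * normally reach this helper through allow_signal()/disallow_signal().  A
 * kernel thread that wants to notice SIGTERM might look roughly like this
 * hypothetical body:
 *
 *	static int my_kthread(void *unused)
 *	{
 *		allow_signal(SIGTERM);
 *		while (!kthread_should_stop()) {
 *			// ... do work, sleep interruptibly ...
 *			if (signal_pending(current))
 *				flush_signals(current);	// note and discard
 *		}
 *		return 0;
 *	}
 */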
4010 void __weak sigaction_compat_abi(struct k_sigaction *act,
4011 struct k_sigaction *oact)
4015 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4017 struct task_struct *p = current, *t;
4018 struct k_sigaction *k;
4021 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4024 k = &p->sighand->action[sig-1];
4026 spin_lock_irq(&p->sighand->siglock);
4031 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4032 * e.g. by having an architecture use the bit in their uapi.
4034 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4037 * Clear unknown flag bits in order to allow userspace to detect missing
4038 * support for flag bits and to allow the kernel to use non-uapi bits internally.
4042 act->sa.sa_flags &= UAPI_SA_FLAGS;
4044 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4046 sigaction_compat_abi(act, oact);
4049 sigdelsetmask(&act->sa.sa_mask,
4050 sigmask(SIGKILL) | sigmask(SIGSTOP));
4054 * "Setting a signal action to SIG_IGN for a signal that is
4055 * pending shall cause the pending signal to be discarded,
4056 * whether or not it is blocked."
4058 * "Setting a signal action to SIG_DFL for a signal that is
4059 * pending and whose default action is to ignore the signal
4060 * (for example, SIGCHLD), shall cause the pending signal to
4061 * be discarded, whether or not it is blocked"
4063 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4065 sigaddset(&mask, sig);
4066 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4067 for_each_thread(p, t)
4068 flush_sigqueue_mask(&mask, &t->pending);
4072 spin_unlock_irq(&p->sighand->siglock);
4077 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4080 struct task_struct *t = current;
4083 memset(oss, 0, sizeof(stack_t));
4084 oss->ss_sp = (void __user *) t->sas_ss_sp;
4085 oss->ss_size = t->sas_ss_size;
4086 oss->ss_flags = sas_ss_flags(sp) |
4087 (current->sas_ss_flags & SS_FLAG_BITS);
4091 void __user *ss_sp = ss->ss_sp;
4092 size_t ss_size = ss->ss_size;
4093 unsigned ss_flags = ss->ss_flags;
4096 if (unlikely(on_sig_stack(sp)))
4099 ss_mode = ss_flags & ~SS_FLAG_BITS;
4100 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4104 if (ss_mode == SS_DISABLE) {
4108 if (unlikely(ss_size < min_ss_size))
4112 t->sas_ss_sp = (unsigned long) ss_sp;
4113 t->sas_ss_size = ss_size;
4114 t->sas_ss_flags = ss_flags;
4119 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4123 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4125 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4126 current_user_stack_pointer(),
4128 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
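/*
 * Example (editor's sketch, not part of the original source): installing an
 * alternate stack from userspace so a SIGSEGV handler can still run after
 * the main stack overflows.  segv_handler is a hypothetical handler:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = segv_handler,
 *		.sa_flags = SA_ONSTACK,		// run it on the alt stack
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */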
4133 int restore_altstack(const stack_t __user *uss)
4136 if (copy_from_user(&new, uss, sizeof(stack_t)))
4138 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4140 /* squash all but EFAULT for now */
4144 int __save_altstack(stack_t __user *uss, unsigned long sp)
4146 struct task_struct *t = current;
4147 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4148 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4149 __put_user(t->sas_ss_size, &uss->ss_size);
4152 if (t->sas_ss_flags & SS_AUTODISARM)
4157 #ifdef CONFIG_COMPAT
4158 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4159 compat_stack_t __user *uoss_ptr)
4165 compat_stack_t uss32;
4166 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4168 uss.ss_sp = compat_ptr(uss32.ss_sp);
4169 uss.ss_flags = uss32.ss_flags;
4170 uss.ss_size = uss32.ss_size;
4172 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4173 compat_user_stack_pointer(),
4174 COMPAT_MINSIGSTKSZ);
4175 if (ret >= 0 && uoss_ptr) {
4177 memset(&old, 0, sizeof(old));
4178 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4179 old.ss_flags = uoss.ss_flags;
4180 old.ss_size = uoss.ss_size;
4181 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4187 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4188 const compat_stack_t __user *, uss_ptr,
4189 compat_stack_t __user *, uoss_ptr)
4191 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4194 int compat_restore_altstack(const compat_stack_t __user *uss)
4196 int err = do_compat_sigaltstack(uss, NULL);
4197 /* squash all but -EFAULT for now */
4198 return err == -EFAULT ? err : 0;
4201 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4204 struct task_struct *t = current;
4205 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4207 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4208 __put_user(t->sas_ss_size, &uss->ss_size);
4211 if (t->sas_ss_flags & SS_AUTODISARM)
4217 #ifdef __ARCH_WANT_SYS_SIGPENDING
4220 * sys_sigpending - examine pending signals
4221 * @uset: where the mask of pending signals is returned
4223 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4227 if (sizeof(old_sigset_t) > sizeof(*uset))
4230 do_sigpending(&set);
4232 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4238 #ifdef CONFIG_COMPAT
4239 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4243 do_sigpending(&set);
4245 return put_user(set.sig[0], set32);
4251 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4253 * sys_sigprocmask - examine and change blocked signals
4254 * @how: whether to add, remove, or set signals
4255 * @nset: signals to add or remove (if non-null)
4256 * @oset: previous value of signal mask if non-null
4258 * Some platforms have their own version with special arguments;
4259 * others support only sys_rt_sigprocmask.
4262 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4263 old_sigset_t __user *, oset)
4265 old_sigset_t old_set, new_set;
4266 sigset_t new_blocked;
4268 old_set = current->blocked.sig[0];
4271 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4274 new_blocked = current->blocked;
4278 sigaddsetmask(&new_blocked, new_set);
4281 sigdelsetmask(&new_blocked, new_set);
4284 new_blocked.sig[0] = new_set;
4290 set_current_blocked(&new_blocked);
4294 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4300 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4302 #ifndef CONFIG_ODD_RT_SIGACTION
4304 * sys_rt_sigaction - alter an action taken by a process
4305 * @sig: signal to be sent
4306 * @act: new sigaction
4307 * @oact: used to save the previous sigaction
4308 * @sigsetsize: size of sigset_t type
4310 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4311 const struct sigaction __user *, act,
4312 struct sigaction __user *, oact,
4315 struct k_sigaction new_sa, old_sa;
4318 /* XXX: Don't preclude handling different sized sigset_t's. */
4319 if (sigsetsize != sizeof(sigset_t))
4322 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4325 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4329 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
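/*
 * Example (editor's sketch, not part of the original source): this syscall
 * backs the sigaction(2) wrapper.  A typical SA_SIGINFO registration from
 * userspace:
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void on_usr1(int sig, siginfo_t *info, void *uctx)
 *	{
 *		// info->si_pid and info->si_uid identify the sender
 *	}
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_sigaction = on_usr1;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */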
4334 #ifdef CONFIG_COMPAT
4335 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4336 const struct compat_sigaction __user *, act,
4337 struct compat_sigaction __user *, oact,
4338 compat_size_t, sigsetsize)
4340 struct k_sigaction new_ka, old_ka;
4341 #ifdef __ARCH_HAS_SA_RESTORER
4342 compat_uptr_t restorer;
4346 /* XXX: Don't preclude handling different sized sigset_t's. */
4347 if (sigsetsize != sizeof(compat_sigset_t))
4351 compat_uptr_t handler;
4352 ret = get_user(handler, &act->sa_handler);
4353 new_ka.sa.sa_handler = compat_ptr(handler);
4354 #ifdef __ARCH_HAS_SA_RESTORER
4355 ret |= get_user(restorer, &act->sa_restorer);
4356 new_ka.sa.sa_restorer = compat_ptr(restorer);
4358 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4359 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4364 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4366 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4368 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4369 sizeof(oact->sa_mask));
4370 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4371 #ifdef __ARCH_HAS_SA_RESTORER
4372 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4373 &oact->sa_restorer);
4379 #endif /* !CONFIG_ODD_RT_SIGACTION */
4381 #ifdef CONFIG_OLD_SIGACTION
4382 SYSCALL_DEFINE3(sigaction, int, sig,
4383 const struct old_sigaction __user *, act,
4384 struct old_sigaction __user *, oact)
4386 struct k_sigaction new_ka, old_ka;
4391 if (!access_ok(act, sizeof(*act)) ||
4392 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4393 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4394 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4395 __get_user(mask, &act->sa_mask))
4397 #ifdef __ARCH_HAS_KA_RESTORER
4398 new_ka.ka_restorer = NULL;
4400 siginitset(&new_ka.sa.sa_mask, mask);
4403 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4406 if (!access_ok(oact, sizeof(*oact)) ||
4407 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4408 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4409 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4410 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4417 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4418 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4419 const struct compat_old_sigaction __user *, act,
4420 struct compat_old_sigaction __user *, oact)
4422 struct k_sigaction new_ka, old_ka;
4424 compat_old_sigset_t mask;
4425 compat_uptr_t handler, restorer;
4428 if (!access_ok(act, sizeof(*act)) ||
4429 __get_user(handler, &act->sa_handler) ||
4430 __get_user(restorer, &act->sa_restorer) ||
4431 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4432 __get_user(mask, &act->sa_mask))
4435 #ifdef __ARCH_HAS_KA_RESTORER
4436 new_ka.ka_restorer = NULL;
4438 new_ka.sa.sa_handler = compat_ptr(handler);
4439 new_ka.sa.sa_restorer = compat_ptr(restorer);
4440 siginitset(&new_ka.sa.sa_mask, mask);
4443 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4446 if (!access_ok(oact, sizeof(*oact)) ||
4447 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4448 &oact->sa_handler) ||
4449 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4450 &oact->sa_restorer) ||
4451 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4452 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4459 #ifdef CONFIG_SGETMASK_SYSCALL
4462 * For backwards compatibility. Functionality superseded by sigprocmask.
4464 SYSCALL_DEFINE0(sgetmask)
4467 return current->blocked.sig[0];
4470 SYSCALL_DEFINE1(ssetmask, int, newmask)
4472 int old = current->blocked.sig[0];
4475 siginitset(&newset, newmask);
4476 set_current_blocked(&newset);
4480 #endif /* CONFIG_SGETMASK_SYSCALL */
4482 #ifdef __ARCH_WANT_SYS_SIGNAL
4484 * For backwards compatibility. Functionality superseded by sigaction.
4486 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4488 struct k_sigaction new_sa, old_sa;
4491 new_sa.sa.sa_handler = handler;
4492 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4493 sigemptyset(&new_sa.sa.sa_mask);
4495 ret = do_sigaction(sig, &new_sa, &old_sa);
4497 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4499 #endif /* __ARCH_WANT_SYS_SIGNAL */
4501 #ifdef __ARCH_WANT_SYS_PAUSE
4503 SYSCALL_DEFINE0(pause)
4505 while (!signal_pending(current)) {
4506 __set_current_state(TASK_INTERRUPTIBLE);
4509 return -ERESTARTNOHAND;
4514 static int sigsuspend(sigset_t *set)
4516 current->saved_sigmask = current->blocked;
4517 set_current_blocked(set);
4519 while (!signal_pending(current)) {
4520 __set_current_state(TASK_INTERRUPTIBLE);
4523 set_restore_sigmask();
4524 return -ERESTARTNOHAND;
4528 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4529 * until a signal is received
4530 * @unewset: new signal mask value
4531 * @sigsetsize: size of sigset_t type
4533 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4537 /* XXX: Don't preclude handling different sized sigset_t's. */
4538 if (sigsetsize != sizeof(sigset_t))
4541 if (copy_from_user(&newset, unewset, sizeof(newset)))
4543 return sigsuspend(&newset);
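/*
 * Example (editor's sketch, not part of the original source): the classic
 * race-free wait that sigsuspend(2) exists for - block the signal, test the
 * flag, then atomically unblock and sleep.  got_signal is a hypothetical
 * flag set by the handler:
 *
 *	#include <signal.h>
 *
 *	volatile sig_atomic_t got_signal;
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_signal)
 *		sigsuspend(&old);	// no window where the signal is lost
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */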
4546 #ifdef CONFIG_COMPAT
4547 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4551 /* XXX: Don't preclude handling different sized sigset_t's. */
4552 if (sigsetsize != sizeof(sigset_t))
4555 if (get_compat_sigset(&newset, unewset))
4557 return sigsuspend(&newset);
4561 #ifdef CONFIG_OLD_SIGSUSPEND
4562 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4565 siginitset(&blocked, mask);
4566 return sigsuspend(&blocked);
4569 #ifdef CONFIG_OLD_SIGSUSPEND3
4570 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4573 siginitset(&blocked, mask);
4574 return sigsuspend(&blocked);
4578 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4583 static inline void siginfo_buildtime_checks(void)
4585 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4587 /* Verify the offsets in the two siginfos match */
4588 #define CHECK_OFFSET(field) \
4589 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4592 CHECK_OFFSET(si_pid);
4593 CHECK_OFFSET(si_uid);
4596 CHECK_OFFSET(si_tid);
4597 CHECK_OFFSET(si_overrun);
4598 CHECK_OFFSET(si_value);
4601 CHECK_OFFSET(si_pid);
4602 CHECK_OFFSET(si_uid);
4603 CHECK_OFFSET(si_value);
4606 CHECK_OFFSET(si_pid);
4607 CHECK_OFFSET(si_uid);
4608 CHECK_OFFSET(si_status);
4609 CHECK_OFFSET(si_utime);
4610 CHECK_OFFSET(si_stime);
4613 CHECK_OFFSET(si_addr);
4614 CHECK_OFFSET(si_trapno);
4615 CHECK_OFFSET(si_addr_lsb);
4616 CHECK_OFFSET(si_lower);
4617 CHECK_OFFSET(si_upper);
4618 CHECK_OFFSET(si_pkey);
4619 CHECK_OFFSET(si_perf_data);
4620 CHECK_OFFSET(si_perf_type);
4623 CHECK_OFFSET(si_band);
4624 CHECK_OFFSET(si_fd);
4627 CHECK_OFFSET(si_call_addr);
4628 CHECK_OFFSET(si_syscall);
4629 CHECK_OFFSET(si_arch);
4633 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4634 offsetof(struct siginfo, si_addr));
4635 if (sizeof(int) == sizeof(void __user *)) {
4636 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4637 sizeof(void __user *));
4639 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4640 sizeof_field(struct siginfo, si_uid)) !=
4641 sizeof(void __user *));
4642 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4643 offsetof(struct siginfo, si_uid));
4645 #ifdef CONFIG_COMPAT
4646 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4647 offsetof(struct compat_siginfo, si_addr));
4648 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4649 sizeof(compat_uptr_t));
4650 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4651 sizeof_field(struct siginfo, si_pid));
4655 void __init signals_init(void)
4657 siginfo_buildtime_checks();
4659 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4662 #ifdef CONFIG_KGDB_KDB
4663 #include <linux/kdb.h>
4665 * kdb_send_sig - Allows kdb to send signals without exposing
4666 * signal internals. This function checks if the required locks are
4667 * available before calling the main signal code, to avoid kdb deadlocking.
4670 void kdb_send_sig(struct task_struct *t, int sig)
4672 static struct task_struct *kdb_prev_t;
4674 if (!spin_trylock(&t->sighand->siglock)) {
4675 kdb_printf("Can't do kill command now.\n"
4676 "The sigmask lock is held somewhere else in "
4677 "kernel, try again later\n");
4680 new_t = kdb_prev_t != t;
4682 if (!task_is_running(t) && new_t) {
4683 spin_unlock(&t->sighand->siglock);
4684 kdb_printf("Process is not RUNNING, sending a signal from "
4685 "kdb risks deadlock\n"
4686 "on the run queue locks. "
4687 "The signal has _not_ been sent.\n"
4688 "Reissue the kill command if you want to risk "
4692 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4693 spin_unlock(&t->sighand->siglock);
4695 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4698 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4700 #endif /* CONFIG_KGDB_KDB */