// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even an ignored signal, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

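/*
 * Editorial note, not part of the original file: a minimal sketch of the
 * same word-wise "pending & ~blocked" computation that has_pending_signals()
 * performs, assuming a single 64-bit sigset word.  All values below are
 * hypothetical:
 *
 *	uint64_t pending = (1ull << (SIGTERM - 1)) | (1ull << (SIGUSR1 - 1));
 *	uint64_t blocked = (1ull << (SIGUSR1 - 1));
 *	uint64_t ready   = pending & ~blocked;	// only SIGTERM is deliverable
 */
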
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; clearing is left to callers that know
	 * it is safe to do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

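/*
 * Editorial note, not part of the original file: because next_signal()
 * narrows the first word to SYNCHRONOUS_MASK whenever a synchronous signal
 * is pending, a pending SIGSEGV is reported before a lower-numbered but
 * asynchronous signal such as SIGHUP:
 *
 *	x = (sigmask(SIGHUP) | sigmask(SIGSEGV)) & ~blocked; // both bits set
 *	x &= SYNCHRONOUS_MASK;		// only the SIGSEGV bit remains
 *	sig = ffz(~x) + 1;		// -> SIGSEGV
 */
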
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

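/*
 * Editorial note, not part of the original file: the message above fires
 * when a task tries to queue more signals than RLIMIT_SIGPENDING allows.
 * A hedged userspace sketch that can trigger it (standard libc calls,
 * with SIGRTMIN assumed blocked so the entries stay queued):
 *
 *	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1 };
 *	setrlimit(RLIMIT_SIGPENDING, &rl);
 *	sigqueue(getpid(), SIGRTMIN, (union sigval){ .sival_int = 0 });
 *	sigqueue(getpid(), SIGRTMIN, (union sigval){ .sival_int = 1 });
 *	// the second sigqueue() fails with EAGAIN
 */
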
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

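/*
 * Editorial note, not part of the original file: the check above encodes
 * the kill(2) permission rule - the sender's euid or uid must match the
 * target's suid or uid, or the sender needs CAP_KILL.  For example, a
 * shell running as uid 1000 may signal a setuid-root program it started,
 * because the target's real uid is still 1000; it may not signal a root
 * daemon whose uid and suid are both 0.
 */
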
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing
 * traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should actually be delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

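/*
 * Editorial note, not part of the original file: legacy_queue() is why
 * classic signals coalesce while real-time signals accumulate.  The
 * observable POSIX semantics from userspace:
 *
 *	block SIGUSR1, then kill(pid, SIGUSR1) twice
 *	  -> the handler eventually runs once; the second send was dropped
 *	block SIGRTMIN, then sigqueue(pid, SIGRTMIN, ...) twice
 *	  -> the handler runs twice; each send queued its own entry
 */
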
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than would appear in a 32 bit pointer, so userspace
 * will not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

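/*
 * Editorial note, not part of the original file: a hedged sketch of the
 * caller-side fix the comment above asks for, on a 64-bit big-endian
 * kernel serving a 32-bit userspace.  A 32-bit pointer written through
 * the 64-bit sival_ptr would land in the wrong half of the union as seen
 * by the 32-bit ABI, so the caller stores it in sival_int instead
 * (user_ptr32 is a hypothetical 32-bit user pointer):
 *
 *	sigval_t addr;
 *	addr.sival_int = (int)(uintptr_t)user_ptr32;	// not sival_ptr
 *	kill_pid_usb_asyncio(sig, err, addr, pid, cred);
 */
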
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

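/*
 * Editorial note, not part of the original file: the pid encoding that
 * kill_something_info() (and kill(2)) interprets:
 *
 *	kill(1234, sig)		sig to the process with pid 1234
 *	kill(0, sig)		sig to every process in the caller's
 *				process group
 *	kill(-1, sig)		sig to every process the caller may signal,
 *				except pid 1 and the caller's own thread group
 *	kill(-1234, sig)	sig to every process in process group 1234
 */
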
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

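/*
 * Editorial note, not part of the original file: the POSIX timer path is
 * the main user of this preallocation.  A hedged userspace sketch whose
 * timer_create() reaches sigqueue_alloc(), so a later timer expiry can
 * never fail to allocate its siginfo:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGRTMIN,
 *	};
 *	timer_t tid;
 *	timer_create(CLOCK_MONOTONIC, &sev, &tid);  // may fail with EAGAIN
 */
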
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

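/*
 * Editorial note, not part of the original file: the autoreap rule above
 * is visible from userspace.  If the parent does
 *
 *	signal(SIGCHLD, SIG_IGN);
 *
 * its children skip the zombie state entirely, and a subsequent wait(2)
 * returns -1 with errno == ECHILD, matching the POSIX.1 SIG_IGN and
 * SA_NOCLDWAIT semantics quoted in the comment above.
 */
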
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock situation and pointless because our tracer is
	 * dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		cgroup_enter_frozen();
		preempt_enable_no_resched();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

2273 static void ptrace_do_notify(int signr, int exit_code, int why)
2275 kernel_siginfo_t info;
2277 clear_siginfo(&info);
2278 info.si_signo = signr;
2279 info.si_code = exit_code;
2280 info.si_pid = task_pid_vnr(current);
2281 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2283 /* Let the debugger run. */
2284 ptrace_stop(exit_code, why, 1, &info);
2287 void ptrace_notify(int exit_code)
2289 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2290 if (unlikely(current->task_works))
2293 spin_lock_irq(&current->sighand->siglock);
2294 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2295 spin_unlock_irq(&current->sighand->siglock);
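/*
 * Callers encode a ptrace event number in the second byte of @exit_code;
 * ptrace_event() in <linux/ptrace.h> builds it exactly this way, e.g. for
 * the exec notification:
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 *
 * This satisfies the BUG_ON() above: the low seven bits are SIGTRAP and
 * nothing is set above bit 15.
 */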
2299 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2300 * @signr: signr causing group stop if initiating
2302 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2303 * and participate in it. If already set, participate in the existing
2304 * group stop. If participated in a group stop (and thus slept), %true is
2305 * returned with siglock released.
2307 * If ptraced, this function doesn't handle stop itself. Instead,
2308 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2309 * untouched. The caller must ensure that INTERRUPT trap handling takes
2310 * place afterwards.
2313 * Must be called with @current->sighand->siglock held, which is released on %true return.
2317 * %false if group stop is already cancelled or ptrace trap is scheduled.
2318 * %true if participated in group stop.
2320 static bool do_signal_stop(int signr)
2321 __releases(&current->sighand->siglock)
2323 struct signal_struct *sig = current->signal;
2325 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2326 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2327 struct task_struct *t;
2329 /* signr will be recorded in task->jobctl for retries */
2330 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2332 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2333 unlikely(signal_group_exit(sig)))
2336 * There is no group stop already in progress. We must initiate one now.
2339 * While ptraced, a task may be resumed while group stop is
2340 * still in effect and then receive a stop signal and
2341 * initiate another group stop. This deviates from the
2342 * usual behavior as two consecutive stop signals can't
2343 * cause two group stops when !ptraced. That is why we
2344 * also check !task_is_stopped(t) below.
2346 * The condition can be distinguished by testing whether
2347 * SIGNAL_STOP_STOPPED is already set. Don't generate
2348 * group_exit_code in such a case.
2350 * This is not necessary for SIGNAL_STOP_CONTINUED because
2351 * an intervening stop signal is required to cause two
2352 * continued events regardless of ptrace.
2354 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2355 sig->group_exit_code = signr;
2357 sig->group_stop_count = 0;
2359 if (task_set_jobctl_pending(current, signr | gstop))
2360 sig->group_stop_count++;
2363 while_each_thread(current, t) {
2365 * Setting state to TASK_STOPPED for a group
2366 * stop is always done with the siglock held,
2367 * so this check has no races.
2369 if (!task_is_stopped(t) &&
2370 task_set_jobctl_pending(t, signr | gstop)) {
2371 sig->group_stop_count++;
2372 if (likely(!(t->ptrace & PT_SEIZED)))
2373 signal_wake_up(t, 0);
2375 ptrace_trap_notify(t);
2380 if (likely(!current->ptrace)) {
2384 * If there are no other threads in the group, or if there
2385 * is a group stop in progress and we are the last to stop,
2386 * report to the parent.
2388 if (task_participate_group_stop(current))
2389 notify = CLD_STOPPED;
2391 set_special_state(TASK_STOPPED);
2392 spin_unlock_irq(&current->sighand->siglock);
2395 * Notify the parent of the group stop completion. Because
2396 * we're not holding either the siglock or tasklist_lock
2397 * here, ptracer may attach in between; however, this is for
2398 * group stop and should always be delivered to the real
2399 * parent of the group leader. The new ptracer will get
2400 * its notification when this task transitions into TASK_TRACED.
2404 read_lock(&tasklist_lock);
2405 do_notify_parent_cldstop(current, false, notify);
2406 read_unlock(&tasklist_lock);
2409 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2410 cgroup_enter_frozen();
2411 freezable_schedule();
2415 * While ptraced, group stop is handled by STOP trap.
2416 * Schedule it and let the caller deal with it.
2418 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2424 * do_jobctl_trap - take care of ptrace jobctl traps
2426 * When PT_SEIZED, it's used for both group stop and explicit
2427 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2428 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2429 * the stop signal; otherwise, %SIGTRAP.
2431 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2432 * number as exit_code and no siginfo.
2435 * Must be called with @current->sighand->siglock held, which may be
2436 * released and re-acquired before returning with intervening sleep.
2438 static void do_jobctl_trap(void)
2440 struct signal_struct *signal = current->signal;
2441 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2443 if (current->ptrace & PT_SEIZED) {
2444 if (!signal->group_stop_count &&
2445 !(signal->flags & SIGNAL_STOP_STOPPED))
2447 WARN_ON_ONCE(!signr);
2448 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2451 WARN_ON_ONCE(!signr);
2452 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2453 current->exit_code = 0;
2458 * do_freezer_trap - handle the freezer jobctl trap
2460 * Puts the task into the frozen state, unless the task is about to quit;
2461 * in that case it drops JOBCTL_TRAP_FREEZE.
2464 * Must be called with @current->sighand->siglock held,
2465 * which is always released before returning.
2467 static void do_freezer_trap(void)
2468 __releases(&current->sighand->siglock)
2471 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
2472 * let's make another loop to give it a chance to be handled.
2473 * In any case, we'll come back here.
2475 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2476 JOBCTL_TRAP_FREEZE) {
2477 spin_unlock_irq(&current->sighand->siglock);
2482 * Now we're sure that there is no pending fatal signal and no
2483 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2484 * immediately (if there is a non-fatal signal pending), and
2485 * put the task to sleep.
2487 __set_current_state(TASK_INTERRUPTIBLE);
2488 clear_thread_flag(TIF_SIGPENDING);
2489 spin_unlock_irq(&current->sighand->siglock);
2490 cgroup_enter_frozen();
2491 freezable_schedule();
2494 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2497 * We do not check sig_kernel_stop(signr) but set this marker
2498 * unconditionally because we do not know whether debugger will
2499 * change signr. This flag has no meaning unless we are going
2500 * to stop after return from ptrace_stop(). In this case it will
2501 * be checked in do_signal_stop(), we should only stop if it was
2502 * not cleared by SIGCONT while we were sleeping. See also the
2503 * comment in dequeue_signal().
2505 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2506 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2508 /* We're back. Did the debugger cancel the sig? */
2509 signr = current->exit_code;
2513 current->exit_code = 0;
2516 * Update the siginfo structure if the signal has
2517 * changed. If the debugger wanted something
2518 * specific in the siginfo structure then it should
2519 * have updated *info via PTRACE_SETSIGINFO.
2521 if (signr != info->si_signo) {
2522 clear_siginfo(info);
2523 info->si_signo = signr;
2525 info->si_code = SI_USER;
2527 info->si_pid = task_pid_vnr(current->parent);
2528 info->si_uid = from_kuid_munged(current_user_ns(),
2529 task_uid(current->parent));
2533 /* If the (new) signal is now blocked, requeue it. */
2534 if (sigismember(&current->blocked, signr)) {
2535 send_signal(signr, info, current, PIDTYPE_PID);
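/*
 * From the tracer's side, the value that lands back in ->exit_code is the
 * data argument of the resuming ptrace request: 0 cancels the signal, a
 * different number replaces it. A minimal, illustrative userspace debugger
 * fragment (untested sketch):
 *
 *	int status;
 *	waitpid(child, &status, 0);
 *	if (WIFSTOPPED(status))
 *		ptrace(PTRACE_CONT, child, 0,
 *		       WSTOPSIG(status) == SIGINT ? 0 : WSTOPSIG(status));
 */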
2542 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2544 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2546 case SIL_FAULT_TRAPNO:
2547 case SIL_FAULT_MCEERR:
2548 case SIL_FAULT_BNDERR:
2549 case SIL_FAULT_PKUERR:
2550 case SIL_PERF_EVENT:
2551 ksig->info.si_addr = arch_untagged_si_addr(
2552 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2564 bool get_signal(struct ksignal *ksig)
2566 struct sighand_struct *sighand = current->sighand;
2567 struct signal_struct *signal = current->signal;
2570 if (unlikely(current->task_works))
2574 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2575 * that the arch handlers don't all have to do it. If we get here
2576 * without TIF_SIGPENDING, just exit after running signal work.
2578 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2579 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2580 tracehook_notify_signal();
2581 if (!task_sigpending(current))
2585 if (unlikely(uprobe_deny_signal()))
2589 * Do this once, we can't return to user-mode if freezing() == T.
2590 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2591 * thus do not need another check after return.
2596 spin_lock_irq(&sighand->siglock);
2599 * Every stopped thread goes here after wakeup. Check to see if
2600 * we should notify the parent, prepare_signal(SIGCONT) encodes
2601 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2603 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2606 if (signal->flags & SIGNAL_CLD_CONTINUED)
2607 why = CLD_CONTINUED;
2611 signal->flags &= ~SIGNAL_CLD_MASK;
2613 spin_unlock_irq(&sighand->siglock);
2616 * Notify the parent that we're continuing. This event is
2617 * always per-process and doesn't make a whole lot of sense
2618 * for ptracers, who shouldn't consume the state via
2619 * wait(2) either, but, for backward compatibility, notify
2620 * the ptracer of the group leader too unless it's going to be a duplicate.
2623 read_lock(&tasklist_lock);
2624 do_notify_parent_cldstop(current, false, why);
2626 if (ptrace_reparented(current->group_leader))
2627 do_notify_parent_cldstop(current->group_leader,
2629 read_unlock(&tasklist_lock);
2634 /* Has this task already been marked for death? */
2635 if (signal_group_exit(signal)) {
2636 ksig->info.si_signo = signr = SIGKILL;
2637 sigdelset(&current->pending.signal, SIGKILL);
2638 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2639 &sighand->action[SIGKILL - 1]);
2640 recalc_sigpending();
2645 struct k_sigaction *ka;
2647 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2651 if (unlikely(current->jobctl &
2652 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2653 if (current->jobctl & JOBCTL_TRAP_MASK) {
2655 spin_unlock_irq(&sighand->siglock);
2656 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2663 * If the task is leaving the frozen state, let's update
2664 * cgroup counters and reset the frozen bit.
2666 if (unlikely(cgroup_task_frozen(current))) {
2667 spin_unlock_irq(&sighand->siglock);
2668 cgroup_leave_frozen(false);
2673 * Signals generated by the execution of an instruction
2674 * need to be delivered before any other pending signals
2675 * so that the instruction pointer in the signal stack
2676 * frame points to the faulting instruction.
2678 signr = dequeue_synchronous_signal(&ksig->info);
2680 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2683 break; /* will return 0 */
2685 if (unlikely(current->ptrace) && signr != SIGKILL) {
2686 signr = ptrace_signal(signr, &ksig->info);
2691 ka = &sighand->action[signr-1];
2693 /* Trace actually delivered signals. */
2694 trace_signal_deliver(signr, &ksig->info, ka);
2696 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2698 if (ka->sa.sa_handler != SIG_DFL) {
2699 /* Run the handler. */
2702 if (ka->sa.sa_flags & SA_ONESHOT)
2703 ka->sa.sa_handler = SIG_DFL;
2705 break; /* will return non-zero "signr" value */
2709 * Now we are doing the default action for this signal.
2711 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2715 * Global init gets no signals it doesn't want.
2716 * Container-init gets no signals it doesn't want from the same container.
2719 * Note that if global/container-init sees a sig_kernel_only()
2720 * signal here, the signal must have been generated internally
2721 * or must have come from an ancestor namespace. In either
2722 * case, the signal cannot be dropped.
2724 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2725 !sig_kernel_only(signr))
2728 if (sig_kernel_stop(signr)) {
2730 * The default action is to stop all threads in
2731 * the thread group. The job control signals
2732 * do nothing in an orphaned pgrp, but SIGSTOP
2733 * always works. Note that siglock needs to be
2734 * dropped during the call to is_orphaned_pgrp()
2735 * because of lock ordering with tasklist_lock.
2736 * This allows an intervening SIGCONT to be posted.
2737 * We need to check for that and bail out if necessary.
2739 if (signr != SIGSTOP) {
2740 spin_unlock_irq(&sighand->siglock);
2742 /* signals can be posted during this window */
2744 if (is_current_pgrp_orphaned())
2747 spin_lock_irq(&sighand->siglock);
2750 if (likely(do_signal_stop(ksig->info.si_signo))) {
2751 /* It released the siglock. */
2756 * We didn't actually stop, due to a race
2757 * with SIGCONT or something like that.
2763 spin_unlock_irq(&sighand->siglock);
2764 if (unlikely(cgroup_task_frozen(current)))
2765 cgroup_leave_frozen(true);
2768 * Anything else is fatal, maybe with a core dump.
2770 current->flags |= PF_SIGNALED;
2772 if (sig_kernel_coredump(signr)) {
2773 if (print_fatal_signals)
2774 print_fatal_signal(ksig->info.si_signo);
2775 proc_coredump_connector(current);
2777 * If it was able to dump core, this kills all
2778 * other threads in the group and synchronizes with
2779 * their demise. If we lost the race with another
2780 * thread getting here, it set group_exit_code
2781 * first and our do_group_exit call below will use
2782 * that value and ignore the one we pass it.
2784 do_coredump(&ksig->info);
2788 * PF_IO_WORKER threads will catch and exit on fatal signals
2789 * themselves. They have cleanup that must be performed, so
2790 * we cannot call do_exit() on their behalf.
2792 if (current->flags & PF_IO_WORKER)
2796 * Death signals, no core dump.
2798 do_group_exit(ksig->info.si_signo);
2801 spin_unlock_irq(&sighand->siglock);
2805 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2806 hide_si_addr_tag_bits(ksig);
2808 return ksig->sig > 0;
2812 * signal_delivered - update state after a signal was successfully delivered
2813 * @ksig: kernel signal struct
2814 * @stepping: nonzero if debugger single-step or block-step in use
2816 * This function should be called when a signal has successfully been
2817 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2818 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2819 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2821 static void signal_delivered(struct ksignal *ksig, int stepping)
2825 /* A signal was successfully delivered, and the
2826 saved sigmask was stored on the signal frame,
2827 and will be restored by sigreturn. So we can
2828 simply clear the restore sigmask flag. */
2829 clear_restore_sigmask();
2831 sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask);
2832 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2833 sigaddset(&blocked, ksig->sig);
2834 set_current_blocked(&blocked);
2835 tracehook_signal_handler(stepping);
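/*
 * The effect as seen from userspace (illustrative sketch, error handling
 * omitted): while a handler installed with
 *
 *	struct sigaction sa = { .sa_handler = handler };
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGUSR2);
 *	sigaction(SIGUSR1, &sa, NULL);
 *
 * is running, both SIGUSR1 (no SA_NODEFER) and SIGUSR2 (in sa_mask) are
 * blocked; sigreturn restores the previous mask afterwards.
 */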
2838 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2841 force_sigsegv(ksig->sig);
2843 signal_delivered(ksig, stepping);
2847 * It could be that complete_signal() picked us to notify about the
2848 * group-wide signal. Other threads should be notified now to take
2849 * the shared signals in @which since we will not.
2851 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2854 struct task_struct *t;
2856 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2857 if (sigisemptyset(&retarget))
2861 while_each_thread(tsk, t) {
2862 if (t->flags & PF_EXITING)
2865 if (!has_pending_signals(&retarget, &t->blocked))
2867 /* Remove the signals this thread can handle. */
2868 sigandsets(&retarget, &retarget, &t->blocked);
2870 if (!task_sigpending(t))
2871 signal_wake_up(t, 0);
2873 if (sigisemptyset(&retarget))
2878 void exit_signals(struct task_struct *tsk)
2884 * @tsk is about to have PF_EXITING set - lock out users which
2885 * expect stable threadgroup.
2887 cgroup_threadgroup_change_begin(tsk);
2889 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2890 tsk->flags |= PF_EXITING;
2891 cgroup_threadgroup_change_end(tsk);
2895 spin_lock_irq(&tsk->sighand->siglock);
2897 * From now this task is not visible for group-wide signals,
2898 * see wants_signal(), do_signal_stop().
2900 tsk->flags |= PF_EXITING;
2902 cgroup_threadgroup_change_end(tsk);
2904 if (!task_sigpending(tsk))
2907 unblocked = tsk->blocked;
2908 signotset(&unblocked);
2909 retarget_shared_pending(tsk, &unblocked);
2911 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2912 task_participate_group_stop(tsk))
2913 group_stop = CLD_STOPPED;
2915 spin_unlock_irq(&tsk->sighand->siglock);
2918 * If group stop has completed, deliver the notification. This
2919 * should always go to the real parent of the group leader.
2921 if (unlikely(group_stop)) {
2922 read_lock(&tasklist_lock);
2923 do_notify_parent_cldstop(tsk, false, group_stop);
2924 read_unlock(&tasklist_lock);
2929 * System call entry points.
2933 * sys_restart_syscall - restart a system call
2935 SYSCALL_DEFINE0(restart_syscall)
2937 struct restart_block *restart = &current->restart_block;
2938 return restart->fn(restart);
2941 long do_no_restart_syscall(struct restart_block *param)
2946 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2948 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2949 sigset_t newblocked;
2950 /* A set of now blocked but previously unblocked signals. */
2951 sigandnsets(&newblocked, newset, &current->blocked);
2952 retarget_shared_pending(tsk, &newblocked);
2954 tsk->blocked = *newset;
2955 recalc_sigpending();
2959 * set_current_blocked - change current->blocked mask
2962 * It is wrong to change ->blocked directly, this helper should be used
2963 * to ensure the process can't miss a shared signal we are going to block.
2965 void set_current_blocked(sigset_t *newset)
2967 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2968 __set_current_blocked(newset);
2971 void __set_current_blocked(const sigset_t *newset)
2973 struct task_struct *tsk = current;
2976 * In case the signal mask hasn't changed, there is nothing we need
2977 * to do. The current->blocked shouldn't be modified by any other task.
2979 if (sigequalsets(&tsk->blocked, newset))
2982 spin_lock_irq(&tsk->sighand->siglock);
2983 __set_task_blocked(tsk, newset);
2984 spin_unlock_irq(&tsk->sighand->siglock);
2988 * This is also useful for kernel threads that want to temporarily
2989 * (or permanently) block certain signals.
2991 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2992 * interface happily blocks "unblockable" signals like SIGKILL and SIGSTOP.
2995 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2997 struct task_struct *tsk = current;
3000 /* Lockless, only current can change ->blocked, never from irq */
3002 *oldset = tsk->blocked;
3006 sigorsets(&newset, &tsk->blocked, set);
3009 sigandnsets(&newset, &tsk->blocked, set);
3018 __set_current_blocked(&newset);
3021 EXPORT_SYMBOL(sigprocmask);
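/*
 * The three modes map onto the familiar userspace semantics (illustrative
 * sketch using the libc wrapper):
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGTERM);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	   blocked |= set
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	   blocked &= ~set
 *	sigprocmask(SIG_SETMASK, &set, NULL);	   blocked = set
 */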
3024 * The API helps set app-provided sigmasks.
3026 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3027 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3029 * Note that it does set_restore_sigmask() in advance, so it must always be
3030 * paired with restore_saved_sigmask_unless() before return from syscall.
3032 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3038 if (sigsetsize != sizeof(sigset_t))
3040 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3043 set_restore_sigmask();
3044 current->saved_sigmask = current->blocked;
3045 set_current_blocked(&kmask);
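/*
 * This is what gives ppoll()/pselect() their atomic mask-swap semantics.
 * Illustrative userspace use (untested sketch): wait for fds while
 * temporarily allowing only SIGUSR1, with no window where the signal can
 * be lost:
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGUSR1);
 *	ready = ppoll(fds, nfds, NULL, &mask);
 */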
3050 #ifdef CONFIG_COMPAT
3051 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3058 if (sigsetsize != sizeof(compat_sigset_t))
3060 if (get_compat_sigset(&kmask, umask))
3063 set_restore_sigmask();
3064 current->saved_sigmask = current->blocked;
3065 set_current_blocked(&kmask);
3072 * sys_rt_sigprocmask - change the list of currently blocked signals
3073 * @how: whether to add, remove, or set signals
3074 * @nset: new set of blocked signals, if non-null
3075 * @oset: previous value of signal mask if non-null
3076 * @sigsetsize: size of sigset_t type
3078 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3079 sigset_t __user *, oset, size_t, sigsetsize)
3081 sigset_t old_set, new_set;
3084 /* XXX: Don't preclude handling different sized sigset_t's. */
3085 if (sigsetsize != sizeof(sigset_t))
3088 old_set = current->blocked;
3091 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3093 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3095 error = sigprocmask(how, &new_set, NULL);
3101 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3108 #ifdef CONFIG_COMPAT
3109 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3110 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3112 sigset_t old_set = current->blocked;
3114 /* XXX: Don't preclude handling different sized sigset_t's. */
3115 if (sigsetsize != sizeof(sigset_t))
3121 if (get_compat_sigset(&new_set, nset))
3123 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3125 error = sigprocmask(how, &new_set, NULL);
3129 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3133 static void do_sigpending(sigset_t *set)
3135 spin_lock_irq(&current->sighand->siglock);
3136 sigorsets(set, &current->pending.signal,
3137 &current->signal->shared_pending.signal);
3138 spin_unlock_irq(&current->sighand->siglock);
3140 /* Outside the lock because only this thread touches it. */
3141 sigandsets(set, &current->blocked, set);
3145 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
3147 * @uset: stores pending signals
3148 * @sigsetsize: size of sigset_t type or larger
3150 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3154 if (sigsetsize > sizeof(*uset))
3157 do_sigpending(&set);
3159 if (copy_to_user(uset, &set, sigsetsize))
3165 #ifdef CONFIG_COMPAT
3166 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3167 compat_size_t, sigsetsize)
3171 if (sigsetsize > sizeof(*uset))
3174 do_sigpending(&set);
3176 return put_compat_sigset(uset, &set, sigsetsize);
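/*
 * Userspace view (illustrative sketch): a blocked signal that has been
 * raised stays visible here until it is unblocked and delivered:
 *
 *	sigset_t pend;
 *	raise(SIGTERM);			while SIGTERM is blocked
 *	sigpending(&pend);
 *	sigismember(&pend, SIGTERM);	returns 1
 */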
3180 static const struct {
3181 unsigned char limit, layout;
3183 [SIGILL] = { NSIGILL, SIL_FAULT },
3184 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3185 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3186 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3187 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3189 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3191 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3192 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3193 [SIGSYS] = { NSIGSYS, SIL_SYS },
3196 static bool known_siginfo_layout(unsigned sig, int si_code)
3198 if (si_code == SI_KERNEL)
3200 else if (si_code > SI_USER) {
3201 if (sig_specific_sicodes(sig)) {
3202 if (si_code <= sig_sicodes[sig].limit)
3205 else if (si_code <= NSIGPOLL)
3208 else if (si_code >= SI_DETHREAD)
3210 else if (si_code == SI_ASYNCNL)
3215 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3217 enum siginfo_layout layout = SIL_KILL;
3218 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3219 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3220 (si_code <= sig_sicodes[sig].limit)) {
3221 layout = sig_sicodes[sig].layout;
3222 /* Handle the exceptions */
3223 if ((sig == SIGBUS) &&
3224 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3225 layout = SIL_FAULT_MCEERR;
3226 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3227 layout = SIL_FAULT_BNDERR;
3229 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3230 layout = SIL_FAULT_PKUERR;
3232 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3233 layout = SIL_PERF_EVENT;
3234 #ifdef __ARCH_SI_TRAPNO
3235 else if (layout == SIL_FAULT)
3236 layout = SIL_FAULT_TRAPNO;
3239 else if (si_code <= NSIGPOLL)
3242 if (si_code == SI_TIMER)
3244 else if (si_code == SI_SIGIO)
3246 else if (si_code < 0)
3252 static inline char __user *si_expansion(const siginfo_t __user *info)
3254 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3257 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3259 char __user *expansion = si_expansion(to);
3260 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3262 if (clear_user(expansion, SI_EXPANSION_SIZE))
3267 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3268 const siginfo_t __user *from)
3270 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3271 char __user *expansion = si_expansion(from);
3272 char buf[SI_EXPANSION_SIZE];
3275 * An unknown si_code might need more than
3276 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3277 * extra bytes are 0. This guarantees copy_siginfo_to_user
3278 * will return this data to userspace exactly.
3280 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3282 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3290 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3291 const siginfo_t __user *from)
3293 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3295 to->si_signo = signo;
3296 return post_copy_siginfo_from_user(to, from);
3299 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3301 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3303 return post_copy_siginfo_from_user(to, from);
3306 #ifdef CONFIG_COMPAT
3308 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3309 * @to: compat siginfo destination
3310 * @from: kernel siginfo source
3312 * Note: This function does not work properly for SIGCHLD on x32, but
3313 * fortunately it doesn't have to. The only valid callers for this function are
3314 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3315 * The latter does not care because SIGCHLD will never cause a coredump.
3317 void copy_siginfo_to_external32(struct compat_siginfo *to,
3318 const struct kernel_siginfo *from)
3320 memset(to, 0, sizeof(*to));
3322 to->si_signo = from->si_signo;
3323 to->si_errno = from->si_errno;
3324 to->si_code = from->si_code;
3325 switch(siginfo_layout(from->si_signo, from->si_code)) {
3327 to->si_pid = from->si_pid;
3328 to->si_uid = from->si_uid;
3331 to->si_tid = from->si_tid;
3332 to->si_overrun = from->si_overrun;
3333 to->si_int = from->si_int;
3336 to->si_band = from->si_band;
3337 to->si_fd = from->si_fd;
3340 to->si_addr = ptr_to_compat(from->si_addr);
3342 case SIL_FAULT_TRAPNO:
3343 to->si_addr = ptr_to_compat(from->si_addr);
3344 to->si_trapno = from->si_trapno;
3346 case SIL_FAULT_MCEERR:
3347 to->si_addr = ptr_to_compat(from->si_addr);
3348 to->si_addr_lsb = from->si_addr_lsb;
3350 case SIL_FAULT_BNDERR:
3351 to->si_addr = ptr_to_compat(from->si_addr);
3352 to->si_lower = ptr_to_compat(from->si_lower);
3353 to->si_upper = ptr_to_compat(from->si_upper);
3355 case SIL_FAULT_PKUERR:
3356 to->si_addr = ptr_to_compat(from->si_addr);
3357 to->si_pkey = from->si_pkey;
3359 case SIL_PERF_EVENT:
3360 to->si_addr = ptr_to_compat(from->si_addr);
3361 to->si_perf_data = from->si_perf_data;
3362 to->si_perf_type = from->si_perf_type;
3365 to->si_pid = from->si_pid;
3366 to->si_uid = from->si_uid;
3367 to->si_status = from->si_status;
3368 to->si_utime = from->si_utime;
3369 to->si_stime = from->si_stime;
3372 to->si_pid = from->si_pid;
3373 to->si_uid = from->si_uid;
3374 to->si_int = from->si_int;
3377 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3378 to->si_syscall = from->si_syscall;
3379 to->si_arch = from->si_arch;
3384 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3385 const struct kernel_siginfo *from)
3387 struct compat_siginfo new;
3389 copy_siginfo_to_external32(&new, from);
3390 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3395 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3396 const struct compat_siginfo *from)
3399 to->si_signo = from->si_signo;
3400 to->si_errno = from->si_errno;
3401 to->si_code = from->si_code;
3402 switch(siginfo_layout(from->si_signo, from->si_code)) {
3404 to->si_pid = from->si_pid;
3405 to->si_uid = from->si_uid;
3408 to->si_tid = from->si_tid;
3409 to->si_overrun = from->si_overrun;
3410 to->si_int = from->si_int;
3413 to->si_band = from->si_band;
3414 to->si_fd = from->si_fd;
3417 to->si_addr = compat_ptr(from->si_addr);
3419 case SIL_FAULT_TRAPNO:
3420 to->si_addr = compat_ptr(from->si_addr);
3421 to->si_trapno = from->si_trapno;
3423 case SIL_FAULT_MCEERR:
3424 to->si_addr = compat_ptr(from->si_addr);
3425 to->si_addr_lsb = from->si_addr_lsb;
3427 case SIL_FAULT_BNDERR:
3428 to->si_addr = compat_ptr(from->si_addr);
3429 to->si_lower = compat_ptr(from->si_lower);
3430 to->si_upper = compat_ptr(from->si_upper);
3432 case SIL_FAULT_PKUERR:
3433 to->si_addr = compat_ptr(from->si_addr);
3434 to->si_pkey = from->si_pkey;
3436 case SIL_PERF_EVENT:
3437 to->si_addr = compat_ptr(from->si_addr);
3438 to->si_perf_data = from->si_perf_data;
3439 to->si_perf_type = from->si_perf_type;
3442 to->si_pid = from->si_pid;
3443 to->si_uid = from->si_uid;
3444 to->si_status = from->si_status;
3445 #ifdef CONFIG_X86_X32_ABI
3446 if (in_x32_syscall()) {
3447 to->si_utime = from->_sifields._sigchld_x32._utime;
3448 to->si_stime = from->_sifields._sigchld_x32._stime;
3452 to->si_utime = from->si_utime;
3453 to->si_stime = from->si_stime;
3457 to->si_pid = from->si_pid;
3458 to->si_uid = from->si_uid;
3459 to->si_int = from->si_int;
3462 to->si_call_addr = compat_ptr(from->si_call_addr);
3463 to->si_syscall = from->si_syscall;
3464 to->si_arch = from->si_arch;
3470 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3471 const struct compat_siginfo __user *ufrom)
3473 struct compat_siginfo from;
3475 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3478 from.si_signo = signo;
3479 return post_copy_siginfo_from_user32(to, &from);
3482 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3483 const struct compat_siginfo __user *ufrom)
3485 struct compat_siginfo from;
3487 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3490 return post_copy_siginfo_from_user32(to, &from);
3492 #endif /* CONFIG_COMPAT */
3495 * do_sigtimedwait - wait for queued signals specified in @which
3496 * @which: queued signals to wait for
3497 * @info: if non-null, the signal's siginfo is returned here
3498 * @ts: upper bound on process time suspension
3500 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3501 const struct timespec64 *ts)
3503 ktime_t *to = NULL, timeout = KTIME_MAX;
3504 struct task_struct *tsk = current;
3505 sigset_t mask = *which;
3509 if (!timespec64_valid(ts))
3511 timeout = timespec64_to_ktime(*ts);
3516 * Invert the set of allowed signals to get those we want to block.
3518 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3521 spin_lock_irq(&tsk->sighand->siglock);
3522 sig = dequeue_signal(tsk, &mask, info);
3523 if (!sig && timeout) {
3525 * None ready, temporarily unblock the signals we're interested
3526 * in while we sleep, so that we'll be awakened when they
3527 * arrive. Unblocking is always fine, we can avoid
3528 * set_current_blocked().
3530 tsk->real_blocked = tsk->blocked;
3531 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3532 recalc_sigpending();
3533 spin_unlock_irq(&tsk->sighand->siglock);
3535 __set_current_state(TASK_INTERRUPTIBLE);
3536 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3538 spin_lock_irq(&tsk->sighand->siglock);
3539 __set_task_blocked(tsk, &tsk->real_blocked);
3540 sigemptyset(&tsk->real_blocked);
3541 sig = dequeue_signal(tsk, &mask, info);
3543 spin_unlock_irq(&tsk->sighand->siglock);
3547 return ret ? -EINTR : -EAGAIN;
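/*
 * Userspace counterpart (illustrative sketch): the signals waited for
 * should already be blocked, otherwise they may be delivered to a handler
 * instead of being dequeued here:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *		the five-second timeout expired, no signal arrived
 */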
3551 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3553 * @uthese: queued signals to wait for
3554 * @uinfo: if non-null, the signal's siginfo is returned here
3555 * @uts: upper bound on process time suspension
3556 * @sigsetsize: size of sigset_t type
3558 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3559 siginfo_t __user *, uinfo,
3560 const struct __kernel_timespec __user *, uts,
3564 struct timespec64 ts;
3565 kernel_siginfo_t info;
3568 /* XXX: Don't preclude handling different sized sigset_t's. */
3569 if (sigsetsize != sizeof(sigset_t))
3572 if (copy_from_user(&these, uthese, sizeof(these)))
3576 if (get_timespec64(&ts, uts))
3580 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3582 if (ret > 0 && uinfo) {
3583 if (copy_siginfo_to_user(uinfo, &info))
3590 #ifdef CONFIG_COMPAT_32BIT_TIME
3591 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3592 siginfo_t __user *, uinfo,
3593 const struct old_timespec32 __user *, uts,
3597 struct timespec64 ts;
3598 kernel_siginfo_t info;
3601 if (sigsetsize != sizeof(sigset_t))
3604 if (copy_from_user(&these, uthese, sizeof(these)))
3608 if (get_old_timespec32(&ts, uts))
3612 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3614 if (ret > 0 && uinfo) {
3615 if (copy_siginfo_to_user(uinfo, &info))
3623 #ifdef CONFIG_COMPAT
3624 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3625 struct compat_siginfo __user *, uinfo,
3626 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3629 struct timespec64 t;
3630 kernel_siginfo_t info;
3633 if (sigsetsize != sizeof(sigset_t))
3636 if (get_compat_sigset(&s, uthese))
3640 if (get_timespec64(&t, uts))
3644 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3646 if (ret > 0 && uinfo) {
3647 if (copy_siginfo_to_user32(uinfo, &info))
3654 #ifdef CONFIG_COMPAT_32BIT_TIME
3655 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3656 struct compat_siginfo __user *, uinfo,
3657 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3660 struct timespec64 t;
3661 kernel_siginfo_t info;
3664 if (sigsetsize != sizeof(sigset_t))
3667 if (get_compat_sigset(&s, uthese))
3671 if (get_old_timespec32(&t, uts))
3675 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3677 if (ret > 0 && uinfo) {
3678 if (copy_siginfo_to_user32(uinfo, &info))
3687 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3689 clear_siginfo(info);
3690 info->si_signo = sig;
3692 info->si_code = SI_USER;
3693 info->si_pid = task_tgid_vnr(current);
3694 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3698 * sys_kill - send a signal to a process
3699 * @pid: the PID of the process
3700 * @sig: signal to be sent
3702 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3704 struct kernel_siginfo info;
3706 prepare_kill_siginfo(sig, &info);
3708 return kill_something_info(sig, &info, pid);
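/*
 * kill_something_info() dispatches on the sign of @pid, matching the
 * documented kill(2) semantics:
 *
 *	kill(1234, SIGTERM);	just process 1234
 *	kill(0, SIGTERM);	every process in the caller's process group
 *	kill(-5678, SIGTERM);	every process in process group 5678
 *	kill(-1, SIGTERM);	every process the caller may signal, except init
 */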
3712 * Verify that the signaler and signalee either are in the same pid namespace
3713 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3716 static bool access_pidfd_pidns(struct pid *pid)
3718 struct pid_namespace *active = task_active_pid_ns(current);
3719 struct pid_namespace *p = ns_of_pid(pid);
3732 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3733 siginfo_t __user *info)
3735 #ifdef CONFIG_COMPAT
3737 * Avoid hooking up compat syscalls and instead handle necessary
3738 * conversions here. Note, this is a stop-gap measure and should not be
3739 * considered a generic solution.
3741 if (in_compat_syscall())
3742 return copy_siginfo_from_user32(
3743 kinfo, (struct compat_siginfo __user *)info);
3745 return copy_siginfo_from_user(kinfo, info);
3748 static struct pid *pidfd_to_pid(const struct file *file)
3752 pid = pidfd_pid(file);
3756 return tgid_pidfd_to_pid(file);
3760 * sys_pidfd_send_signal - Signal a process through a pidfd
3761 * @pidfd: file descriptor of the process
3762 * @sig: signal to send
3763 * @info: signal info
3764 * @flags: future flags
3766 * The syscall currently only signals via PIDTYPE_PID which covers
3767 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3769 * In order to extend the syscall to threads and process groups the @flags
3770 * argument should be used. In essence, the @flags argument will determine
3771 * what is signaled and not the file descriptor itself. In other words,
3772 * grouping is a property of the flags argument, not a property of the file descriptor.
3775 * Return: 0 on success, negative errno on failure
3777 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3778 siginfo_t __user *, info, unsigned int, flags)
3783 kernel_siginfo_t kinfo;
3785 /* Enforce that flags is 0 until we add an extension. */
3793 /* Is this a pidfd? */
3794 pid = pidfd_to_pid(f.file);
3801 if (!access_pidfd_pidns(pid))
3805 ret = copy_siginfo_from_user_any(&kinfo, info);
3810 if (unlikely(sig != kinfo.si_signo))
3813 /* Only allow sending arbitrary signals to yourself. */
3815 if ((task_pid(current) != pid) &&
3816 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3819 prepare_kill_siginfo(sig, &kinfo);
3822 ret = kill_pid_info(sig, &kinfo, pid);
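/*
 * Illustrative userspace use (untested sketch; at the time of writing
 * glibc has no wrapper, so syscall(2) is used directly):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *
 * Unlike kill(), the pidfd keeps referring to the same process even if
 * the numeric PID is recycled after it exits.
 */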
3830 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3832 struct task_struct *p;
3836 p = find_task_by_vpid(pid);
3837 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3838 error = check_kill_permission(sig, info, p);
3840 * The null signal is a permissions and process existence
3841 * probe. No signal is actually delivered.
3843 if (!error && sig) {
3844 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3846 * If lock_task_sighand() failed we pretend the task
3847 * dies after receiving the signal. The window is tiny,
3848 * and the signal is private anyway.
3850 if (unlikely(error == -ESRCH))
3859 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3861 struct kernel_siginfo info;
3863 clear_siginfo(&info);
3864 info.si_signo = sig;
3866 info.si_code = SI_TKILL;
3867 info.si_pid = task_tgid_vnr(current);
3868 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3870 return do_send_specific(tgid, pid, sig, &info);
3874 * sys_tgkill - send signal to one specific thread
3875 * @tgid: the thread group ID of the thread
3876 * @pid: the PID of the thread
3877 * @sig: signal to be sent
3879 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3880 * exists but no longer belongs to the target thread group. This
3881 * method solves the problem of threads exiting and PIDs getting reused.
3883 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3885 /* This is only valid for single tasks */
3886 if (pid <= 0 || tgid <= 0)
3889 return do_tkill(tgid, pid, sig);
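/*
 * Illustrative use (untested sketch): signal one specific thread without
 * racing against TID reuse in an unrelated process:
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */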
3893 * sys_tkill - send signal to one specific task
3894 * @pid: the PID of the task
3895 * @sig: signal to be sent
3897 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3899 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3901 /* This is only valid for single tasks */
3905 return do_tkill(0, pid, sig);
3908 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3910 /* Not even root can pretend to send signals from the kernel.
3911 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3913 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3914 (task_pid_vnr(current) != pid))
3917 /* POSIX.1b doesn't mention process groups. */
3918 return kill_proc_info(sig, info, pid);
3922 * sys_rt_sigqueueinfo - send signal information to a process
3923 * @pid: the PID of the process
3924 * @sig: signal to be sent
3925 * @uinfo: signal info to be sent
3927 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3928 siginfo_t __user *, uinfo)
3930 kernel_siginfo_t info;
3931 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3934 return do_rt_sigqueueinfo(pid, sig, &info);
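/*
 * The usual userspace entry point is sigqueue(3), which glibc implements
 * on top of this syscall with an SI_QUEUE siginfo carrying a caller-chosen
 * value (illustrative sketch):
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGUSR1, v);
 */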
3937 #ifdef CONFIG_COMPAT
3938 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3941 struct compat_siginfo __user *, uinfo)
3943 kernel_siginfo_t info;
3944 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3947 return do_rt_sigqueueinfo(pid, sig, &info);
3951 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3953 /* This is only valid for single tasks */
3954 if (pid <= 0 || tgid <= 0)
3957 /* Not even root can pretend to send signals from the kernel.
3958 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3960 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3961 (task_pid_vnr(current) != pid))
3964 return do_send_specific(tgid, pid, sig, info);
3967 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3968 siginfo_t __user *, uinfo)
3970 kernel_siginfo_t info;
3971 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3974 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3977 #ifdef CONFIG_COMPAT
3978 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3982 struct compat_siginfo __user *, uinfo)
3984 kernel_siginfo_t info;
3985 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3988 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3993 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3995 void kernel_sigaction(int sig, __sighandler_t action)
3997 spin_lock_irq(&current->sighand->siglock);
3998 current->sighand->action[sig - 1].sa.sa_handler = action;
3999 if (action == SIG_IGN) {
4003 sigaddset(&mask, sig);
4005 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4006 flush_sigqueue_mask(&mask, &current->pending);
4007 recalc_sigpending();
4009 spin_unlock_irq(&current->sighand->siglock);
4011 EXPORT_SYMBOL(kernel_sigaction);
4013 void __weak sigaction_compat_abi(struct k_sigaction *act,
4014 struct k_sigaction *oact)
4018 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4020 struct task_struct *p = current, *t;
4021 struct k_sigaction *k;
4024 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4027 k = &p->sighand->action[sig-1];
4029 spin_lock_irq(&p->sighand->siglock);
4034 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4035 * e.g. by having an architecture use the bit in their uapi.
4037 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4040 * Clear unknown flag bits in order to allow userspace to detect missing
4041 * support for flag bits and to allow the kernel to use non-uapi bits internally.
4045 act->sa.sa_flags &= UAPI_SA_FLAGS;
4047 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4049 sigaction_compat_abi(act, oact);
4052 sigdelsetmask(&act->sa.sa_mask,
4053 sigmask(SIGKILL) | sigmask(SIGSTOP));
4057 * "Setting a signal action to SIG_IGN for a signal that is
4058 * pending shall cause the pending signal to be discarded,
4059 * whether or not it is blocked."
4061 * "Setting a signal action to SIG_DFL for a signal that is
4062 * pending and whose default action is to ignore the signal
4063 * (for example, SIGCHLD), shall cause the pending signal to
4064 * be discarded, whether or not it is blocked"
4066 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4068 sigaddset(&mask, sig);
4069 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4070 for_each_thread(p, t)
4071 flush_sigqueue_mask(&mask, &t->pending);
4075 spin_unlock_irq(&p->sighand->siglock);
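/*
 * Consequence of the POSIX rule quoted above (illustrative sketch): a
 * blocked, pending SIGUSR1 simply vanishes once its action becomes
 * SIG_IGN:
 *
 *	raise(SIGUSR1);			while SIGUSR1 is blocked
 *	signal(SIGUSR1, SIG_IGN);	pending instance is discarded
 *	sigpending(&pend);		SIGUSR1 is no longer a member
 */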
4080 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4083 struct task_struct *t = current;
4086 memset(oss, 0, sizeof(stack_t));
4087 oss->ss_sp = (void __user *) t->sas_ss_sp;
4088 oss->ss_size = t->sas_ss_size;
4089 oss->ss_flags = sas_ss_flags(sp) |
4090 (current->sas_ss_flags & SS_FLAG_BITS);
4094 void __user *ss_sp = ss->ss_sp;
4095 size_t ss_size = ss->ss_size;
4096 unsigned ss_flags = ss->ss_flags;
4099 if (unlikely(on_sig_stack(sp)))
4102 ss_mode = ss_flags & ~SS_FLAG_BITS;
4103 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4107 if (ss_mode == SS_DISABLE) {
4111 if (unlikely(ss_size < min_ss_size))
4115 t->sas_ss_sp = (unsigned long) ss_sp;
4116 t->sas_ss_size = ss_size;
4117 t->sas_ss_flags = ss_flags;
4122 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4126 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4128 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4129 current_user_stack_pointer(),
4131 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
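/*
 * Typical userspace pairing with SA_ONSTACK (illustrative sketch), so a
 * SIGSEGV caused by stack overflow can still run its handler:
 *
 *	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { .sa_handler = h, .sa_flags = SA_ONSTACK };
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */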
4136 int restore_altstack(const stack_t __user *uss)
4139 if (copy_from_user(&new, uss, sizeof(stack_t)))
4141 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4143 /* squash all but EFAULT for now */
4147 int __save_altstack(stack_t __user *uss, unsigned long sp)
4149 struct task_struct *t = current;
4150 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4151 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4152 __put_user(t->sas_ss_size, &uss->ss_size);
4155 if (t->sas_ss_flags & SS_AUTODISARM)
4160 #ifdef CONFIG_COMPAT
4161 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4162 compat_stack_t __user *uoss_ptr)
4168 compat_stack_t uss32;
4169 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4171 uss.ss_sp = compat_ptr(uss32.ss_sp);
4172 uss.ss_flags = uss32.ss_flags;
4173 uss.ss_size = uss32.ss_size;
4175 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4176 compat_user_stack_pointer(),
4177 COMPAT_MINSIGSTKSZ);
4178 if (ret >= 0 && uoss_ptr) {
4180 memset(&old, 0, sizeof(old));
4181 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4182 old.ss_flags = uoss.ss_flags;
4183 old.ss_size = uoss.ss_size;
4184 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4190 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4191 const compat_stack_t __user *, uss_ptr,
4192 compat_stack_t __user *, uoss_ptr)
4194 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4197 int compat_restore_altstack(const compat_stack_t __user *uss)
4199 int err = do_compat_sigaltstack(uss, NULL);
4200 /* squash all but -EFAULT for now */
4201 return err == -EFAULT ? err : 0;
4204 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4207 struct task_struct *t = current;
4208 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4210 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4211 __put_user(t->sas_ss_size, &uss->ss_size);
4214 if (t->sas_ss_flags & SS_AUTODISARM)
4220 #ifdef __ARCH_WANT_SYS_SIGPENDING
4223 * sys_sigpending - examine pending signals
4224 * @uset: where mask of pending signals is returned
4226 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4230 if (sizeof(old_sigset_t) > sizeof(*uset))
4233 do_sigpending(&set);
4235 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4241 #ifdef CONFIG_COMPAT
4242 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4246 do_sigpending(&set);
4248 return put_user(set.sig[0], set32);
4254 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4256 * sys_sigprocmask - examine and change blocked signals
4257 * @how: whether to add, remove, or set signals
4258 * @nset: signals to add or remove (if non-null)
4259 * @oset: previous value of signal mask if non-null
4261 * Some platforms have their own version with special arguments;
4262 * others support only sys_rt_sigprocmask.
4265 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4266 old_sigset_t __user *, oset)
4268 old_sigset_t old_set, new_set;
4269 sigset_t new_blocked;
4271 old_set = current->blocked.sig[0];
4274 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4277 new_blocked = current->blocked;
4281 sigaddsetmask(&new_blocked, new_set);
4284 sigdelsetmask(&new_blocked, new_set);
4287 new_blocked.sig[0] = new_set;
4293 set_current_blocked(&new_blocked);
4297 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4303 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4305 #ifndef CONFIG_ODD_RT_SIGACTION
4307 * sys_rt_sigaction - alter an action taken by a process
4308 * @sig: signal whose action is to be altered
4309 * @act: new sigaction
4310 * @oact: used to save the previous sigaction
4311 * @sigsetsize: size of sigset_t type
4313 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4314 const struct sigaction __user *, act,
4315 struct sigaction __user *, oact,
4318 struct k_sigaction new_sa, old_sa;
4321 /* XXX: Don't preclude handling different sized sigset_t's. */
4322 if (sigsetsize != sizeof(sigset_t))
4325 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4328 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4332 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4337 #ifdef CONFIG_COMPAT
4338 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4339 const struct compat_sigaction __user *, act,
4340 struct compat_sigaction __user *, oact,
4341 compat_size_t, sigsetsize)
4343 struct k_sigaction new_ka, old_ka;
4344 #ifdef __ARCH_HAS_SA_RESTORER
4345 compat_uptr_t restorer;
4349 /* XXX: Don't preclude handling different sized sigset_t's. */
4350 if (sigsetsize != sizeof(compat_sigset_t))
4354 compat_uptr_t handler;
4355 ret = get_user(handler, &act->sa_handler);
4356 new_ka.sa.sa_handler = compat_ptr(handler);
4357 #ifdef __ARCH_HAS_SA_RESTORER
4358 ret |= get_user(restorer, &act->sa_restorer);
4359 new_ka.sa.sa_restorer = compat_ptr(restorer);
4361 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4362 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4367 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4369 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4371 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4372 sizeof(oact->sa_mask));
4373 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4374 #ifdef __ARCH_HAS_SA_RESTORER
4375 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4376 &oact->sa_restorer);
4382 #endif /* !CONFIG_ODD_RT_SIGACTION */
4384 #ifdef CONFIG_OLD_SIGACTION
4385 SYSCALL_DEFINE3(sigaction, int, sig,
4386 const struct old_sigaction __user *, act,
4387 struct old_sigaction __user *, oact)
4389 struct k_sigaction new_ka, old_ka;
4394 if (!access_ok(act, sizeof(*act)) ||
4395 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4396 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4397 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4398 __get_user(mask, &act->sa_mask))
4400 #ifdef __ARCH_HAS_KA_RESTORER
4401 new_ka.ka_restorer = NULL;
4403 siginitset(&new_ka.sa.sa_mask, mask);
4406 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4409 if (!access_ok(oact, sizeof(*oact)) ||
4410 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4411 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4412 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4413 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4420 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4421 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4422 const struct compat_old_sigaction __user *, act,
4423 struct compat_old_sigaction __user *, oact)
4425 struct k_sigaction new_ka, old_ka;
4427 compat_old_sigset_t mask;
4428 compat_uptr_t handler, restorer;
4431 if (!access_ok(act, sizeof(*act)) ||
4432 __get_user(handler, &act->sa_handler) ||
4433 __get_user(restorer, &act->sa_restorer) ||
4434 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4435 __get_user(mask, &act->sa_mask))
4438 #ifdef __ARCH_HAS_KA_RESTORER
4439 new_ka.ka_restorer = NULL;
4441 new_ka.sa.sa_handler = compat_ptr(handler);
4442 new_ka.sa.sa_restorer = compat_ptr(restorer);
4443 siginitset(&new_ka.sa.sa_mask, mask);
4446 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4449 if (!access_ok(oact, sizeof(*oact)) ||
4450 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4451 &oact->sa_handler) ||
4452 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4453 &oact->sa_restorer) ||
4454 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4455 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4462 #ifdef CONFIG_SGETMASK_SYSCALL
4465 * For backwards compatibility. Functionality superseded by sigprocmask.
4467 SYSCALL_DEFINE0(sgetmask)
4470 return current->blocked.sig[0];
4473 SYSCALL_DEFINE1(ssetmask, int, newmask)
4475 int old = current->blocked.sig[0];
4478 siginitset(&newset, newmask);
4479 set_current_blocked(&newset);
4483 #endif /* CONFIG_SGETMASK_SYSCALL */
4485 #ifdef __ARCH_WANT_SYS_SIGNAL
4487 * For backwards compatibility. Functionality superseded by sigaction.
4489 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4491 struct k_sigaction new_sa, old_sa;
4494 new_sa.sa.sa_handler = handler;
4495 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4496 sigemptyset(&new_sa.sa.sa_mask);
4498 ret = do_sigaction(sig, &new_sa, &old_sa);
4500 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4502 #endif /* __ARCH_WANT_SYS_SIGNAL */
4504 #ifdef __ARCH_WANT_SYS_PAUSE
4506 SYSCALL_DEFINE0(pause)
4508 while (!signal_pending(current)) {
4509 __set_current_state(TASK_INTERRUPTIBLE);
4512 return -ERESTARTNOHAND;
4517 static int sigsuspend(sigset_t *set)
4519 current->saved_sigmask = current->blocked;
4520 set_current_blocked(set);
4522 while (!signal_pending(current)) {
4523 __set_current_state(TASK_INTERRUPTIBLE);
4526 set_restore_sigmask();
4527 return -ERESTARTNOHAND;
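/*
 * This is the primitive behind the classic race-free wait (illustrative
 * userspace sketch): block the signal, test the condition, then atomically
 * unblock and sleep:
 *
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	while (!flag)
 *		sigsuspend(&old);	atomically unblocks and waits
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */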
4531 * sys_rt_sigsuspend - replace the signal mask with the
4532 * @unewset value until a signal is received
4533 * @unewset: new signal mask value
4534 * @sigsetsize: size of sigset_t type
4536 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4540 /* XXX: Don't preclude handling different sized sigset_t's. */
4541 if (sigsetsize != sizeof(sigset_t))
4544 if (copy_from_user(&newset, unewset, sizeof(newset)))
4546 return sigsuspend(&newset);
4549 #ifdef CONFIG_COMPAT
4550 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4554 /* XXX: Don't preclude handling different sized sigset_t's. */
4555 if (sigsetsize != sizeof(sigset_t))
4558 if (get_compat_sigset(&newset, unewset))
4560 return sigsuspend(&newset);
4564 #ifdef CONFIG_OLD_SIGSUSPEND
4565 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4568 siginitset(&blocked, mask);
4569 return sigsuspend(&blocked);
4572 #ifdef CONFIG_OLD_SIGSUSPEND3
4573 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4576 siginitset(&blocked, mask);
4577 return sigsuspend(&blocked);
4581 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4586 static inline void siginfo_buildtime_checks(void)
4588 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4590 /* Verify the offsets in the two siginfos match */
4591 #define CHECK_OFFSET(field) \
4592 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4595 CHECK_OFFSET(si_pid);
4596 CHECK_OFFSET(si_uid);
4599 CHECK_OFFSET(si_tid);
4600 CHECK_OFFSET(si_overrun);
4601 CHECK_OFFSET(si_value);
4604 CHECK_OFFSET(si_pid);
4605 CHECK_OFFSET(si_uid);
4606 CHECK_OFFSET(si_value);
4609 CHECK_OFFSET(si_pid);
4610 CHECK_OFFSET(si_uid);
4611 CHECK_OFFSET(si_status);
4612 CHECK_OFFSET(si_utime);
4613 CHECK_OFFSET(si_stime);
4616 CHECK_OFFSET(si_addr);
4617 CHECK_OFFSET(si_trapno);
4618 CHECK_OFFSET(si_addr_lsb);
4619 CHECK_OFFSET(si_lower);
4620 CHECK_OFFSET(si_upper);
4621 CHECK_OFFSET(si_pkey);
4622 CHECK_OFFSET(si_perf_data);
4623 CHECK_OFFSET(si_perf_type);
4626 CHECK_OFFSET(si_band);
4627 CHECK_OFFSET(si_fd);
4630 CHECK_OFFSET(si_call_addr);
4631 CHECK_OFFSET(si_syscall);
4632 CHECK_OFFSET(si_arch);
4636 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4637 offsetof(struct siginfo, si_addr));
4638 if (sizeof(int) == sizeof(void __user *)) {
4639 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4640 sizeof(void __user *));
4642 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4643 sizeof_field(struct siginfo, si_uid)) !=
4644 sizeof(void __user *));
4645 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4646 offsetof(struct siginfo, si_uid));
4648 #ifdef CONFIG_COMPAT
4649 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4650 offsetof(struct compat_siginfo, si_addr));
4651 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4652 sizeof(compat_uptr_t));
4653 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4654 sizeof_field(struct siginfo, si_pid));
4658 void __init signals_init(void)
4660 siginfo_buildtime_checks();
4662 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4665 #ifdef CONFIG_KGDB_KDB
4666 #include <linux/kdb.h>
4668 * kdb_send_sig - Allows kdb to send signals without exposing
4669 * signal internals. This function checks if the required locks are
4670 * available before calling the main signal code, to avoid kdb deadlocks.
4673 void kdb_send_sig(struct task_struct *t, int sig)
4675 static struct task_struct *kdb_prev_t;
4677 if (!spin_trylock(&t->sighand->siglock)) {
4678 kdb_printf("Can't do kill command now.\n"
4679 "The sigmask lock is held somewhere else in "
4680 "kernel, try again later\n");
4683 new_t = kdb_prev_t != t;
4685 if (t->state != TASK_RUNNING && new_t) {
4686 spin_unlock(&t->sighand->siglock);
4687 kdb_printf("Process is not RUNNING, sending a signal from "
4688 "kdb risks deadlock\n"
4689 "on the run queue locks. "
4690 "The signal has _not_ been sent.\n"
4691 "Reissue the kill command if you want to risk "
4695 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4696 spin_unlock(&t->sighand->siglock);
4698 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4701 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4703 #endif /* CONFIG_KGDB_KDB */