// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
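
/*
 * Worked example of the bit math above (illustrative sketch, not part of
 * the original file): with _NSIG_WORDS == 1, a pending set of
 * { SIGINT, SIGTERM } and a blocked set of { SIGINT }:
 *
 *	signal->sig[0]  = sigmask(SIGINT) | sigmask(SIGTERM);
 *	blocked->sig[0] = sigmask(SIGINT);
 *	ready = signal->sig[0] &~ blocked->sig[0];  // == sigmask(SIGTERM)
 *
 * ready is non-zero, so the task has a deliverable signal and
 * TIF_SIGPENDING should be set.
 */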
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they should
	 * clear it do so.
	 */
	return false;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
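
/*
 * Example of the priority rule above (illustrative, not part of the
 * original file): if SIGINT (2) and SIGBUS (7) are both pending and
 * unblocked, plain ffz() ordering would pick SIGINT first, but because
 * SIGBUS is in SYNCHRONOUS_MASK the first word is narrowed to the
 * synchronous signals and next_signal() returns SIGBUS.  Fault signals
 * are serviced before asynchronous ones.
 */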
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;
	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		if (task_set_jobctl_pending(task, signr | gstop)) {
			sig->group_stop_count++;
		}
	}
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path reduces
		 * the timer noise on heavily loaded !highres systems
		 * too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
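
/*
 * Typical caller pattern (illustrative sketch, not part of the original
 * file; get_signal() is the real in-tree user and is more involved):
 *
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		;	// deliver: act on the signal described by info
 */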
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue,
 * freeing the corresponding queue entries.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, the STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
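
/*
 * Illustration of the legacy semantics tested above (not part of the
 * original file): if two SIGTERMs (< SIGRTMIN) are generated before the
 * target dequeues the first, legacy_queue() is true for the second and
 * it is silently coalesced - the target sees a single SIGTERM.  Two
 * queued SIGRTMIN+1 signals, by contrast, are both delivered, in FIFO
 * order.
 */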
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the info has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}
int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example;
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a thread group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the low
 * 32 bits of the pointer.  Those low 32 bits will be stored at a higher
 * address than appears in a 32 bit pointer, so userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr of the sigval_t addr
 * union.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	if (!valid_signal(sig))
		goto err;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto err_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto err_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto err_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
err_unlock:
	rcu_read_unlock();
err:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
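
/*
 * Caller sketch for the 32bit-compat rule described above (illustrative,
 * not part of the original file; the real in-tree user is the USB
 * asyncio completion path):
 *
 *	sigval_t addr;
 *
 *	if (in_compat_syscall())
 *		addr.sival_int = (int)(long)user_ptr;	// low 32 bits only
 *	else
 *		addr.sival_ptr = user_ptr;
 *	kill_pid_usb_asyncio(signr, errno, addr, pid, cred);
 */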
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
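
/*
 * Usage note (illustrative, not part of the original file): the priv
 * argument selects one of the special info pointers above, e.g.
 *
 *	send_sig(SIGHUP, tsk, 1);	// SEND_SIG_PRIV: kernel-generated,
 *					// si_code SI_KERNEL, not ignored
 *					// by SIGNAL_UNKILLABLE init
 *	send_sig(SIGHUP, tsk, 0);	// SEND_SIG_NOINFO: behaves as if
 *					// sent from current, si_code SI_USER
 */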
void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
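
/*
 * Arch-handler sketch (illustrative, not part of the original file): a
 * page-fault path that has decided an access is invalid would typically
 * report it to the current task like
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR,
 *			(void __user *)fault_address);
 *
 * where fault_address comes from the exception frame.  On architectures
 * that don't define __ARCH_SI_TRAPNO or __ia64__, the extra macro
 * parameters above expand to nothing, so this two-argument-plus-addr
 * form is the common calling convention.
 */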
int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from timer_create().  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
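
/*
 * Lifecycle sketch of the preallocation scheme described above
 * (illustrative, not part of the original file; the real user is the
 * POSIX timer code):
 *
 *	// timer_create(): fail with -EAGAIN up front if needed
 *	q = sigqueue_alloc();
 *	if (!q)
 *		return -EAGAIN;
 *	...
 *	// timer expiry: sending can no longer fail for lack of memory
 *	send_sigqueue(q, pid, type);
 *	...
 *	// timer_delete():
 *	sigqueue_free(q);
 */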
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent, which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}
/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		cgroup_enter_frozen();
		preempt_enable_no_resched();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
2278 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2279 * @signr: signr causing group stop if initiating
2281 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2282 * and participate in it. If already set, participate in the existing
2283 * group stop. If participated in a group stop (and thus slept), %true is
2284 * returned with siglock released.
2286 * If ptraced, this function doesn't handle stop itself. Instead,
2287 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2288 * untouched. The caller must ensure that INTERRUPT trap handling takes
2289 * place afterwards.
2292 * Must be called with @current->sighand->siglock held, which is released on %true return.
2296 * %false if group stop is already cancelled or ptrace trap is scheduled.
2297 * %true if participated in group stop.
2299 static bool do_signal_stop(int signr)
2300 __releases(&current->sighand->siglock)
2302 struct signal_struct *sig = current->signal;
2304 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2305 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2306 struct task_struct *t;
2308 /* signr will be recorded in task->jobctl for retries */
2309 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2311 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2312 unlikely(signal_group_exit(sig)))
2315 * There is no group stop already in progress. We must
2318 * While ptraced, a task may be resumed while group stop is
2319 * still in effect and then receive a stop signal and
2320 * initiate another group stop. This deviates from the
2321 * usual behavior as two consecutive stop signals can't
2322 * cause two group stops when !ptraced. That is why we
2323 * also check !task_is_stopped(t) below.
2325 * The condition can be distinguished by testing whether
2326 * SIGNAL_STOP_STOPPED is already set. Don't generate
2327 * group_exit_code in such case.
2329 * This is not necessary for SIGNAL_STOP_CONTINUED because
2330 * an intervening stop signal is required to cause two
2331 * continued events regardless of ptrace.
2333 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2334 sig->group_exit_code = signr;
2336 sig->group_stop_count = 0;
2338 if (task_set_jobctl_pending(current, signr | gstop))
2339 sig->group_stop_count++;
2342 while_each_thread(current, t) {
2344 * Setting state to TASK_STOPPED for a group
2345 * stop is always done with the siglock held,
2346 * so this check has no races.
2348 if (!task_is_stopped(t) &&
2349 task_set_jobctl_pending(t, signr | gstop)) {
2350 sig->group_stop_count++;
2351 if (likely(!(t->ptrace & PT_SEIZED)))
2352 signal_wake_up(t, 0);
2354 ptrace_trap_notify(t);
2359 if (likely(!current->ptrace)) {
2363 * If there are no other threads in the group, or if there
2364 * is a group stop in progress and we are the last to stop,
2365 * report to the parent.
2367 if (task_participate_group_stop(current))
2368 notify = CLD_STOPPED;
2370 set_special_state(TASK_STOPPED);
2371 spin_unlock_irq(&current->sighand->siglock);
2374 * Notify the parent of the group stop completion. Because
2375 * we're not holding either the siglock or tasklist_lock
2376 * here, a ptracer may attach in between; however, this is for
2377 * group stop and should always be delivered to the real
2378 * parent of the group leader. The new ptracer will get
2379 * its notification when this task transitions into
2383 read_lock(&tasklist_lock);
2384 do_notify_parent_cldstop(current, false, notify);
2385 read_unlock(&tasklist_lock);
2388 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2389 cgroup_enter_frozen();
2390 freezable_schedule();
2394 * While ptraced, group stop is handled by STOP trap.
2395 * Schedule it and let the caller deal with it.
2397 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
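/*
 * Userspace view (illustrative): the group stop initiated here is what a
 * parent observes via waitpid() with WUNTRACED, and the SIGCONT wakeup via
 * WCONTINUED.  A minimal sketch, where child is assumed to be a
 * previously forked child's pid:
 *
 *	#include <signal.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	kill(child, SIGSTOP);
 *	waitpid(child, &status, WUNTRACED);
 *	if (WIFSTOPPED(status))			// group stop completed
 *		kill(child, SIGCONT);
 *	waitpid(child, &status, WCONTINUED);
 *	if (WIFCONTINUED(status))		// SIGCONT was delivered
 *		kill(child, SIGTERM);
 */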
2403 * do_jobctl_trap - take care of ptrace jobctl traps
2405 * When PT_SEIZED, it's used for both group stop and explicit
2406 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2407 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2408 * the stop signal; otherwise, %SIGTRAP.
2410 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2411 * number as exit_code and no siginfo.
2414 * Must be called with @current->sighand->siglock held, which may be
2415 * released and re-acquired before returning with intervening sleep.
2417 static void do_jobctl_trap(void)
2419 struct signal_struct *signal = current->signal;
2420 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2422 if (current->ptrace & PT_SEIZED) {
2423 if (!signal->group_stop_count &&
2424 !(signal->flags & SIGNAL_STOP_STOPPED))
2426 WARN_ON_ONCE(!signr);
2427 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2430 WARN_ON_ONCE(!signr);
2431 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2432 current->exit_code = 0;
2437 * do_freezer_trap - handle the freezer jobctl trap
2439 * Puts the task into the frozen state, unless the task is about to quit.
2440 * In that case it drops JOBCTL_TRAP_FREEZE.
2443 * Must be called with @current->sighand->siglock held,
2444 * which is always released before returning.
2446 static void do_freezer_trap(void)
2447 __releases(&current->sighand->siglock)
2450 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2451 * let's make another loop to give it a chance to be handled.
2452 * In any case, we'll return back.
2454 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2455 JOBCTL_TRAP_FREEZE) {
2456 spin_unlock_irq(&current->sighand->siglock);
2461 * Now we're sure that there is no pending fatal signal and no
2462 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2463 * immediately (if there is a non-fatal signal pending), and
2464 * put the task to sleep.
2466 __set_current_state(TASK_INTERRUPTIBLE);
2467 clear_thread_flag(TIF_SIGPENDING);
2468 spin_unlock_irq(&current->sighand->siglock);
2469 cgroup_enter_frozen();
2470 freezable_schedule();
2473 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2476 * We do not check sig_kernel_stop(signr) but set this marker
2477 * unconditionally because we do not know whether debugger will
2478 * change signr. This flag has no meaning unless we are going
2479 * to stop after return from ptrace_stop(). In this case it will
2480 * be checked in do_signal_stop(), we should only stop if it was
2481 * not cleared by SIGCONT while we were sleeping. See also the
2482 * comment in dequeue_signal().
2484 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2485 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2487 /* We're back. Did the debugger cancel the sig? */
2488 signr = current->exit_code;
2492 current->exit_code = 0;
2495 * Update the siginfo structure if the signal has
2496 * changed. If the debugger wanted something
2497 * specific in the siginfo structure then it should
2498 * have updated *info via PTRACE_SETSIGINFO.
2500 if (signr != info->si_signo) {
2501 clear_siginfo(info);
2502 info->si_signo = signr;
2504 info->si_code = SI_USER;
2506 info->si_pid = task_pid_vnr(current->parent);
2507 info->si_uid = from_kuid_munged(current_user_ns(),
2508 task_uid(current->parent));
2512 /* If the (new) signal is now blocked, requeue it. */
2513 if (sigismember(&current->blocked, signr)) {
2514 send_signal(signr, info, current, PIDTYPE_PID);
2521 bool get_signal(struct ksignal *ksig)
2523 struct sighand_struct *sighand = current->sighand;
2524 struct signal_struct *signal = current->signal;
2527 if (unlikely(current->task_works))
2530 if (unlikely(uprobe_deny_signal()))
2534 * Do this once, we can't return to user-mode if freezing() == T.
2535 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2536 * thus do not need another check after return.
2541 spin_lock_irq(&sighand->siglock);
2543 * Every stopped thread goes here after wakeup. Check to see if
2544 * we should notify the parent, prepare_signal(SIGCONT) encodes
2545 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2547 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2550 if (signal->flags & SIGNAL_CLD_CONTINUED)
2551 why = CLD_CONTINUED;
2555 signal->flags &= ~SIGNAL_CLD_MASK;
2557 spin_unlock_irq(&sighand->siglock);
2560 * Notify the parent that we're continuing. This event is
2561 * always per-process and doesn't make a whole lot of sense
2562 * for ptracers, who shouldn't consume the state via
2563 * wait(2) either, but, for backward compatibility, notify
2564 * the ptracer of the group leader too unless it's going to be a duplicate.
2567 read_lock(&tasklist_lock);
2568 do_notify_parent_cldstop(current, false, why);
2570 if (ptrace_reparented(current->group_leader))
2571 do_notify_parent_cldstop(current->group_leader,
2573 read_unlock(&tasklist_lock);
2578 /* Has this task already been marked for death? */
2579 if (signal_group_exit(signal)) {
2580 ksig->info.si_signo = signr = SIGKILL;
2581 sigdelset(&current->pending.signal, SIGKILL);
2582 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2583 &sighand->action[SIGKILL - 1]);
2584 recalc_sigpending();
2589 struct k_sigaction *ka;
2591 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2595 if (unlikely(current->jobctl &
2596 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2597 if (current->jobctl & JOBCTL_TRAP_MASK) {
2599 spin_unlock_irq(&sighand->siglock);
2600 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2607 * If the task is leaving the frozen state, let's update
2608 * cgroup counters and reset the frozen bit.
2610 if (unlikely(cgroup_task_frozen(current))) {
2611 spin_unlock_irq(&sighand->siglock);
2612 cgroup_leave_frozen(false);
2617 * Signals generated by the execution of an instruction
2618 * need to be delivered before any other pending signals
2619 * so that the instruction pointer in the signal stack
2620 * frame points to the faulting instruction.
2622 signr = dequeue_synchronous_signal(&ksig->info);
2624 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2627 break; /* will return 0 */
2629 if (unlikely(current->ptrace) && signr != SIGKILL) {
2630 signr = ptrace_signal(signr, &ksig->info);
2635 ka = &sighand->action[signr-1];
2637 /* Trace actually delivered signals. */
2638 trace_signal_deliver(signr, &ksig->info, ka);
2640 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2642 if (ka->sa.sa_handler != SIG_DFL) {
2643 /* Run the handler. */
2646 if (ka->sa.sa_flags & SA_ONESHOT)
2647 ka->sa.sa_handler = SIG_DFL;
2649 break; /* will return non-zero "signr" value */
2653 * Now we are doing the default action for this signal.
2655 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2659 * Global init gets no signals it doesn't want.
2660 * Container-init gets no signals it doesn't want from same
2663 * Note that if global/container-init sees a sig_kernel_only()
2664 * signal here, the signal must have been generated internally
2665 * or must have come from an ancestor namespace. In either
2666 * case, the signal cannot be dropped.
2668 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2669 !sig_kernel_only(signr))
2672 if (sig_kernel_stop(signr)) {
2674 * The default action is to stop all threads in
2675 * the thread group. The job control signals
2676 * do nothing in an orphaned pgrp, but SIGSTOP
2677 * always works. Note that siglock needs to be
2678 * dropped during the call to is_orphaned_pgrp()
2679 * because of lock ordering with tasklist_lock.
2680 * This allows an intervening SIGCONT to be posted.
2681 * We need to check for that and bail out if necessary.
2683 if (signr != SIGSTOP) {
2684 spin_unlock_irq(&sighand->siglock);
2686 /* signals can be posted during this window */
2688 if (is_current_pgrp_orphaned())
2691 spin_lock_irq(&sighand->siglock);
2694 if (likely(do_signal_stop(ksig->info.si_signo))) {
2695 /* It released the siglock. */
2700 * We didn't actually stop, due to a race
2701 * with SIGCONT or something like that.
2707 spin_unlock_irq(&sighand->siglock);
2708 if (unlikely(cgroup_task_frozen(current)))
2709 cgroup_leave_frozen(true);
2712 * Anything else is fatal, maybe with a core dump.
2714 current->flags |= PF_SIGNALED;
2716 if (sig_kernel_coredump(signr)) {
2717 if (print_fatal_signals)
2718 print_fatal_signal(ksig->info.si_signo);
2719 proc_coredump_connector(current);
2721 * If it was able to dump core, this kills all
2722 * other threads in the group and synchronizes with
2723 * their demise. If we lost the race with another
2724 * thread getting here, it set group_exit_code
2725 * first and our do_group_exit call below will use
2726 * that value and ignore the one we pass it.
2728 do_coredump(&ksig->info);
2732 * Death signals, no core dump.
2734 do_group_exit(ksig->info.si_signo);
2737 spin_unlock_irq(&sighand->siglock);
2740 return ksig->sig > 0;
2744 * signal_delivered - bookkeeping after a signal has been delivered
2745 * @ksig: kernel signal struct
2746 * @stepping: nonzero if debugger single-step or block-step in use
2748 * This function should be called when a signal has successfully been
2749 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2750 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2751 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2753 static void signal_delivered(struct ksignal *ksig, int stepping)
2757 /* A signal was successfully delivered, and the
2758 saved sigmask was stored on the signal frame,
2759 and will be restored by sigreturn. So we can
2760 simply clear the restore sigmask flag. */
2761 clear_restore_sigmask();
2763 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2764 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2765 sigaddset(&blocked, ksig->sig);
2766 set_current_blocked(&blocked);
2767 tracehook_signal_handler(stepping);
2770 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2773 force_sigsegv(ksig->sig);
2775 signal_delivered(ksig, stepping);
2779 * It could be that complete_signal() picked us to notify about the
2780 * group-wide signal. Other threads should be notified now to take
2781 * the shared signals in @which since we will not.
2783 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2786 struct task_struct *t;
2788 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2789 if (sigisemptyset(&retarget))
2793 while_each_thread(tsk, t) {
2794 if (t->flags & PF_EXITING)
2797 if (!has_pending_signals(&retarget, &t->blocked))
2799 /* Remove the signals this thread can handle. */
2800 sigandsets(&retarget, &retarget, &t->blocked);
2802 if (!signal_pending(t))
2803 signal_wake_up(t, 0);
2805 if (sigisemptyset(&retarget))
2810 void exit_signals(struct task_struct *tsk)
2816 * @tsk is about to have PF_EXITING set - lock out users which
2817 * expect a stable threadgroup.
2819 cgroup_threadgroup_change_begin(tsk);
2821 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2822 tsk->flags |= PF_EXITING;
2823 cgroup_threadgroup_change_end(tsk);
2827 spin_lock_irq(&tsk->sighand->siglock);
2829 * From now this task is not visible for group-wide signals,
2830 * see wants_signal(), do_signal_stop().
2832 tsk->flags |= PF_EXITING;
2834 cgroup_threadgroup_change_end(tsk);
2836 if (!signal_pending(tsk))
2839 unblocked = tsk->blocked;
2840 signotset(&unblocked);
2841 retarget_shared_pending(tsk, &unblocked);
2843 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2844 task_participate_group_stop(tsk))
2845 group_stop = CLD_STOPPED;
2847 spin_unlock_irq(&tsk->sighand->siglock);
2850 * If group stop has completed, deliver the notification. This
2851 * should always go to the real parent of the group leader.
2853 if (unlikely(group_stop)) {
2854 read_lock(&tasklist_lock);
2855 do_notify_parent_cldstop(tsk, false, group_stop);
2856 read_unlock(&tasklist_lock);
2861 * System call entry points.
2865 * sys_restart_syscall - restart a system call
2867 SYSCALL_DEFINE0(restart_syscall)
2869 struct restart_block *restart = &current->restart_block;
2870 return restart->fn(restart);
2873 long do_no_restart_syscall(struct restart_block *param)
2878 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2880 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2881 sigset_t newblocked;
2882 /* A set of now blocked but previously unblocked signals. */
2883 sigandnsets(&newblocked, newset, &current->blocked);
2884 retarget_shared_pending(tsk, &newblocked);
2886 tsk->blocked = *newset;
2887 recalc_sigpending();
2891 * set_current_blocked - change current->blocked mask
2894 * It is wrong to change ->blocked directly, this helper should be used
2895 * to ensure the process can't miss a shared signal we are going to block.
2897 void set_current_blocked(sigset_t *newset)
2899 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2900 __set_current_blocked(newset);
2903 void __set_current_blocked(const sigset_t *newset)
2905 struct task_struct *tsk = current;
2908 * In case the signal mask hasn't changed, there is nothing we need
2909 * to do. The current->blocked shouldn't be modified by another task.
2911 if (sigequalsets(&tsk->blocked, newset))
2914 spin_lock_irq(&tsk->sighand->siglock);
2915 __set_task_blocked(tsk, newset);
2916 spin_unlock_irq(&tsk->sighand->siglock);
2920 * This is also useful for kernel threads that want to temporarily
2921 * (or permanently) block certain signals.
2923 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2924 * interface happily blocks "unblockable" signals like SIGKILL and friends.
2927 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2929 struct task_struct *tsk = current;
2932 /* Lockless, only current can change ->blocked, never from irq */
2934 *oldset = tsk->blocked;
2938 sigorsets(&newset, &tsk->blocked, set);
2941 sigandnsets(&newset, &tsk->blocked, set);
2950 __set_current_blocked(&newset);
2953 EXPORT_SYMBOL(sigprocmask);
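/*
 * Userspace counterpart (illustrative): the sigprocmask(2) wrapper
 * reaches this logic via sys_rt_sigprocmask.  A typical block/unblock
 * pattern:
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// SIGINT now stays pending
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// pending SIGINT delivered
 */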
2956 * This API helps set app-provided sigmasks.
2958 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2959 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2961 * Note that it does set_restore_sigmask() in advance, so it must always be
2962 * paired with restore_saved_sigmask_unless() before return from syscall.
2964 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
2970 if (sigsetsize != sizeof(sigset_t))
2972 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
2975 set_restore_sigmask();
2976 current->saved_sigmask = current->blocked;
2977 set_current_blocked(&kmask);
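/*
 * Userspace sketch (illustrative): this helper is what makes the sigmask
 * argument of ppoll(2)/pselect(2) atomic.  Temporarily allowing all
 * signals only while sleeping, assuming they are otherwise blocked
 * (_GNU_SOURCE must be defined for ppoll()):
 *
 *	#include <poll.h>
 *	#include <signal.h>
 *
 *	sigset_t allow;
 *	sigemptyset(&allow);			// block nothing while asleep
 *	struct pollfd pfd = { .fd = 0, .events = POLLIN };
 *	int n = ppoll(&pfd, 1, NULL, &allow);	// old mask restored on return
 */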
2982 #ifdef CONFIG_COMPAT
2983 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
2990 if (sigsetsize != sizeof(compat_sigset_t))
2992 if (get_compat_sigset(&kmask, umask))
2995 set_restore_sigmask();
2996 current->saved_sigmask = current->blocked;
2997 set_current_blocked(&kmask);
3004 * sys_rt_sigprocmask - change the list of currently blocked signals
3005 * @how: whether to add, remove, or set signals
3006 * @nset: new set of signals to block (if non-null)
3007 * @oset: previous value of signal mask if non-null
3008 * @sigsetsize: size of sigset_t type
3010 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3011 sigset_t __user *, oset, size_t, sigsetsize)
3013 sigset_t old_set, new_set;
3016 /* XXX: Don't preclude handling different sized sigset_t's. */
3017 if (sigsetsize != sizeof(sigset_t))
3020 old_set = current->blocked;
3023 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3025 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3027 error = sigprocmask(how, &new_set, NULL);
3033 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3040 #ifdef CONFIG_COMPAT
3041 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3042 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3044 sigset_t old_set = current->blocked;
3046 /* XXX: Don't preclude handling different sized sigset_t's. */
3047 if (sigsetsize != sizeof(sigset_t))
3053 if (get_compat_sigset(&new_set, nset))
3055 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3057 error = sigprocmask(how, &new_set, NULL);
3061 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3065 static void do_sigpending(sigset_t *set)
3067 spin_lock_irq(&current->sighand->siglock);
3068 sigorsets(set, &current->pending.signal,
3069 &current->signal->shared_pending.signal);
3070 spin_unlock_irq(&current->sighand->siglock);
3072 /* Outside the lock because only this thread touches it. */
3073 sigandsets(set, &current->blocked, set);
3077 * sys_rt_sigpending - examine pending signals that have been raised while blocked
3079 * @uset: stores pending signals
3080 * @sigsetsize: size of sigset_t type or larger
3082 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3086 if (sigsetsize > sizeof(*uset))
3089 do_sigpending(&set);
3091 if (copy_to_user(uset, &set, sigsetsize))
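/*
 * Userspace sketch (illustrative): sigpending(2) reports the union
 * computed by do_sigpending() above:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	sigset_t pend;
 *	sigpending(&pend);
 *	if (sigismember(&pend, SIGINT))
 *		puts("SIGINT was raised while blocked");
 */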
3097 #ifdef CONFIG_COMPAT
3098 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3099 compat_size_t, sigsetsize)
3103 if (sigsetsize > sizeof(*uset))
3106 do_sigpending(&set);
3108 return put_compat_sigset(uset, &set, sigsetsize);
3112 static const struct {
3113 unsigned char limit, layout;
3114 } sig_sicodes[] = {
3115 [SIGILL] = { NSIGILL, SIL_FAULT },
3116 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3117 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3118 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3119 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3121 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3123 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3124 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3125 [SIGSYS] = { NSIGSYS, SIL_SYS },
3128 static bool known_siginfo_layout(unsigned sig, int si_code)
3130 if (si_code == SI_KERNEL)
3132 else if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3133 if (sig_specific_sicodes(sig)) {
3134 if (si_code <= sig_sicodes[sig].limit)
3137 else if (si_code <= NSIGPOLL)
3140 else if (si_code >= SI_DETHREAD)
3142 else if (si_code == SI_ASYNCNL)
3147 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3149 enum siginfo_layout layout = SIL_KILL;
3150 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3151 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3152 (si_code <= sig_sicodes[sig].limit)) {
3153 layout = sig_sicodes[sig].layout;
3154 /* Handle the exceptions */
3155 if ((sig == SIGBUS) &&
3156 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3157 layout = SIL_FAULT_MCEERR;
3158 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3159 layout = SIL_FAULT_BNDERR;
3161 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3162 layout = SIL_FAULT_PKUERR;
3165 else if (si_code <= NSIGPOLL)
3168 if (si_code == SI_TIMER)
3170 else if (si_code == SI_SIGIO)
3172 else if (si_code < 0)
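/*
 * Userspace sketch (illustrative): the SIL_FAULT layout selected above
 * for SIGSEGV is what an SA_SIGINFO handler relies on when it reads
 * si_addr:
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_segv(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		void *fault_addr = si->si_addr;	// valid in the fault layout
 *		(void)fault_addr;
 *		_exit(128 + sig);		// async-signal-safe exit
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa = { .sa_sigaction = on_segv,
 *					.sa_flags = SA_SIGINFO };
 *		sigaction(SIGSEGV, &sa, NULL);
 *		return *(volatile int *)0;	// fault to demonstrate
 *	}
 */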
3178 static inline char __user *si_expansion(const siginfo_t __user *info)
3180 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3183 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3185 char __user *expansion = si_expansion(to);
3186 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3188 if (clear_user(expansion, SI_EXPANSION_SIZE))
3193 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3194 const siginfo_t __user *from)
3196 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3197 char __user *expansion = si_expansion(from);
3198 char buf[SI_EXPANSION_SIZE];
3201 * An unknown si_code might need more than
3202 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3203 * extra bytes are 0. This guarantees copy_siginfo_to_user
3204 * will return this data to userspace exactly.
3206 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3208 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3216 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3217 const siginfo_t __user *from)
3219 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3221 to->si_signo = signo;
3222 return post_copy_siginfo_from_user(to, from);
3225 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3227 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3229 return post_copy_siginfo_from_user(to, from);
3232 #ifdef CONFIG_COMPAT
3233 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3234 const struct kernel_siginfo *from)
3235 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3237 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3239 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3240 const struct kernel_siginfo *from, bool x32_ABI)
3243 struct compat_siginfo new;
3244 memset(&new, 0, sizeof(new));
3246 new.si_signo = from->si_signo;
3247 new.si_errno = from->si_errno;
3248 new.si_code = from->si_code;
3249 switch (siginfo_layout(from->si_signo, from->si_code)) {
3251 new.si_pid = from->si_pid;
3252 new.si_uid = from->si_uid;
3255 new.si_tid = from->si_tid;
3256 new.si_overrun = from->si_overrun;
3257 new.si_int = from->si_int;
3260 new.si_band = from->si_band;
3261 new.si_fd = from->si_fd;
3264 new.si_addr = ptr_to_compat(from->si_addr);
3265 #ifdef __ARCH_SI_TRAPNO
3266 new.si_trapno = from->si_trapno;
3269 case SIL_FAULT_MCEERR:
3270 new.si_addr = ptr_to_compat(from->si_addr);
3271 #ifdef __ARCH_SI_TRAPNO
3272 new.si_trapno = from->si_trapno;
3274 new.si_addr_lsb = from->si_addr_lsb;
3276 case SIL_FAULT_BNDERR:
3277 new.si_addr = ptr_to_compat(from->si_addr);
3278 #ifdef __ARCH_SI_TRAPNO
3279 new.si_trapno = from->si_trapno;
3281 new.si_lower = ptr_to_compat(from->si_lower);
3282 new.si_upper = ptr_to_compat(from->si_upper);
3284 case SIL_FAULT_PKUERR:
3285 new.si_addr = ptr_to_compat(from->si_addr);
3286 #ifdef __ARCH_SI_TRAPNO
3287 new.si_trapno = from->si_trapno;
3289 new.si_pkey = from->si_pkey;
3292 new.si_pid = from->si_pid;
3293 new.si_uid = from->si_uid;
3294 new.si_status = from->si_status;
3295 #ifdef CONFIG_X86_X32_ABI
3297 new._sifields._sigchld_x32._utime = from->si_utime;
3298 new._sifields._sigchld_x32._stime = from->si_stime;
3302 new.si_utime = from->si_utime;
3303 new.si_stime = from->si_stime;
3307 new.si_pid = from->si_pid;
3308 new.si_uid = from->si_uid;
3309 new.si_int = from->si_int;
3312 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3313 new.si_syscall = from->si_syscall;
3314 new.si_arch = from->si_arch;
3318 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3324 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3325 const struct compat_siginfo *from)
3328 to->si_signo = from->si_signo;
3329 to->si_errno = from->si_errno;
3330 to->si_code = from->si_code;
3331 switch (siginfo_layout(from->si_signo, from->si_code)) {
3333 to->si_pid = from->si_pid;
3334 to->si_uid = from->si_uid;
3337 to->si_tid = from->si_tid;
3338 to->si_overrun = from->si_overrun;
3339 to->si_int = from->si_int;
3342 to->si_band = from->si_band;
3343 to->si_fd = from->si_fd;
3346 to->si_addr = compat_ptr(from->si_addr);
3347 #ifdef __ARCH_SI_TRAPNO
3348 to->si_trapno = from->si_trapno;
3351 case SIL_FAULT_MCEERR:
3352 to->si_addr = compat_ptr(from->si_addr);
3353 #ifdef __ARCH_SI_TRAPNO
3354 to->si_trapno = from->si_trapno;
3356 to->si_addr_lsb = from->si_addr_lsb;
3358 case SIL_FAULT_BNDERR:
3359 to->si_addr = compat_ptr(from->si_addr);
3360 #ifdef __ARCH_SI_TRAPNO
3361 to->si_trapno = from->si_trapno;
3363 to->si_lower = compat_ptr(from->si_lower);
3364 to->si_upper = compat_ptr(from->si_upper);
3366 case SIL_FAULT_PKUERR:
3367 to->si_addr = compat_ptr(from->si_addr);
3368 #ifdef __ARCH_SI_TRAPNO
3369 to->si_trapno = from->si_trapno;
3371 to->si_pkey = from->si_pkey;
3374 to->si_pid = from->si_pid;
3375 to->si_uid = from->si_uid;
3376 to->si_status = from->si_status;
3377 #ifdef CONFIG_X86_X32_ABI
3378 if (in_x32_syscall()) {
3379 to->si_utime = from->_sifields._sigchld_x32._utime;
3380 to->si_stime = from->_sifields._sigchld_x32._stime;
3384 to->si_utime = from->si_utime;
3385 to->si_stime = from->si_stime;
3389 to->si_pid = from->si_pid;
3390 to->si_uid = from->si_uid;
3391 to->si_int = from->si_int;
3394 to->si_call_addr = compat_ptr(from->si_call_addr);
3395 to->si_syscall = from->si_syscall;
3396 to->si_arch = from->si_arch;
3402 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3403 const struct compat_siginfo __user *ufrom)
3405 struct compat_siginfo from;
3407 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3410 from.si_signo = signo;
3411 return post_copy_siginfo_from_user32(to, &from);
3414 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3415 const struct compat_siginfo __user *ufrom)
3417 struct compat_siginfo from;
3419 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3422 return post_copy_siginfo_from_user32(to, &from);
3424 #endif /* CONFIG_COMPAT */
3427 * do_sigtimedwait - wait for queued signals specified in @which
3428 * @which: queued signals to wait for
3429 * @info: if non-null, the signal's siginfo is returned here
3430 * @ts: upper bound on process time suspension
3432 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3433 const struct timespec64 *ts)
3435 ktime_t *to = NULL, timeout = KTIME_MAX;
3436 struct task_struct *tsk = current;
3437 sigset_t mask = *which;
3441 if (!timespec64_valid(ts))
3443 timeout = timespec64_to_ktime(*ts);
3448 * Invert the set of allowed signals to get those we want to block.
3450 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3453 spin_lock_irq(&tsk->sighand->siglock);
3454 sig = dequeue_signal(tsk, &mask, info);
3455 if (!sig && timeout) {
3457 * None ready, temporarily unblock those we're interested
3458 * in while we are sleeping, so that we'll be awakened when
3459 * they arrive. Unblocking is always fine, we can avoid
3460 * set_current_blocked().
3462 tsk->real_blocked = tsk->blocked;
3463 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3464 recalc_sigpending();
3465 spin_unlock_irq(&tsk->sighand->siglock);
3467 __set_current_state(TASK_INTERRUPTIBLE);
3468 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3470 spin_lock_irq(&tsk->sighand->siglock);
3471 __set_task_blocked(tsk, &tsk->real_blocked);
3472 sigemptyset(&tsk->real_blocked);
3473 sig = dequeue_signal(tsk, &mask, info);
3475 spin_unlock_irq(&tsk->sighand->siglock);
3479 return ret ? -EINTR : -EAGAIN;
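/*
 * Userspace sketch (illustrative): sigtimedwait(2) lands here; the signal
 * should already be blocked, or it may be delivered to a handler instead
 * of being dequeued:
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	int sig = sigtimedwait(&set, &si, &ts);	// -1 with EAGAIN on timeout
 */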
3483 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3485 * @uthese: queued signals to wait for
3486 * @uinfo: if non-null, the signal's siginfo is returned here
3487 * @uts: upper bound on process time suspension
3488 * @sigsetsize: size of sigset_t type
3490 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3491 siginfo_t __user *, uinfo,
3492 const struct __kernel_timespec __user *, uts,
3496 struct timespec64 ts;
3497 kernel_siginfo_t info;
3500 /* XXX: Don't preclude handling different sized sigset_t's. */
3501 if (sigsetsize != sizeof(sigset_t))
3504 if (copy_from_user(&these, uthese, sizeof(these)))
3508 if (get_timespec64(&ts, uts))
3512 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3514 if (ret > 0 && uinfo) {
3515 if (copy_siginfo_to_user(uinfo, &info))
3522 #ifdef CONFIG_COMPAT_32BIT_TIME
3523 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3524 siginfo_t __user *, uinfo,
3525 const struct old_timespec32 __user *, uts,
3529 struct timespec64 ts;
3530 kernel_siginfo_t info;
3533 if (sigsetsize != sizeof(sigset_t))
3536 if (copy_from_user(&these, uthese, sizeof(these)))
3540 if (get_old_timespec32(&ts, uts))
3544 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3546 if (ret > 0 && uinfo) {
3547 if (copy_siginfo_to_user(uinfo, &info))
3555 #ifdef CONFIG_COMPAT
3556 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3557 struct compat_siginfo __user *, uinfo,
3558 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3561 struct timespec64 t;
3562 kernel_siginfo_t info;
3565 if (sigsetsize != sizeof(sigset_t))
3568 if (get_compat_sigset(&s, uthese))
3572 if (get_timespec64(&t, uts))
3576 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3578 if (ret > 0 && uinfo) {
3579 if (copy_siginfo_to_user32(uinfo, &info))
3586 #ifdef CONFIG_COMPAT_32BIT_TIME
3587 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3588 struct compat_siginfo __user *, uinfo,
3589 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3592 struct timespec64 t;
3593 kernel_siginfo_t info;
3596 if (sigsetsize != sizeof(sigset_t))
3599 if (get_compat_sigset(&s, uthese))
3603 if (get_old_timespec32(&t, uts))
3607 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3609 if (ret > 0 && uinfo) {
3610 if (copy_siginfo_to_user32(uinfo, &info))
3619 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3621 clear_siginfo(info);
3622 info->si_signo = sig;
3624 info->si_code = SI_USER;
3625 info->si_pid = task_tgid_vnr(current);
3626 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3630 * sys_kill - send a signal to a process
3631 * @pid: the PID of the process
3632 * @sig: signal to be sent
3634 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3636 struct kernel_siginfo info;
3638 prepare_kill_siginfo(sig, &info);
3640 return kill_something_info(sig, &info, pid);
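/*
 * Userspace sketch (illustrative): signal 0 performs only the existence
 * and permission checks (see the "null signal" comment in
 * do_send_specific() below), which makes kill(2) a cheap liveness probe:
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	if (kill(pid, 0) == 0)
 *		;			// process exists and may be signaled
 *	else if (errno == ESRCH)
 *		;			// no such process
 *	else if (errno == EPERM)
 *		;			// exists, but we lack permission
 */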
3644 * Verify that the signaler and signalee either are in the same pid namespace
3645 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3648 static bool access_pidfd_pidns(struct pid *pid)
3650 struct pid_namespace *active = task_active_pid_ns(current);
3651 struct pid_namespace *p = ns_of_pid(pid);
3664 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3666 #ifdef CONFIG_COMPAT
3668 * Avoid hooking up compat syscalls and instead handle necessary
3669 * conversions here. Note, this is a stop-gap measure and should not be
3670 * considered a generic solution.
3672 if (in_compat_syscall())
3673 return copy_siginfo_from_user32(
3674 kinfo, (struct compat_siginfo __user *)info);
3676 return copy_siginfo_from_user(kinfo, info);
3679 static struct pid *pidfd_to_pid(const struct file *file)
3683 pid = pidfd_pid(file);
3687 return tgid_pidfd_to_pid(file);
3691 * sys_pidfd_send_signal - Signal a process through a pidfd
3692 * @pidfd: file descriptor of the process
3693 * @sig: signal to send
3694 * @info: signal info
3695 * @flags: future flags
3697 * The syscall currently only signals via PIDTYPE_PID which covers
3698 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3700 * In order to extend the syscall to threads and process groups the @flags
3701 * argument should be used. In essence, the @flags argument will determine
3702 * what is signaled and not the file descriptor itself. Put in other words,
3703 * grouping is a property of the flags argument, not a property of the file descriptor.
3706 * Return: 0 on success, negative errno on failure
3708 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3709 siginfo_t __user *, info, unsigned int, flags)
3714 kernel_siginfo_t kinfo;
3716 /* Enforce flags be set to 0 until we add an extension. */
3724 /* Is this a pidfd? */
3725 pid = pidfd_to_pid(f.file);
3732 if (!access_pidfd_pidns(pid))
3736 ret = copy_siginfo_from_user_any(&kinfo, info);
3741 if (unlikely(sig != kinfo.si_signo))
3744 /* Only allow sending arbitrary signals to yourself. */
3746 if ((task_pid(current) != pid) &&
3747 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3750 prepare_kill_siginfo(sig, &kinfo);
3753 ret = kill_pid_info(sig, &kinfo, pid);
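/*
 * Userspace sketch (illustrative): with a NULL siginfo the kernel fills
 * one in via prepare_kill_siginfo(), just as for kill(2).  Assuming
 * headers that define SYS_pidfd_open and SYS_pidfd_send_signal:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	if (pidfd >= 0) {
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */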
3761 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3763 struct task_struct *p;
3767 p = find_task_by_vpid(pid);
3768 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3769 error = check_kill_permission(sig, info, p);
3771 * The null signal is a permissions and process existence
3772 * probe. No signal is actually delivered.
3774 if (!error && sig) {
3775 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3777 * If lock_task_sighand() failed we pretend the task
3778 * dies after receiving the signal. The window is tiny,
3779 * and the signal is private anyway.
3781 if (unlikely(error == -ESRCH))
3790 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3792 struct kernel_siginfo info;
3794 clear_siginfo(&info);
3795 info.si_signo = sig;
3797 info.si_code = SI_TKILL;
3798 info.si_pid = task_tgid_vnr(current);
3799 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3801 return do_send_specific(tgid, pid, sig, &info);
3805 * sys_tgkill - send signal to one specific thread
3806 * @tgid: the thread group ID of the thread
3807 * @pid: the PID of the thread
3808 * @sig: signal to be sent
3810 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3811 * exists but no longer belongs to the target process. This
3812 * method solves the problem of threads exiting and PIDs getting reused.
3814 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3816 /* This is only valid for single tasks */
3817 if (pid <= 0 || tgid <= 0)
3820 return do_tkill(tgid, pid, sig);
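/*
 * Userspace sketch (illustrative): glibc historically shipped no tgkill()
 * wrapper, so the raw syscall is the common way to signal exactly one
 * thread:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = syscall(SYS_gettid);	// calling thread's id
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */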
3824 * sys_tkill - send signal to one specific task
3825 * @pid: the PID of the task
3826 * @sig: signal to be sent
3828 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3830 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3832 /* This is only valid for single tasks */
3836 return do_tkill(0, pid, sig);
3839 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3841 /* Not even root can pretend to send signals from the kernel.
3842 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3844 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3845 (task_pid_vnr(current) != pid))
3848 /* POSIX.1b doesn't mention process groups. */
3849 return kill_proc_info(sig, info, pid);
3853 * sys_rt_sigqueueinfo - send signal information to a process
3854 * @pid: the PID of the thread
3855 * @sig: signal to be sent
3856 * @uinfo: signal info to be sent
3858 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3859 siginfo_t __user *, uinfo)
3861 kernel_siginfo_t info;
3862 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3865 return do_rt_sigqueueinfo(pid, sig, &info);
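/*
 * Userspace sketch (illustrative): sigqueue(3) is the sanctioned way into
 * this syscall; it uses si_code == SI_QUEUE, which satisfies the negative
 * si_code restriction checked in do_rt_sigqueueinfo() above:
 *
 *	#include <signal.h>
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGUSR1, v);	// receiver sees si_value.sival_int == 42
 */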
3868 #ifdef CONFIG_COMPAT
3869 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3872 struct compat_siginfo __user *, uinfo)
3874 kernel_siginfo_t info;
3875 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3878 return do_rt_sigqueueinfo(pid, sig, &info);
3882 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3884 /* This is only valid for single tasks */
3885 if (pid <= 0 || tgid <= 0)
3888 /* Not even root can pretend to send signals from the kernel.
3889 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3891 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3892 (task_pid_vnr(current) != pid))
3895 return do_send_specific(tgid, pid, sig, info);
3898 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3899 siginfo_t __user *, uinfo)
3901 kernel_siginfo_t info;
3902 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3905 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3908 #ifdef CONFIG_COMPAT
3909 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3913 struct compat_siginfo __user *, uinfo)
3915 kernel_siginfo_t info;
3916 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3919 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3924 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3926 void kernel_sigaction(int sig, __sighandler_t action)
3928 spin_lock_irq(&current->sighand->siglock);
3929 current->sighand->action[sig - 1].sa.sa_handler = action;
3930 if (action == SIG_IGN) {
3934 sigaddset(&mask, sig);
3936 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3937 flush_sigqueue_mask(&mask, &current->pending);
3938 recalc_sigpending();
3940 spin_unlock_irq(&current->sighand->siglock);
3942 EXPORT_SYMBOL(kernel_sigaction);
3944 void __weak sigaction_compat_abi(struct k_sigaction *act,
3945 struct k_sigaction *oact)
3949 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3951 struct task_struct *p = current, *t;
3952 struct k_sigaction *k;
3955 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3958 k = &p->sighand->action[sig-1];
3960 spin_lock_irq(&p->sighand->siglock);
3964 sigaction_compat_abi(act, oact);
3967 sigdelsetmask(&act->sa.sa_mask,
3968 sigmask(SIGKILL) | sigmask(SIGSTOP));
3972 * "Setting a signal action to SIG_IGN for a signal that is
3973 * pending shall cause the pending signal to be discarded,
3974 * whether or not it is blocked."
3976 * "Setting a signal action to SIG_DFL for a signal that is
3977 * pending and whose default action is to ignore the signal
3978 * (for example, SIGCHLD), shall cause the pending signal to
3979 * be discarded, whether or not it is blocked"
3981 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3983 sigaddset(&mask, sig);
3984 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3985 for_each_thread(p, t)
3986 flush_sigqueue_mask(&mask, &t->pending);
3990 spin_unlock_irq(&p->sighand->siglock);
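/*
 * Userspace sketch (illustrative): the POSIX behaviour quoted above is
 * visible through sigaction(2): setting SIG_IGN also discards a pending
 * instance of the signal:
 *
 *	#include <signal.h>
 *
 *	struct sigaction sa = { 0 }, old;
 *	sa.sa_handler = SIG_IGN;
 *	sigaction(SIGCHLD, &sa, &old);	// pending SIGCHLD is discarded too
 */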
3995 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
3998 struct task_struct *t = current;
4001 memset(oss, 0, sizeof(stack_t));
4002 oss->ss_sp = (void __user *) t->sas_ss_sp;
4003 oss->ss_size = t->sas_ss_size;
4004 oss->ss_flags = sas_ss_flags(sp) |
4005 (current->sas_ss_flags & SS_FLAG_BITS);
4009 void __user *ss_sp = ss->ss_sp;
4010 size_t ss_size = ss->ss_size;
4011 unsigned ss_flags = ss->ss_flags;
4014 if (unlikely(on_sig_stack(sp)))
4017 ss_mode = ss_flags & ~SS_FLAG_BITS;
4018 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4022 if (ss_mode == SS_DISABLE) {
4026 if (unlikely(ss_size < min_ss_size))
4030 t->sas_ss_sp = (unsigned long) ss_sp;
4031 t->sas_ss_size = ss_size;
4032 t->sas_ss_flags = ss_flags;
4037 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4041 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4043 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4044 current_user_stack_pointer(),
4046 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
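/*
 * Userspace sketch (illustrative): installing an alternate stack so that
 * a handler registered with SA_ONSTACK can still run after the main stack
 * overflows:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);		// SA_ONSTACK handlers now use ss
 */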
4051 int restore_altstack(const stack_t __user *uss)
4054 if (copy_from_user(&new, uss, sizeof(stack_t)))
4056 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4058 /* squash all but EFAULT for now */
4062 int __save_altstack(stack_t __user *uss, unsigned long sp)
4064 struct task_struct *t = current;
4065 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4066 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4067 __put_user(t->sas_ss_size, &uss->ss_size);
4070 if (t->sas_ss_flags & SS_AUTODISARM)
4075 #ifdef CONFIG_COMPAT
4076 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4077 compat_stack_t __user *uoss_ptr)
4083 compat_stack_t uss32;
4084 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4086 uss.ss_sp = compat_ptr(uss32.ss_sp);
4087 uss.ss_flags = uss32.ss_flags;
4088 uss.ss_size = uss32.ss_size;
4090 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4091 compat_user_stack_pointer(),
4092 COMPAT_MINSIGSTKSZ);
4093 if (ret >= 0 && uoss_ptr) {
4095 memset(&old, 0, sizeof(old));
4096 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4097 old.ss_flags = uoss.ss_flags;
4098 old.ss_size = uoss.ss_size;
4099 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4105 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4106 const compat_stack_t __user *, uss_ptr,
4107 compat_stack_t __user *, uoss_ptr)
4109 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4112 int compat_restore_altstack(const compat_stack_t __user *uss)
4114 int err = do_compat_sigaltstack(uss, NULL);
4115 /* squash all but -EFAULT for now */
4116 return err == -EFAULT ? err : 0;
4119 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4122 struct task_struct *t = current;
4123 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4125 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4126 __put_user(t->sas_ss_size, &uss->ss_size);
4129 if (t->sas_ss_flags & SS_AUTODISARM)
4135 #ifdef __ARCH_WANT_SYS_SIGPENDING
4138 * sys_sigpending - examine pending signals
4139 * @uset: where the mask of pending signals is returned
4141 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4145 if (sizeof(old_sigset_t) > sizeof(*uset))
4148 do_sigpending(&set);
4150 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4156 #ifdef CONFIG_COMPAT
4157 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4161 do_sigpending(&set);
4163 return put_user(set.sig[0], set32);
4169 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4171 * sys_sigprocmask - examine and change blocked signals
4172 * @how: whether to add, remove, or set signals
4173 * @nset: signals to add or remove (if non-null)
4174 * @oset: previous value of signal mask if non-null
4176 * Some platforms have their own version with special arguments;
4177 * others support only sys_rt_sigprocmask.
4180 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4181 old_sigset_t __user *, oset)
4183 old_sigset_t old_set, new_set;
4184 sigset_t new_blocked;
4186 old_set = current->blocked.sig[0];
4189 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4192 new_blocked = current->blocked;
4196 sigaddsetmask(&new_blocked, new_set);
4199 sigdelsetmask(&new_blocked, new_set);
4202 new_blocked.sig[0] = new_set;
4208 set_current_blocked(&new_blocked);
4212 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4218 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4220 #ifndef CONFIG_ODD_RT_SIGACTION
4222 * sys_rt_sigaction - alter an action taken by a process
4223 * @sig: signal to be sent
4224 * @act: new sigaction
4225 * @oact: used to save the previous sigaction
4226 * @sigsetsize: size of sigset_t type
4228 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4229 const struct sigaction __user *, act,
4230 struct sigaction __user *, oact,
4233 struct k_sigaction new_sa, old_sa;
4236 /* XXX: Don't preclude handling different sized sigset_t's. */
4237 if (sigsetsize != sizeof(sigset_t))
4240 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4243 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4247 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4252 #ifdef CONFIG_COMPAT
4253 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4254 const struct compat_sigaction __user *, act,
4255 struct compat_sigaction __user *, oact,
4256 compat_size_t, sigsetsize)
4258 struct k_sigaction new_ka, old_ka;
4259 #ifdef __ARCH_HAS_SA_RESTORER
4260 compat_uptr_t restorer;
4264 /* XXX: Don't preclude handling different sized sigset_t's. */
4265 if (sigsetsize != sizeof(compat_sigset_t))
4269 compat_uptr_t handler;
4270 ret = get_user(handler, &act->sa_handler);
4271 new_ka.sa.sa_handler = compat_ptr(handler);
4272 #ifdef __ARCH_HAS_SA_RESTORER
4273 ret |= get_user(restorer, &act->sa_restorer);
4274 new_ka.sa.sa_restorer = compat_ptr(restorer);
4276 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4277 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4282 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4284 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4286 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4287 sizeof(oact->sa_mask));
4288 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4289 #ifdef __ARCH_HAS_SA_RESTORER
4290 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4291 &oact->sa_restorer);
4297 #endif /* !CONFIG_ODD_RT_SIGACTION */
4299 #ifdef CONFIG_OLD_SIGACTION
4300 SYSCALL_DEFINE3(sigaction, int, sig,
4301 const struct old_sigaction __user *, act,
4302 struct old_sigaction __user *, oact)
4304 struct k_sigaction new_ka, old_ka;
4309 if (!access_ok(act, sizeof(*act)) ||
4310 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4311 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4312 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4313 __get_user(mask, &act->sa_mask))
4315 #ifdef __ARCH_HAS_KA_RESTORER
4316 new_ka.ka_restorer = NULL;
4318 siginitset(&new_ka.sa.sa_mask, mask);
4321 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4324 if (!access_ok(oact, sizeof(*oact)) ||
4325 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4326 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4327 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4328 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4335 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4336 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4337 const struct compat_old_sigaction __user *, act,
4338 struct compat_old_sigaction __user *, oact)
4340 struct k_sigaction new_ka, old_ka;
4342 compat_old_sigset_t mask;
4343 compat_uptr_t handler, restorer;
4346 if (!access_ok(act, sizeof(*act)) ||
4347 __get_user(handler, &act->sa_handler) ||
4348 __get_user(restorer, &act->sa_restorer) ||
4349 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4350 __get_user(mask, &act->sa_mask))
4353 #ifdef __ARCH_HAS_KA_RESTORER
4354 new_ka.ka_restorer = NULL;
4356 new_ka.sa.sa_handler = compat_ptr(handler);
4357 new_ka.sa.sa_restorer = compat_ptr(restorer);
4358 siginitset(&new_ka.sa.sa_mask, mask);
4361 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4364 if (!access_ok(oact, sizeof(*oact)) ||
4365 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4366 &oact->sa_handler) ||
4367 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4368 &oact->sa_restorer) ||
4369 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4370 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4377 #ifdef CONFIG_SGETMASK_SYSCALL
4380 * For backwards compatibility. Functionality superseded by sigprocmask.
4382 SYSCALL_DEFINE0(sgetmask)
4385 return current->blocked.sig[0];
4388 SYSCALL_DEFINE1(ssetmask, int, newmask)
4390 int old = current->blocked.sig[0];
4393 siginitset(&newset, newmask);
4394 set_current_blocked(&newset);
4398 #endif /* CONFIG_SGETMASK_SYSCALL */
4400 #ifdef __ARCH_WANT_SYS_SIGNAL
4402 * For backwards compatibility. Functionality superseded by sigaction.
4404 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4406 struct k_sigaction new_sa, old_sa;
4409 new_sa.sa.sa_handler = handler;
4410 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4411 sigemptyset(&new_sa.sa.sa_mask);
4413 ret = do_sigaction(sig, &new_sa, &old_sa);
4415 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4417 #endif /* __ARCH_WANT_SYS_SIGNAL */
4419 #ifdef __ARCH_WANT_SYS_PAUSE
4421 SYSCALL_DEFINE0(pause)
4423 while (!signal_pending(current)) {
4424 __set_current_state(TASK_INTERRUPTIBLE);
4427 return -ERESTARTNOHAND;
4432 static int sigsuspend(sigset_t *set)
4434 current->saved_sigmask = current->blocked;
4435 set_current_blocked(set);
4437 while (!signal_pending(current)) {
4438 __set_current_state(TASK_INTERRUPTIBLE);
4441 set_restore_sigmask();
4442 return -ERESTARTNOHAND;
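/*
 * Userspace sketch (illustrative): the classic race-free wait: block the
 * signal, test a flag, then atomically restore the old mask and sleep.
 * Here got_usr1 is a hypothetical flag set by the SIGUSR1 handler:
 *
 *	#include <signal.h>
 *
 *	volatile sig_atomic_t got_usr1;
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)
 *		sigsuspend(&old);	// unblock and sleep, atomically
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */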
4446 * sys_rt_sigsuspend - replace the signal mask with the
4447 * @unewset value until a signal is received
4448 * @unewset: new signal mask value
4449 * @sigsetsize: size of sigset_t type
4451 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4455 /* XXX: Don't preclude handling different sized sigset_t's. */
4456 if (sigsetsize != sizeof(sigset_t))
4459 if (copy_from_user(&newset, unewset, sizeof(newset)))
4461 return sigsuspend(&newset);
4464 #ifdef CONFIG_COMPAT
4465 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4469 /* XXX: Don't preclude handling different sized sigset_t's. */
4470 if (sigsetsize != sizeof(sigset_t))
4473 if (get_compat_sigset(&newset, unewset))
4475 return sigsuspend(&newset);
4479 #ifdef CONFIG_OLD_SIGSUSPEND
4480 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4483 siginitset(&blocked, mask);
4484 return sigsuspend(&blocked);
4487 #ifdef CONFIG_OLD_SIGSUSPEND3
4488 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4491 siginitset(&blocked, mask);
4492 return sigsuspend(&blocked);
4496 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4501 static inline void siginfo_buildtime_checks(void)
4503 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4505 /* Verify the offsets in the two siginfos match */
4506 #define CHECK_OFFSET(field) \
4507 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4510 CHECK_OFFSET(si_pid);
4511 CHECK_OFFSET(si_uid);
4514 CHECK_OFFSET(si_tid);
4515 CHECK_OFFSET(si_overrun);
4516 CHECK_OFFSET(si_value);
4519 CHECK_OFFSET(si_pid);
4520 CHECK_OFFSET(si_uid);
4521 CHECK_OFFSET(si_value);
4524 CHECK_OFFSET(si_pid);
4525 CHECK_OFFSET(si_uid);
4526 CHECK_OFFSET(si_status);
4527 CHECK_OFFSET(si_utime);
4528 CHECK_OFFSET(si_stime);
4531 CHECK_OFFSET(si_addr);
4532 CHECK_OFFSET(si_addr_lsb);
4533 CHECK_OFFSET(si_lower);
4534 CHECK_OFFSET(si_upper);
4535 CHECK_OFFSET(si_pkey);
4538 CHECK_OFFSET(si_band);
4539 CHECK_OFFSET(si_fd);
4542 CHECK_OFFSET(si_call_addr);
4543 CHECK_OFFSET(si_syscall);
4544 CHECK_OFFSET(si_arch);
4548 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4549 offsetof(struct siginfo, si_addr));
4550 if (sizeof(int) == sizeof(void __user *)) {
4551 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4552 sizeof(void __user *));
4554 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4555 sizeof_field(struct siginfo, si_uid)) !=
4556 sizeof(void __user *));
4557 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4558 offsetof(struct siginfo, si_uid));
4560 #ifdef CONFIG_COMPAT
4561 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4562 offsetof(struct compat_siginfo, si_addr));
4563 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4564 sizeof(compat_uptr_t));
4565 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4566 sizeof_field(struct siginfo, si_pid));
4570 void __init signals_init(void)
4572 siginfo_buildtime_checks();
4574 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4577 #ifdef CONFIG_KGDB_KDB
4578 #include <linux/kdb.h>
4580 * kdb_send_sig - Allows kdb to send signals without exposing
4581 * signal internals. This function checks if the required locks are
4582 * available before calling the main signal code, to avoid kdb deadlocks.
4585 void kdb_send_sig(struct task_struct *t, int sig)
4587 static struct task_struct *kdb_prev_t;
4589 if (!spin_trylock(&t->sighand->siglock)) {
4590 kdb_printf("Can't do kill command now.\n"
4591 "The sigmask lock is held somewhere else in "
4592 "kernel, try again later\n");
4595 new_t = kdb_prev_t != t;
4597 if (t->state != TASK_RUNNING && new_t) {
4598 spin_unlock(&t->sighand->siglock);
4599 kdb_printf("Process is not RUNNING, sending a signal from "
4600 "kdb risks deadlock\n"
4601 "on the run queue locks. "
4602 "The signal has _not_ been sent.\n"
4603 "Reissue the kill command if you want to risk "
4607 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4608 spin_unlock(&t->sighand->siglock);
4610 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4613 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4615 #endif /* CONFIG_KGDB_KDB */