/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>

#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);
}
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even an ignored signal, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
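/*
 * Illustrative note (not part of the original source): on a 64-bit
 * platform with _NSIG_WORDS == 1, if SIGINT and SIGTERM are pending
 * and SIGINT is blocked, then
 *
 *	signal->sig[0] == sigmask(SIGINT) | sigmask(SIGTERM);
 *	blocked->sig[0] == sigmask(SIGINT);
 *
 * leaves ready == sigmask(SIGTERM), so has_pending_signals() returns
 * non-zero and TIF_SIGPENDING stays set until SIGTERM is dequeued.
 */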
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know they should
	 * clear it do so.
	 */
	return 0;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
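/*
 * Illustrative note (not part of the original source): if a task has
 * both SIGUSR1 and SIGSEGV pending and neither is blocked, the
 * SYNCHRONOUS_MASK filtering above makes next_signal() report SIGSEGV
 * first, so a fault-generated signal is serviced before unrelated
 * asynchronous signals that happen to share the first word.
 */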
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Clear @mask from @task->jobctl and set it.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
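/*
 * Illustrative note (not part of the original source): SEND_SIG_NOINFO,
 * SEND_SIG_PRIV and SEND_SIG_FORCED are small constants (0, 1 and 2)
 * cast to struct siginfo *, which is why is_si_special() can use the
 * ordered comparison info <= SEND_SIG_FORCED without dereferencing
 * anything.  A sketch of the resulting classification:
 *
 *	si_fromuser(SEND_SIG_NOINFO)	-> true  (kill(2)-style sender)
 *	si_fromuser(SEND_SIG_PRIV)	-> false (kernel-internal)
 *	si_fromuser(&real_info)		-> SI_FROMUSER(&real_info)
 */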
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
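/*
 * Illustrative note (not part of the original source): legacy_queue()
 * is what makes classic signals coalesce.  If a task is sent SIGUSR1
 * twice before it runs, the second __send_signal() finds the bit
 * already set and drops the duplicate, while two sigqueue() calls for
 * a signal >= SIGRTMIN each enqueue their own sigqueue entry.
 */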
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
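/*
 * Illustrative sketch (not part of the original source): architecture
 * fault handlers are the typical callers of the force_*() family.  A
 * hypothetical page-fault path might end with something like
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR,
 *			(void __user *)fault_addr, current);
 *
 * which cannot be blocked or ignored away by the target task.
 */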
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
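/*
 * Illustrative summary (not part of the original source) of the pid
 * encoding handled above, mirroring kill(2):
 *
 *	pid > 0   signal the single process with that pid
 *	pid == 0  signal every process in the caller's process group
 *	pid == -1 signal every process the caller may signal, except
 *		  init (pid 1) and the caller's own thread group
 *	pid < -1  signal every process in process group -pid
 */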
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
}
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}
int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(info.si_signo, &info, current);
}
#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(info.si_signo, &info, current);
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
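/*
 * Illustrative lifecycle sketch (not part of the original source) of a
 * preallocated sigqueue as used by POSIX timers:
 *
 *	q = sigqueue_alloc();		// at timer_create(); may fail: EAGAIN
 *	...
 *	send_sigqueue(q, tsk, group);	// at each expiry, never allocates
 *	...
 *	sigqueue_free(q);		// at timer_delete()
 *
 * Because the queue entry exists up front, expiry-time delivery cannot
 * be dropped for lack of memory or RLIMIT_SIGPENDING headroom.
 */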
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
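/*
 * Illustrative sequence (not part of the original source) for a plain,
 * untraced multi-threaded process receiving SIGSTOP: the dequeuing
 * thread initiates the stop, sets JOBCTL_STOP_PENDING on its siblings
 * and wakes them; each thread then calls do_signal_stop(), consumes one
 * unit of group_stop_count, and the last one to stop reports
 * CLD_STOPPED to the real parent before scheduling away.
 */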
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
2241 static int ptrace_signal(int signr, siginfo_t *info)
2244 * We do not check sig_kernel_stop(signr) but set this marker
2245 * unconditionally because we do not know whether debugger will
2246 * change signr. This flag has no meaning unless we are going
2247 * to stop after return from ptrace_stop(). In this case it will
2248 * be checked in do_signal_stop(), we should only stop if it was
2249 * not cleared by SIGCONT while we were sleeping. See also the
2250 * comment in dequeue_signal().
2252 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2253 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2255 /* We're back. Did the debugger cancel the sig? */
2256 signr = current->exit_code;
2260 current->exit_code = 0;
2263 * Update the siginfo structure if the signal has
2264 * changed. If the debugger wanted something
2265 * specific in the siginfo structure then it should
2266 * have updated *info via PTRACE_SETSIGINFO.
2268 if (signr != info->si_signo) {
2269 clear_siginfo(info);
2270 info->si_signo = signr;
2272 info->si_code = SI_USER;
2274 info->si_pid = task_pid_vnr(current->parent);
2275 info->si_uid = from_kuid_munged(current_user_ns(),
2276 task_uid(current->parent));
2280 /* If the (new) signal is now blocked, requeue it. */
2281 if (sigismember(&current->blocked, signr)) {
2282 specific_send_sig_info(signr, info, current);
2289 int get_signal(struct ksignal *ksig)
2291 struct sighand_struct *sighand = current->sighand;
2292 struct signal_struct *signal = current->signal;
2295 if (unlikely(current->task_works))
2298 if (unlikely(uprobe_deny_signal()))
2302 * Do this once, we can't return to user-mode if freezing() == T.
2303 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2304 * thus do not need another check after return.
2309 spin_lock_irq(&sighand->siglock);
2311 * Every stopped thread goes here after wakeup. Check to see if
2312 * we should notify the parent, prepare_signal(SIGCONT) encodes
2313 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2315 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2318 if (signal->flags & SIGNAL_CLD_CONTINUED)
2319 why = CLD_CONTINUED;
2323 signal->flags &= ~SIGNAL_CLD_MASK;
2325 spin_unlock_irq(&sighand->siglock);
2328 * Notify the parent that we're continuing. This event is
2329 * always per-process and doesn't make a whole lot of sense
2330 * for ptracers, who shouldn't consume the state via
2331 * wait(2) either, but, for backward compatibility, notify
2332 * the ptracer of the group leader too unless it's gonna be a duplicate.
2335 read_lock(&tasklist_lock);
2336 do_notify_parent_cldstop(current, false, why);
2338 if (ptrace_reparented(current->group_leader))
2339 do_notify_parent_cldstop(current->group_leader,
2341 read_unlock(&tasklist_lock);
2347 struct k_sigaction *ka;
2349 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2353 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2355 spin_unlock_irq(&sighand->siglock);
2359 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2362 break; /* will return 0 */
2364 if (unlikely(current->ptrace) && signr != SIGKILL) {
2365 signr = ptrace_signal(signr, &ksig->info);
2370 ka = &sighand->action[signr-1];
2372 /* Trace actually delivered signals. */
2373 trace_signal_deliver(signr, &ksig->info, ka);
2375 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2377 if (ka->sa.sa_handler != SIG_DFL) {
2378 /* Run the handler. */
2381 if (ka->sa.sa_flags & SA_ONESHOT)
2382 ka->sa.sa_handler = SIG_DFL;
2384 break; /* will return non-zero "signr" value */
2388 * Now we are doing the default action for this signal.
2390 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2394 * Global init gets no signals it doesn't want.
2395 * Container-init gets no signals it doesn't want from the same container.
2398 * Note that if global/container-init sees a sig_kernel_only()
2399 * signal here, the signal must have been generated internally
2400 * or must have come from an ancestor namespace. In either
2401 * case, the signal cannot be dropped.
2403 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2404 !sig_kernel_only(signr))
2407 if (sig_kernel_stop(signr)) {
2409 * The default action is to stop all threads in
2410 * the thread group. The job control signals
2411 * do nothing in an orphaned pgrp, but SIGSTOP
2412 * always works. Note that siglock needs to be
2413 * dropped during the call to is_orphaned_pgrp()
2414 * because of lock ordering with tasklist_lock.
2415 * This allows an intervening SIGCONT to be posted.
2416 * We need to check for that and bail out if necessary.
2418 if (signr != SIGSTOP) {
2419 spin_unlock_irq(&sighand->siglock);
2421 /* signals can be posted during this window */
2423 if (is_current_pgrp_orphaned())
2426 spin_lock_irq(&sighand->siglock);
2429 if (likely(do_signal_stop(ksig->info.si_signo))) {
2430 /* It released the siglock. */
2435 * We didn't actually stop, due to a race
2436 * with SIGCONT or something like that.
2441 spin_unlock_irq(&sighand->siglock);
2444 * Anything else is fatal, maybe with a core dump.
2446 current->flags |= PF_SIGNALED;
2448 if (sig_kernel_coredump(signr)) {
2449 if (print_fatal_signals)
2450 print_fatal_signal(ksig->info.si_signo);
2451 proc_coredump_connector(current);
2453 * If it was able to dump core, this kills all
2454 * other threads in the group and synchronizes with
2455 * their demise. If we lost the race with another
2456 * thread getting here, it set group_exit_code
2457 * first and our do_group_exit call below will use
2458 * that value and ignore the one we pass it.
2460 do_coredump(&ksig->info);
2464 * Death signals, no core dump.
2466 do_group_exit(ksig->info.si_signo);
2469 spin_unlock_irq(&sighand->siglock);
2472 return ksig->sig > 0;
2476 * signal_delivered - complete delivery of a signal
2477 * @ksig: kernel signal struct
2478 * @stepping: nonzero if debugger single-step or block-step in use
2480 * This function should be called when a signal has successfully been
2481 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2482 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2483 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2485 static void signal_delivered(struct ksignal *ksig, int stepping)
2489 /* A signal was successfully delivered, and the
2490  * saved sigmask was stored on the signal frame,
2491  * and will be restored by sigreturn. So we can
2492  * simply clear the restore sigmask flag. */
2493 clear_restore_sigmask();
2495 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2496 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2497 sigaddset(&blocked, ksig->sig);
2498 set_current_blocked(&blocked);
2499 tracehook_signal_handler(stepping);
2502 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2505 force_sigsegv(ksig->sig, current);
2507 signal_delivered(ksig, stepping);
2511 * It could be that complete_signal() picked us to notify about the
2512 * group-wide signal. Other threads should be notified now to take
2513 * the shared signals in @which since we will not.
2515 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2518 struct task_struct *t;
2520 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2521 if (sigisemptyset(&retarget))
2525 while_each_thread(tsk, t) {
2526 if (t->flags & PF_EXITING)
2529 if (!has_pending_signals(&retarget, &t->blocked))
2531 /* Remove the signals this thread can handle. */
2532 sigandsets(&retarget, &retarget, &t->blocked);
2534 if (!signal_pending(t))
2535 signal_wake_up(t, 0);
2537 if (sigisemptyset(&retarget))
2542 void exit_signals(struct task_struct *tsk)
2548 * @tsk is about to have PF_EXITING set - lock out users which
2549 * expect a stable threadgroup.
2551 cgroup_threadgroup_change_begin(tsk);
2553 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2554 tsk->flags |= PF_EXITING;
2555 cgroup_threadgroup_change_end(tsk);
2559 spin_lock_irq(&tsk->sighand->siglock);
2561 * From now this task is not visible for group-wide signals,
2562 * see wants_signal(), do_signal_stop().
2564 tsk->flags |= PF_EXITING;
2566 cgroup_threadgroup_change_end(tsk);
2568 if (!signal_pending(tsk))
2571 unblocked = tsk->blocked;
2572 signotset(&unblocked);
2573 retarget_shared_pending(tsk, &unblocked);
2575 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2576 task_participate_group_stop(tsk))
2577 group_stop = CLD_STOPPED;
2579 spin_unlock_irq(&tsk->sighand->siglock);
2582 * If group stop has completed, deliver the notification. This
2583 * should always go to the real parent of the group leader.
2585 if (unlikely(group_stop)) {
2586 read_lock(&tasklist_lock);
2587 do_notify_parent_cldstop(tsk, false, group_stop);
2588 read_unlock(&tasklist_lock);
2592 EXPORT_SYMBOL(recalc_sigpending);
2593 EXPORT_SYMBOL_GPL(dequeue_signal);
2594 EXPORT_SYMBOL(flush_signals);
2595 EXPORT_SYMBOL(force_sig);
2596 EXPORT_SYMBOL(send_sig);
2597 EXPORT_SYMBOL(send_sig_info);
2598 EXPORT_SYMBOL(sigprocmask);
2601 * System call entry points.
2605 * sys_restart_syscall - restart a system call
2607 SYSCALL_DEFINE0(restart_syscall)
2609 struct restart_block *restart = &current->restart_block;
2610 return restart->fn(restart);
2613 long do_no_restart_syscall(struct restart_block *param)
2618 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2620 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2621 sigset_t newblocked;
2622 /* A set of now blocked but previously unblocked signals. */
2623 sigandnsets(&newblocked, newset, &current->blocked);
2624 retarget_shared_pending(tsk, &newblocked);
2626 tsk->blocked = *newset;
2627 recalc_sigpending();
2631 * set_current_blocked - change current->blocked mask
2634 * It is wrong to change ->blocked directly, this helper should be used
2635 * to ensure the process can't miss a shared signal we are going to block.
2637 void set_current_blocked(sigset_t *newset)
2639 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2640 __set_current_blocked(newset);
2643 void __set_current_blocked(const sigset_t *newset)
2645 struct task_struct *tsk = current;
2648 * In case the signal mask hasn't changed, there is nothing we need
2649 * to do. current->blocked shouldn't be modified by any other task.
2651 if (sigequalsets(&tsk->blocked, newset))
2654 spin_lock_irq(&tsk->sighand->siglock);
2655 __set_task_blocked(tsk, newset);
2656 spin_unlock_irq(&tsk->sighand->siglock);
2660 * This is also useful for kernel threads that want to temporarily
2661 * (or permanently) block certain signals.
2663 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2664 * interface happily blocks "unblockable" signals like SIGKILL and friends.
2667 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2669 struct task_struct *tsk = current;
2672 /* Lockless, only current can change ->blocked, never from irq */
2674 *oldset = tsk->blocked;
2678 sigorsets(&newset, &tsk->blocked, set);
2681 sigandnsets(&newset, &tsk->blocked, set);
2690 __set_current_blocked(&newset);
2695 * sys_rt_sigprocmask - change the list of currently blocked signals
2696 * @how: whether to add, remove, or set signals
2697 * @nset: signals to add, remove, or assign, depending on @how (may be NULL)
2698 * @oset: previous value of signal mask if non-null
2699 * @sigsetsize: size of sigset_t type
2701 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2702 sigset_t __user *, oset, size_t, sigsetsize)
2704 sigset_t old_set, new_set;
2707 /* XXX: Don't preclude handling different sized sigset_t's. */
2708 if (sigsetsize != sizeof(sigset_t))
2711 old_set = current->blocked;
2714 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2716 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2718 error = sigprocmask(how, &new_set, NULL);
2724 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
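/*
 * An illustrative userspace sketch (assuming the glibc sigprocmask()
 * wrapper, which calls rt_sigprocmask; not part of this file). SIGKILL
 * and SIGSTOP are silently deleted from the new mask, as above.
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// block SIGINT
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore the old mask
 */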
2731 #ifdef CONFIG_COMPAT
2732 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2733 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2735 sigset_t old_set = current->blocked;
2737 /* XXX: Don't preclude handling different sized sigset_t's. */
2738 if (sigsetsize != sizeof(sigset_t))
2744 if (get_compat_sigset(&new_set, nset))
2746 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2748 error = sigprocmask(how, &new_set, NULL);
2752 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2756 static int do_sigpending(sigset_t *set)
2758 spin_lock_irq(&current->sighand->siglock);
2759 sigorsets(set, &current->pending.signal,
2760 &current->signal->shared_pending.signal);
2761 spin_unlock_irq(&current->sighand->siglock);
2763 /* Outside the lock because only this thread touches it. */
2764 sigandsets(set, &current->blocked, set);
2769 * sys_rt_sigpending - examine pending signals that have been raised while blocked
2771 * @uset: where the set of pending signals is returned
2772 * @sigsetsize: size of sigset_t type or larger
2774 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2779 if (sigsetsize > sizeof(*uset))
2782 err = do_sigpending(&set);
2783 if (!err && copy_to_user(uset, &set, sigsetsize))
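/*
 * An illustrative userspace sketch (glibc sigpending() wrapper; not part
 * of this file): a signal raised while blocked shows up in the set
 * reported here until it is unblocked or discarded.
 *
 *	sigset_t pend;
 *
 *	raise(SIGUSR1);		// assumes SIGUSR1 is currently blocked
 *	sigpending(&pend);
 *	if (sigismember(&pend, SIGUSR1))
 *		;		// SIGUSR1 is queued, awaiting unblock
 */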
2788 #ifdef CONFIG_COMPAT
2789 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2790 compat_size_t, sigsetsize)
2795 if (sigsetsize > sizeof(*uset))
2798 err = do_sigpending(&set);
2800 err = put_compat_sigset(uset, &set, sigsetsize);
2805 enum siginfo_layout siginfo_layout(int sig, int si_code)
2807 enum siginfo_layout layout = SIL_KILL;
2808 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
2809 static const struct {
2810 unsigned char limit, layout;
2812 [SIGILL] = { NSIGILL, SIL_FAULT },
2813 [SIGFPE] = { NSIGFPE, SIL_FAULT },
2814 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
2815 [SIGBUS] = { NSIGBUS, SIL_FAULT },
2816 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
2817 #if defined(SIGEMT) && defined(NSIGEMT)
2818 [SIGEMT] = { NSIGEMT, SIL_FAULT },
2820 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
2821 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
2822 [SIGSYS] = { NSIGSYS, SIL_SYS },
2824 if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
2825 layout = filter[sig].layout;
2826 /* Handle the exceptions */
2827 if ((sig == SIGBUS) &&
2828 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
2829 layout = SIL_FAULT_MCEERR;
2830 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
2831 layout = SIL_FAULT_BNDERR;
2833 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
2834 layout = SIL_FAULT_PKUERR;
2837 else if (si_code <= NSIGPOLL)
2840 if (si_code == SI_TIMER)
2842 else if (si_code == SI_SIGIO)
2844 else if (si_code < 0)
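/*
 * For example, siginfo_layout(SIGSEGV, SEGV_MAPERR) resolves to SIL_FAULT
 * through the filter table, while siginfo_layout(SIGSEGV, SEGV_BNDERR)
 * is rewritten to SIL_FAULT_BNDERR by the exception handling above.
 */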
2850 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2852 if (copy_to_user(to, from, sizeof(struct siginfo)))
2857 #ifdef CONFIG_COMPAT
2858 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
2859 const struct siginfo *from)
2860 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
2862 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
2864 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
2865 const struct siginfo *from, bool x32_ABI)
2868 struct compat_siginfo new;
2869 memset(&new, 0, sizeof(new));
2871 new.si_signo = from->si_signo;
2872 new.si_errno = from->si_errno;
2873 new.si_code = from->si_code;
2874 switch (siginfo_layout(from->si_signo, from->si_code)) {
2876 new.si_pid = from->si_pid;
2877 new.si_uid = from->si_uid;
2880 new.si_tid = from->si_tid;
2881 new.si_overrun = from->si_overrun;
2882 new.si_int = from->si_int;
2885 new.si_band = from->si_band;
2886 new.si_fd = from->si_fd;
2889 new.si_addr = ptr_to_compat(from->si_addr);
2890 #ifdef __ARCH_SI_TRAPNO
2891 new.si_trapno = from->si_trapno;
2894 case SIL_FAULT_MCEERR:
2895 new.si_addr = ptr_to_compat(from->si_addr);
2896 #ifdef __ARCH_SI_TRAPNO
2897 new.si_trapno = from->si_trapno;
2899 new.si_addr_lsb = from->si_addr_lsb;
2901 case SIL_FAULT_BNDERR:
2902 new.si_addr = ptr_to_compat(from->si_addr);
2903 #ifdef __ARCH_SI_TRAPNO
2904 new.si_trapno = from->si_trapno;
2906 new.si_lower = ptr_to_compat(from->si_lower);
2907 new.si_upper = ptr_to_compat(from->si_upper);
2909 case SIL_FAULT_PKUERR:
2910 new.si_addr = ptr_to_compat(from->si_addr);
2911 #ifdef __ARCH_SI_TRAPNO
2912 new.si_trapno = from->si_trapno;
2914 new.si_pkey = from->si_pkey;
2917 new.si_pid = from->si_pid;
2918 new.si_uid = from->si_uid;
2919 new.si_status = from->si_status;
2920 #ifdef CONFIG_X86_X32_ABI
2922 new._sifields._sigchld_x32._utime = from->si_utime;
2923 new._sifields._sigchld_x32._stime = from->si_stime;
2927 new.si_utime = from->si_utime;
2928 new.si_stime = from->si_stime;
2932 new.si_pid = from->si_pid;
2933 new.si_uid = from->si_uid;
2934 new.si_int = from->si_int;
2937 new.si_call_addr = ptr_to_compat(from->si_call_addr);
2938 new.si_syscall = from->si_syscall;
2939 new.si_arch = from->si_arch;
2943 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
2949 int copy_siginfo_from_user32(struct siginfo *to,
2950 const struct compat_siginfo __user *ufrom)
2952 struct compat_siginfo from;
2954 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
2958 to->si_signo = from.si_signo;
2959 to->si_errno = from.si_errno;
2960 to->si_code = from.si_code;
2961 switch (siginfo_layout(from.si_signo, from.si_code)) {
2963 to->si_pid = from.si_pid;
2964 to->si_uid = from.si_uid;
2967 to->si_tid = from.si_tid;
2968 to->si_overrun = from.si_overrun;
2969 to->si_int = from.si_int;
2972 to->si_band = from.si_band;
2973 to->si_fd = from.si_fd;
2976 to->si_addr = compat_ptr(from.si_addr);
2977 #ifdef __ARCH_SI_TRAPNO
2978 to->si_trapno = from.si_trapno;
2981 case SIL_FAULT_MCEERR:
2982 to->si_addr = compat_ptr(from.si_addr);
2983 #ifdef __ARCH_SI_TRAPNO
2984 to->si_trapno = from.si_trapno;
2986 to->si_addr_lsb = from.si_addr_lsb;
2988 case SIL_FAULT_BNDERR:
2989 to->si_addr = compat_ptr(from.si_addr);
2990 #ifdef __ARCH_SI_TRAPNO
2991 to->si_trapno = from.si_trapno;
2993 to->si_lower = compat_ptr(from.si_lower);
2994 to->si_upper = compat_ptr(from.si_upper);
2996 case SIL_FAULT_PKUERR:
2997 to->si_addr = compat_ptr(from.si_addr);
2998 #ifdef __ARCH_SI_TRAPNO
2999 to->si_trapno = from.si_trapno;
3001 to->si_pkey = from.si_pkey;
3004 to->si_pid = from.si_pid;
3005 to->si_uid = from.si_uid;
3006 to->si_status = from.si_status;
3007 #ifdef CONFIG_X86_X32_ABI
3008 if (in_x32_syscall()) {
3009 to->si_utime = from._sifields._sigchld_x32._utime;
3010 to->si_stime = from._sifields._sigchld_x32._stime;
3014 to->si_utime = from.si_utime;
3015 to->si_stime = from.si_stime;
3019 to->si_pid = from.si_pid;
3020 to->si_uid = from.si_uid;
3021 to->si_int = from.si_int;
3024 to->si_call_addr = compat_ptr(from.si_call_addr);
3025 to->si_syscall = from.si_syscall;
3026 to->si_arch = from.si_arch;
3031 #endif /* CONFIG_COMPAT */
3034 * do_sigtimedwait - wait for queued signals specified in @which
3035 * @which: queued signals to wait for
3036 * @info: if non-null, the signal's siginfo is returned here
3037 * @ts: upper bound on process time suspension
3039 static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
3040 const struct timespec *ts)
3042 ktime_t *to = NULL, timeout = KTIME_MAX;
3043 struct task_struct *tsk = current;
3044 sigset_t mask = *which;
3048 if (!timespec_valid(ts))
3050 timeout = timespec_to_ktime(*ts);
3055 * Invert the set of allowed signals to get those we want to block.
3057 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3060 spin_lock_irq(&tsk->sighand->siglock);
3061 sig = dequeue_signal(tsk, &mask, info);
3062 if (!sig && timeout) {
3064 * None ready, temporarily unblock those we're interested in
3065 * while we are sleeping so that we'll be awakened when
3066 * they arrive. Unblocking is always fine, we can avoid
3067 * set_current_blocked().
3069 tsk->real_blocked = tsk->blocked;
3070 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3071 recalc_sigpending();
3072 spin_unlock_irq(&tsk->sighand->siglock);
3074 __set_current_state(TASK_INTERRUPTIBLE);
3075 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3077 spin_lock_irq(&tsk->sighand->siglock);
3078 __set_task_blocked(tsk, &tsk->real_blocked);
3079 sigemptyset(&tsk->real_blocked);
3080 sig = dequeue_signal(tsk, &mask, info);
3082 spin_unlock_irq(&tsk->sighand->siglock);
3086 return ret ? -EINTR : -EAGAIN;
3090 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3092 * @uthese: queued signals to wait for
3093 * @uinfo: if non-null, the signal's siginfo is returned here
3094 * @uts: upper bound on process time suspension
3095 * @sigsetsize: size of sigset_t type
3097 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3098 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3106 /* XXX: Don't preclude handling different sized sigset_t's. */
3107 if (sigsetsize != sizeof(sigset_t))
3110 if (copy_from_user(&these, uthese, sizeof(these)))
3114 if (copy_from_user(&ts, uts, sizeof(ts)))
3118 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3120 if (ret > 0 && uinfo) {
3121 if (copy_siginfo_to_user(uinfo, &info))
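/*
 * An illustrative userspace sketch (glibc sigtimedwait() wrapper; not
 * part of this file): block the signal first so it stays queued, then
 * wait for it synchronously with a two-second upper bound.
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGCHLD)
 *		;	// si.si_pid identifies the child that changed state
 */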
3128 #ifdef CONFIG_COMPAT
3129 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
3130 struct compat_siginfo __user *, uinfo,
3131 struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
3138 if (sigsetsize != sizeof(sigset_t))
3141 if (get_compat_sigset(&s, uthese))
3145 if (compat_get_timespec(&t, uts))
3149 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3151 if (ret > 0 && uinfo) {
3152 if (copy_siginfo_to_user32(uinfo, &info))
3161 * sys_kill - send a signal to a process
3162 * @pid: the PID of the process
3163 * @sig: signal to be sent
3165 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3167 struct siginfo info;
3169 clear_siginfo(&info);
3170 info.si_signo = sig;
3172 info.si_code = SI_USER;
3173 info.si_pid = task_tgid_vnr(current);
3174 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3176 return kill_something_info(sig, &info, pid);
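/*
 * An illustrative userspace sketch (not part of this file): signal 0
 * performs only the existence and permission checks, so kill() can be
 * used to probe for a live process.
 *
 *	if (kill(pid, 0) == 0 || errno == EPERM)
 *		;		// process exists
 *	else if (errno == ESRCH)
 *		;		// no such process
 */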
3180 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3182 struct task_struct *p;
3186 p = find_task_by_vpid(pid);
3187 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3188 error = check_kill_permission(sig, info, p);
3190 * The null signal is a permissions and process existence
3191 * probe. No signal is actually delivered.
3193 if (!error && sig) {
3194 error = do_send_sig_info(sig, info, p, false);
3196 * If lock_task_sighand() failed we pretend the task
3197 * dies after receiving the signal. The window is tiny,
3198 * and the signal is private anyway.
3200 if (unlikely(error == -ESRCH))
3209 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3211 struct siginfo info;
3213 clear_siginfo(&info);
3214 info.si_signo = sig;
3216 info.si_code = SI_TKILL;
3217 info.si_pid = task_tgid_vnr(current);
3218 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3220 return do_send_specific(tgid, pid, sig, &info);
3224 * sys_tgkill - send signal to one specific thread
3225 * @tgid: the thread group ID of the thread
3226 * @pid: the PID of the thread
3227 * @sig: signal to be sent
3229 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3230 * exists but no longer belongs to the target process. This
3231 * method solves the problem of threads exiting and PIDs getting reused.
3233 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3235 /* This is only valid for single tasks */
3236 if (pid <= 0 || tgid <= 0)
3239 return do_tkill(tgid, pid, sig);
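/*
 * An illustrative userspace sketch (raw syscall(2), since many libcs
 * ship no tgkill() wrapper; not part of this file): tid must be the
 * kernel thread ID, e.g. from gettid(), and the tgid argument guards
 * against the PID-reuse race described above.
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */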
3243 * sys_tkill - send signal to one specific task
3244 * @pid: the PID of the task
3245 * @sig: signal to be sent
3247 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3249 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3251 /* This is only valid for single tasks */
3255 return do_tkill(0, pid, sig);
3258 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3260 /* Not even root can pretend to send signals from the kernel.
3261 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3263 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3264 (task_pid_vnr(current) != pid))
3267 info->si_signo = sig;
3269 /* POSIX.1b doesn't mention process groups. */
3270 return kill_proc_info(sig, info, pid);
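/*
 * An illustrative userspace sketch (glibc sigqueue() wrapper, which
 * reaches this path via rt_sigqueueinfo; not part of this file): glibc
 * fills in si_code = SI_QUEUE, which is negative and therefore passes
 * the impersonation check above.
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, v);	// receiver sees si_int == 42
 */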
3274 * sys_rt_sigqueueinfo - queue a signal and its payload to a process
3275 * @pid: the PID of the target process
3276 * @sig: signal to be sent
3277 * @uinfo: signal info to be sent
3279 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3280 siginfo_t __user *, uinfo)
3283 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3285 return do_rt_sigqueueinfo(pid, sig, &info);
3288 #ifdef CONFIG_COMPAT
3289 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3292 struct compat_siginfo __user *, uinfo)
3295 int ret = copy_siginfo_from_user32(&info, uinfo);
3298 return do_rt_sigqueueinfo(pid, sig, &info);
3302 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3304 /* This is only valid for single tasks */
3305 if (pid <= 0 || tgid <= 0)
3308 /* Not even root can pretend to send signals from the kernel.
3309 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3311 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3312 (task_pid_vnr(current) != pid))
3315 info->si_signo = sig;
3317 return do_send_specific(tgid, pid, sig, info);
3320 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3321 siginfo_t __user *, uinfo)
3325 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3328 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3331 #ifdef CONFIG_COMPAT
3332 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3336 struct compat_siginfo __user *, uinfo)
3340 if (copy_siginfo_from_user32(&info, uinfo))
3342 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3347 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3349 void kernel_sigaction(int sig, __sighandler_t action)
3351 spin_lock_irq(&current->sighand->siglock);
3352 current->sighand->action[sig - 1].sa.sa_handler = action;
3353 if (action == SIG_IGN) {
3357 sigaddset(&mask, sig);
3359 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3360 flush_sigqueue_mask(&mask, &current->pending);
3361 recalc_sigpending();
3363 spin_unlock_irq(&current->sighand->siglock);
3365 EXPORT_SYMBOL(kernel_sigaction);
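/*
 * An illustrative in-kernel sketch (not part of this file): a kthread
 * opting in to SIGTERM via allow_signal(), a thin wrapper around
 * kernel_sigaction().
 *
 *	static int worker_fn(void *unused)	// hypothetical kthread
 *	{
 *		allow_signal(SIGTERM);
 *		while (!kthread_should_stop()) {
 *			// ... do work ...
 *			if (signal_pending(current))
 *				break;		// asked to shut down
 *		}
 *		return 0;
 *	}
 */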
3367 void __weak sigaction_compat_abi(struct k_sigaction *act,
3368 struct k_sigaction *oact)
3372 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3374 struct task_struct *p = current, *t;
3375 struct k_sigaction *k;
3378 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3381 k = &p->sighand->action[sig-1];
3383 spin_lock_irq(&p->sighand->siglock);
3387 sigaction_compat_abi(act, oact);
3390 sigdelsetmask(&act->sa.sa_mask,
3391 sigmask(SIGKILL) | sigmask(SIGSTOP));
3395 * "Setting a signal action to SIG_IGN for a signal that is
3396 * pending shall cause the pending signal to be discarded,
3397 * whether or not it is blocked."
3399 * "Setting a signal action to SIG_DFL for a signal that is
3400 * pending and whose default action is to ignore the signal
3401 * (for example, SIGCHLD), shall cause the pending signal to
3402 * be discarded, whether or not it is blocked"
3404 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3406 sigaddset(&mask, sig);
3407 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3408 for_each_thread(p, t)
3409 flush_sigqueue_mask(&mask, &t->pending);
3413 spin_unlock_irq(&p->sighand->siglock);
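/*
 * An illustrative userspace sketch (glibc sigaction() wrapper; not part
 * of this file): install a handler with a per-handler mask. Per the
 * POSIX rule quoted above, later switching the action to SIG_IGN would
 * also discard a pending instance of the signal.
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_handler = on_sigint;		// hypothetical handler
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGTERM);	// blocked while handler runs
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGINT, &sa, NULL);
 */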
3418 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp)
3420 struct task_struct *t = current;
3423 memset(oss, 0, sizeof(stack_t));
3424 oss->ss_sp = (void __user *) t->sas_ss_sp;
3425 oss->ss_size = t->sas_ss_size;
3426 oss->ss_flags = sas_ss_flags(sp) |
3427 (current->sas_ss_flags & SS_FLAG_BITS);
3431 void __user *ss_sp = ss->ss_sp;
3432 size_t ss_size = ss->ss_size;
3433 unsigned ss_flags = ss->ss_flags;
3436 if (unlikely(on_sig_stack(sp)))
3439 ss_mode = ss_flags & ~SS_FLAG_BITS;
3440 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3444 if (ss_mode == SS_DISABLE) {
3448 if (unlikely(ss_size < MINSIGSTKSZ))
3452 t->sas_ss_sp = (unsigned long) ss_sp;
3453 t->sas_ss_size = ss_size;
3454 t->sas_ss_flags = ss_flags;
3459 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3463 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3465 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3466 current_user_stack_pointer());
3467 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
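/*
 * An illustrative userspace sketch (glibc sigaltstack() wrapper; not
 * part of this file): the alternate stack is only used by handlers
 * installed with SA_ONSTACK.
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = { 0 };
 *
 *	sigaltstack(&ss, NULL);
 *	sa.sa_handler = on_segv;	// hypothetical handler
 *	sa.sa_flags = SA_ONSTACK;	// actually run on the new stack
 *	sigaction(SIGSEGV, &sa, NULL);
 */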
3472 int restore_altstack(const stack_t __user *uss)
3475 if (copy_from_user(&new, uss, sizeof(stack_t)))
3477 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
3478 /* squash all but EFAULT for now */
3482 int __save_altstack(stack_t __user *uss, unsigned long sp)
3484 struct task_struct *t = current;
3485 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3486 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3487 __put_user(t->sas_ss_size, &uss->ss_size);
3490 if (t->sas_ss_flags & SS_AUTODISARM)
3495 #ifdef CONFIG_COMPAT
3496 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
3497 compat_stack_t __user *uoss_ptr)
3503 compat_stack_t uss32;
3504 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3506 uss.ss_sp = compat_ptr(uss32.ss_sp);
3507 uss.ss_flags = uss32.ss_flags;
3508 uss.ss_size = uss32.ss_size;
3510 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
3511 compat_user_stack_pointer());
3512 if (ret >= 0 && uoss_ptr) {
3514 memset(&old, 0, sizeof(old));
3515 old.ss_sp = ptr_to_compat(uoss.ss_sp);
3516 old.ss_flags = uoss.ss_flags;
3517 old.ss_size = uoss.ss_size;
3518 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
3524 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3525 const compat_stack_t __user *, uss_ptr,
3526 compat_stack_t __user *, uoss_ptr)
3528 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
3531 int compat_restore_altstack(const compat_stack_t __user *uss)
3533 int err = do_compat_sigaltstack(uss, NULL);
3534 /* squash all but -EFAULT for now */
3535 return err == -EFAULT ? err : 0;
3538 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3541 struct task_struct *t = current;
3542 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3544 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3545 __put_user(t->sas_ss_size, &uss->ss_size);
3548 if (t->sas_ss_flags & SS_AUTODISARM)
3554 #ifdef __ARCH_WANT_SYS_SIGPENDING
3557 * sys_sigpending - examine pending signals
3558 * @uset: where the mask of pending signals is returned
3560 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
3565 if (sizeof(old_sigset_t) > sizeof(*uset))
3568 err = do_sigpending(&set);
3569 if (!err && copy_to_user(uset, &set, sizeof(old_sigset_t)))
3574 #ifdef CONFIG_COMPAT
3575 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3578 int err = do_sigpending(&set);
3580 err = put_user(set.sig[0], set32);
3587 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3589 * sys_sigprocmask - examine and change blocked signals
3590 * @how: whether to add, remove, or set signals
3591 * @nset: signals to add or remove (if non-null)
3592 * @oset: previous value of signal mask if non-null
3594 * Some platforms have their own version with special arguments;
3595 * others support only sys_rt_sigprocmask.
3598 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3599 old_sigset_t __user *, oset)
3601 old_sigset_t old_set, new_set;
3602 sigset_t new_blocked;
3604 old_set = current->blocked.sig[0];
3607 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3610 new_blocked = current->blocked;
3614 sigaddsetmask(&new_blocked, new_set);
3617 sigdelsetmask(&new_blocked, new_set);
3620 new_blocked.sig[0] = new_set;
3626 set_current_blocked(&new_blocked);
3630 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3636 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3638 #ifndef CONFIG_ODD_RT_SIGACTION
3640 * sys_rt_sigaction - alter an action taken by a process
3641 * @sig: signal to be sent
3642 * @act: new sigaction
3643 * @oact: used to save the previous sigaction
3644 * @sigsetsize: size of sigset_t type
3646 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3647 const struct sigaction __user *, act,
3648 struct sigaction __user *, oact,
3651 struct k_sigaction new_sa, old_sa;
3654 /* XXX: Don't preclude handling different sized sigset_t's. */
3655 if (sigsetsize != sizeof(sigset_t))
3659 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3663 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3666 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3672 #ifdef CONFIG_COMPAT
3673 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3674 const struct compat_sigaction __user *, act,
3675 struct compat_sigaction __user *, oact,
3676 compat_size_t, sigsetsize)
3678 struct k_sigaction new_ka, old_ka;
3679 #ifdef __ARCH_HAS_SA_RESTORER
3680 compat_uptr_t restorer;
3684 /* XXX: Don't preclude handling different sized sigset_t's. */
3685 if (sigsetsize != sizeof(compat_sigset_t))
3689 compat_uptr_t handler;
3690 ret = get_user(handler, &act->sa_handler);
3691 new_ka.sa.sa_handler = compat_ptr(handler);
3692 #ifdef __ARCH_HAS_SA_RESTORER
3693 ret |= get_user(restorer, &act->sa_restorer);
3694 new_ka.sa.sa_restorer = compat_ptr(restorer);
3696 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
3697 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3702 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3704 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3706 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
3707 sizeof(oact->sa_mask));
3708 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3709 #ifdef __ARCH_HAS_SA_RESTORER
3710 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3711 &oact->sa_restorer);
3717 #endif /* !CONFIG_ODD_RT_SIGACTION */
3719 #ifdef CONFIG_OLD_SIGACTION
3720 SYSCALL_DEFINE3(sigaction, int, sig,
3721 const struct old_sigaction __user *, act,
3722 struct old_sigaction __user *, oact)
3724 struct k_sigaction new_ka, old_ka;
3729 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3730 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3731 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3732 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3733 __get_user(mask, &act->sa_mask))
3735 #ifdef __ARCH_HAS_KA_RESTORER
3736 new_ka.ka_restorer = NULL;
3738 siginitset(&new_ka.sa.sa_mask, mask);
3741 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3744 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3745 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3746 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3747 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3748 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3755 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3756 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3757 const struct compat_old_sigaction __user *, act,
3758 struct compat_old_sigaction __user *, oact)
3760 struct k_sigaction new_ka, old_ka;
3762 compat_old_sigset_t mask;
3763 compat_uptr_t handler, restorer;
3766 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3767 __get_user(handler, &act->sa_handler) ||
3768 __get_user(restorer, &act->sa_restorer) ||
3769 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3770 __get_user(mask, &act->sa_mask))
3773 #ifdef __ARCH_HAS_KA_RESTORER
3774 new_ka.ka_restorer = NULL;
3776 new_ka.sa.sa_handler = compat_ptr(handler);
3777 new_ka.sa.sa_restorer = compat_ptr(restorer);
3778 siginitset(&new_ka.sa.sa_mask, mask);
3781 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3784 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3785 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3786 &oact->sa_handler) ||
3787 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3788 &oact->sa_restorer) ||
3789 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3790 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3797 #ifdef CONFIG_SGETMASK_SYSCALL
3800 * For backwards compatibility. Functionality superseded by sigprocmask.
3802 SYSCALL_DEFINE0(sgetmask)
3805 return current->blocked.sig[0];
3808 SYSCALL_DEFINE1(ssetmask, int, newmask)
3810 int old = current->blocked.sig[0];
3813 siginitset(&newset, newmask);
3814 set_current_blocked(&newset);
3818 #endif /* CONFIG_SGETMASK_SYSCALL */
3820 #ifdef __ARCH_WANT_SYS_SIGNAL
3822 * For backwards compatibility. Functionality superseded by sigaction.
3824 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3826 struct k_sigaction new_sa, old_sa;
3829 new_sa.sa.sa_handler = handler;
3830 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3831 sigemptyset(&new_sa.sa.sa_mask);
3833 ret = do_sigaction(sig, &new_sa, &old_sa);
3835 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3837 #endif /* __ARCH_WANT_SYS_SIGNAL */
3839 #ifdef __ARCH_WANT_SYS_PAUSE
3841 SYSCALL_DEFINE0(pause)
3843 while (!signal_pending(current)) {
3844 __set_current_state(TASK_INTERRUPTIBLE);
3847 return -ERESTARTNOHAND;
3852 static int sigsuspend(sigset_t *set)
3854 current->saved_sigmask = current->blocked;
3855 set_current_blocked(set);
3857 while (!signal_pending(current)) {
3858 __set_current_state(TASK_INTERRUPTIBLE);
3861 set_restore_sigmask();
3862 return -ERESTARTNOHAND;
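/*
 * An illustrative userspace sketch (glibc wrappers; not part of this
 * file): the classic race-free wait, where the signal stays blocked
 * while the flag is tested and is unblocked atomically only inside
 * sigsuspend().
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)		// hypothetical handler-set flag
 *		sigsuspend(&old);	// returns -1 with errno == EINTR
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */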
3866 * sys_rt_sigsuspend - replace the signal mask with the
3867 * @unewset value until a signal is received
3868 * @unewset: new signal mask value
3869 * @sigsetsize: size of sigset_t type
3871 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3875 /* XXX: Don't preclude handling different sized sigset_t's. */
3876 if (sigsetsize != sizeof(sigset_t))
3879 if (copy_from_user(&newset, unewset, sizeof(newset)))
3881 return sigsuspend(&newset);
3884 #ifdef CONFIG_COMPAT
3885 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3889 /* XXX: Don't preclude handling different sized sigset_t's. */
3890 if (sigsetsize != sizeof(sigset_t))
3893 if (get_compat_sigset(&newset, unewset))
3895 return sigsuspend(&newset);
3899 #ifdef CONFIG_OLD_SIGSUSPEND
3900 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3903 siginitset(&blocked, mask);
3904 return sigsuspend(&blocked);
3907 #ifdef CONFIG_OLD_SIGSUSPEND3
3908 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3911 siginitset(&blocked, mask);
3912 return sigsuspend(&blocked);
3916 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3921 void __init signals_init(void)
3923 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3924 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3925 != offsetof(struct siginfo, _sifields._pad));
3926 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
3928 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3931 #ifdef CONFIG_KGDB_KDB
3932 #include <linux/kdb.h>
3934 * kdb_send_sig - Allows kdb to send signals without exposing
3935 * signal internals. This function checks if the required locks are
3936 * available before calling the main signal code, to avoid kdb deadlocks.
3939 void kdb_send_sig(struct task_struct *t, int sig)
3941 static struct task_struct *kdb_prev_t;
3943 if (!spin_trylock(&t->sighand->siglock)) {
3944 kdb_printf("Can't do kill command now.\n"
3945 "The sigmask lock is held somewhere else in "
3946 "the kernel; try again later\n");
3949 new_t = kdb_prev_t != t;
3951 if (t->state != TASK_RUNNING && new_t) {
3952 spin_unlock(&t->sighand->siglock);
3953 kdb_printf("Process is not RUNNING, sending a signal from "
3954 "kdb risks deadlock\n"
3955 "on the run queue locks. "
3956 "The signal has _not_ been sent.\n"
3957 "Reissue the kill command if you want to risk the deadlock.\n");
3961 ret = send_signal(sig, SEND_SIG_PRIV, t, false);
3962 spin_unlock(&t->sighand->siglock);
3964 kdb_printf("Failed to deliver signal %d to process %d.\n",
3967 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3969 #endif /* CONFIG_KGDB_KDB */