// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
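
/*
 * Editorial note (not part of the original file): PENDING() reduces to a
 * word-wise "pending & ~blocked" test.  A minimal sketch of the same
 * computation for the private queue, assuming a task_struct *t:
 *
 *	bool deliverable = has_pending_signals(&t->pending.signal,
 *					       &t->blocked);
 */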

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
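
/*
 * Illustrative sketch (editorial addition, not in the original file):
 * because the first sigset word is filtered against SYNCHRONOUS_MASK, a
 * pending SIGSEGV is reported before a lower-numbered asynchronous signal
 * such as SIGHUP.  Assuming an initialized, empty struct sigpending p:
 *
 *	sigset_t none;
 *
 *	sigemptyset(&none);
 *	sigaddset(&p.signal, SIGHUP);
 *	sigaddset(&p.signal, SIGSEGV);
 *	next_signal(&p, &none);		// returns SIGSEGV, not SIGHUP
 */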

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		/*
		 * Preallocation does not hold sighand::siglock so it can't
		 * use the cache. The lockless caching requires that only
		 * one consumer and only one producer run at a time.
		 *
		 * For the regular allocation case it is sufficient to
		 * check @q for NULL because this code can only be called
		 * if the target task @t has not been reaped yet; which
		 * means this code can never observe the error pointer which is
		 * written to @t->sigqueue_cache in exit_task_sigqueue_cache().
		 */
		q = READ_ONCE(t->sigqueue_cache);
		if (!q || sigqueue_flags)
			q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
		else
			WRITE_ONCE(t->sigqueue_cache, NULL);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->user = user;
	}

	return q;
}

void exit_task_sigqueue_cache(struct task_struct *tsk)
{
	/* Race free because @tsk is mopped up */
	struct sigqueue *q = tsk->sigqueue_cache;

	if (q) {
		/*
		 * Hand it back to the cache as the task might
		 * be self reaping which would leak the object.
		 */
		kmem_cache_free(sigqueue_cachep, q);
	}

	/*
	 * Set an error pointer to ensure that @tsk will not cache a
	 * sigqueue when it is reaping its child tasks
	 */
	tsk->sigqueue_cache = ERR_PTR(-1);
}

static void sigqueue_cache_or_free(struct sigqueue *q)
{
	/*
	 * Cache one sigqueue per task. This pairs with the consumer side
	 * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
	 * compiler from store tearing and to tell KCSAN that the data race
	 * is intentional when run without holding current->sighand->siglock,
	 * which is fine as current obviously cannot run __sigqueue_free()
	 * concurrently.
	 *
	 * The NULL check is safe even if current has been reaped already,
	 * in which case exit_task_sigqueue_cache() wrote an error pointer
	 * into current->sigqueue_cache.
	 */
	if (!READ_ONCE(current->sigqueue_cache))
		WRITE_ONCE(current->sigqueue_cache, q);
	else
		kmem_cache_free(sigqueue_cachep, q);
}
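
/*
 * Editorial sketch (not part of the original file): the single-slot cache
 * gives each task a fast alloc/free round trip for the common "one signal
 * at a time" case.  Under siglock the pairing looks like:
 *
 *	q = __sigqueue_alloc(sig, current, GFP_ATOMIC, 0, 0);
 *					// consumes ->sigqueue_cache if set
 *	...
 *	__sigqueue_free(q);		// parks q back in ->sigqueue_cache
 */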

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	sigqueue_cache_or_free(q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
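
/*
 * Editorial usage sketch (assumed pattern, not from this file): a kthread
 * that opted in to a signal with allow_signal() typically handles it and
 * then clears any leftovers:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			flush_signals(current);
 *	}
 */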

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
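
/*
 * Editorial usage sketch (assumed shape, simplified): callers hold the
 * siglock and dequeue against their blocked set, as get_signal() does:
 *
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		...;	// act on info
 */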

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
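
/*
 * Editorial note (not part of the original file): SEND_SIG_NOINFO and
 * SEND_SIG_PRIV are tiny pointer cookies, not real siginfo pointers, so
 * they must be range-checked as above before any dereference.  Sketch of
 * the distinction, assuming the definitions in <linux/sched/signal.h>:
 *
 *	send_sig_info(sig, SEND_SIG_NOINFO, p);	// treated as from user space
 *	send_sig_info(sig, SEND_SIG_PRIV, p);	// kernel-internal, SI_KERNEL
 */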

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, the STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
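
/*
 * Editorial usage note (assumed, not from this file): the flag can be set
 * with "print-fatal-signals=1" on the kernel command line, or at runtime:
 *
 *	sysctl kernel.print-fatal-signals=1
 *
 * It enables both print_fatal_signal() and the RLIMIT_SIGPENDING drop
 * message in print_dropped_signal() above.
 */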

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  The 32bit address will be encoded in the low
 * 32 bits of the pointer, and those low 32 bits will be stored at a
 * higher address than a 32 bit pointer expects.  So userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * which is passed in.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);
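
/*
 * Editorial usage sketch (hypothetical driver code, not from this file):
 * filling a siginfo and sending it to a specific task:
 *
 *	struct kernel_siginfo info;
 *
 *	clear_siginfo(&info);
 *	info.si_signo = SIGIO;
 *	info.si_code  = SI_KERNEL;
 *	send_sig_info(SIGIO, &info, task);
 */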

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
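
/*
 * Editorial note (not part of the original file): @priv selects the
 * cookie above.  send_sig(sig, p, 1) passes SEND_SIG_PRIV, i.e. a
 * kernel-generated signal that send_signal() will force, while
 * send_sig(sig, p, 0) passes SEND_SIG_NOINFO and is treated as if a
 * user-space process had sent it.
 */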

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
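
/*
 * Editorial usage sketch (hypothetical caller, not from this file):
 * signalling by struct pid avoids pid-reuse races.  Starting from a
 * numeric pid nr:
 *
 *	struct pid *pid = find_get_pid(nr);	// takes a reference
 *
 *	if (pid) {
 *		kill_pid(pid, SIGTERM, 1);	// 1 == kernel-private
 *		put_pid(pid);
 *	}
 */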

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
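
/*
 * Editorial lifecycle sketch (simplified, mirroring what the comment
 * above sigqueue_alloc() describes for POSIX timers):
 *
 *	struct sigqueue *q = sigqueue_alloc();	// at timer_create() time
 *
 *	if (!q)
 *		return -EAGAIN;			// reportable failure
 *	...
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	// at each expiry, no alloc
 *	...
 *	sigqueue_free(q);			// at timer deletion
 */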

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

2178 * This must be called with current->sighand->siglock held.
2180 * This should be the path for all ptrace stops.
2181 * We always set current->last_siginfo while stopped here.
2182 * That makes it a way to test a stopped process for
2183 * being ptrace-stopped vs being job-control-stopped.
2185 * If we actually decide not to stop at all because the tracer
2186 * is gone, we keep current->exit_code unless clear_code.
2188 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2189 __releases(¤t->sighand->siglock)
2190 __acquires(¤t->sighand->siglock)
2192 bool gstop_done = false;
2194 if (arch_ptrace_stop_needed(exit_code, info)) {
2196 * The arch code has something special to do before a
2197 * ptrace stop. This is allowed to block, e.g. for faults
2198 * on user stack pages. We can't keep the siglock while
2199 * calling arch_ptrace_stop, so we must release it now.
2200 * To preserve proper semantics, we must do this before
2201 * any signal bookkeeping like checking group_stop_count.
2202 * Meanwhile, a SIGKILL could come in before we retake the
2203 * siglock. That must prevent us from sleeping in TASK_TRACED.
2204 * So after regaining the lock, we must check for SIGKILL.
2206 spin_unlock_irq(¤t->sighand->siglock);
2207 arch_ptrace_stop(exit_code, info);
2208 spin_lock_irq(¤t->sighand->siglock);
2209 if (sigkill_pending(current))
2213 set_special_state(TASK_TRACED);
2216 * We're committing to trapping. TRACED should be visible before
2217 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2218 * Also, transition to TRACED and updates to ->jobctl should be
2219 * atomic with respect to siglock and should be done after the arch
2220 * hook as siglock is released and regrabbed across it.
2225 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2227 * set_current_state() smp_wmb();
2229 * wait_task_stopped()
2230 * task_stopped_code()
2231 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2235 current->last_siginfo = info;
2236 current->exit_code = exit_code;
2239 * If @why is CLD_STOPPED, we're trapping to participate in a group
2240 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
2241 * across siglock relocks since INTERRUPT was scheduled, PENDING
2242 * could be clear now. We act as if SIGCONT is received after
2243 * TASK_TRACED is entered - ignore it.
2245 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2246 gstop_done = task_participate_group_stop(current);
2248 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2249 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2250 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2251 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2253 /* entering a trap, clear TRAPPING */
2254 task_clear_jobctl_trapping(current);
2256 spin_unlock_irq(¤t->sighand->siglock);
2257 read_lock(&tasklist_lock);
2258 if (may_ptrace_stop()) {
2260 * Notify parents of the stop.
2262 * While ptraced, there are two parents - the ptracer and
2263 * the real_parent of the group_leader. The ptracer should
2264 * know about every stop while the real parent is only
2265 * interested in the completion of group stop. The states
2266 * for the two don't interact with each other. Notify
2267 * separately unless they're gonna be duplicates.
2269 do_notify_parent_cldstop(current, true, why);
2270 if (gstop_done && ptrace_reparented(current))
2271 do_notify_parent_cldstop(current, false, why);
2274 * Don't want to allow preemption here, because
2275 * sys_ptrace() needs this task to be inactive.
2277 * XXX: implement read_unlock_no_resched().
2280 read_unlock(&tasklist_lock);
2281 cgroup_enter_frozen();
2282 preempt_enable_no_resched();
2283 freezable_schedule();
2284 cgroup_leave_frozen(true);
2287 * By the time we got the lock, our tracer went away.
2288 * Don't drop the lock yet, another tracer may come.
2290 * If @gstop_done, the ptracer went away between group stop
2291 * completion and here. During detach, it would have set
2292 * JOBCTL_STOP_PENDING on us and we'll re-enter
2293 * TASK_STOPPED in do_signal_stop() on return, so notifying
2294 * the real parent of the group stop completion is enough.
2297 do_notify_parent_cldstop(current, false, why);
2299 /* tasklist protects us from ptrace_freeze_traced() */
2300 __set_current_state(TASK_RUNNING);
2302 current->exit_code = 0;
2303 read_unlock(&tasklist_lock);
2307 * We are back. Now reacquire the siglock before touching
2308 * last_siginfo, so that we are sure to have synchronized with
2309 * any signal-sending on another CPU that wants to examine it.
2311 spin_lock_irq(&current->sighand->siglock);
2312 current->last_siginfo = NULL;
2314 /* LISTENING can be set only during STOP traps, clear it */
2315 current->jobctl &= ~JOBCTL_LISTENING;
2318 * Queued signals ignored us while we were stopped for tracing.
2319 * So check for any that we should take before resuming user mode.
2320 * This sets TIF_SIGPENDING, but never clears it.
2322 recalc_sigpending_tsk(current);
2325 static void ptrace_do_notify(int signr, int exit_code, int why)
2327 kernel_siginfo_t info;
2329 clear_siginfo(&info);
2330 info.si_signo = signr;
2331 info.si_code = exit_code;
2332 info.si_pid = task_pid_vnr(current);
2333 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2335 /* Let the debugger run. */
2336 ptrace_stop(exit_code, why, 1, &info);
2339 void ptrace_notify(int exit_code)
2341 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2342 if (unlikely(current->task_works))
2345 spin_lock_irq(&current->sighand->siglock);
2346 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2347 spin_unlock_irq(&current->sighand->siglock);
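/*
 * Editor's note: a minimal userspace sketch (not part of this file) of how
 * a tracer observes the stops reported via ptrace_do_notify() above. The
 * event number travels in bits 8-15 of the wait status, so the tracer
 * compares status >> 8 against SIGTRAP | (event << 8). Assumes the child
 * was attached with PTRACE_O_TRACEEXEC already set.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static int wait_for_exec_stop(pid_t child)
{
	int status;

	if (waitpid(child, &status, 0) < 0)
		return -1;
	if (WIFSTOPPED(status) &&
	    status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8))) {
		printf("tracee %d stopped at exec\n", child);
		return 0;
	}
	return 1;	/* some other stop, or the child exited */
}
#endif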
2351 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2352 * @signr: signr causing group stop if initiating
2354 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2355 * and participate in it. If already set, participate in the existing
2356 * group stop. If participated in a group stop (and thus slept), %true is
2357 * returned with siglock released.
2359 * If ptraced, this function doesn't handle stop itself. Instead,
2360 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2361 * untouched. The caller must ensure that INTERRUPT trap handling takes
2362 * place afterwards.
2365 * Must be called with @current->sighand->siglock held, which is released
2366 * on %true return.
2368 * RETURNS:
2369 * %false if group stop is already cancelled or ptrace trap is scheduled.
2370 * %true if participated in group stop.
2372 static bool do_signal_stop(int signr)
2373 __releases(&current->sighand->siglock)
2375 struct signal_struct *sig = current->signal;
2377 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2378 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2379 struct task_struct *t;
2381 /* signr will be recorded in task->jobctl for retries */
2382 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2384 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2385 unlikely(signal_group_exit(sig)))
2388 * There is no group stop already in progress. We must
2391 * While ptraced, a task may be resumed while group stop is
2392 * still in effect and then receive a stop signal and
2393 * initiate another group stop. This deviates from the
2394 * usual behavior as two consecutive stop signals can't
2395 * cause two group stops when !ptraced. That is why we
2396 * also check !task_is_stopped(t) below.
2398 * The condition can be distinguished by testing whether
2399 * SIGNAL_STOP_STOPPED is already set. Don't generate
2400 * group_exit_code in such case.
2402 * This is not necessary for SIGNAL_STOP_CONTINUED because
2403 * an intervening stop signal is required to cause two
2404 * continued events regardless of ptrace.
2406 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2407 sig->group_exit_code = signr;
2409 sig->group_stop_count = 0;
2411 if (task_set_jobctl_pending(current, signr | gstop))
2412 sig->group_stop_count++;
2415 while_each_thread(current, t) {
2417 * Setting state to TASK_STOPPED for a group
2418 * stop is always done with the siglock held,
2419 * so this check has no races.
2421 if (!task_is_stopped(t) &&
2422 task_set_jobctl_pending(t, signr | gstop)) {
2423 sig->group_stop_count++;
2424 if (likely(!(t->ptrace & PT_SEIZED)))
2425 signal_wake_up(t, 0);
2427 ptrace_trap_notify(t);
2432 if (likely(!current->ptrace)) {
2436 * If there are no other threads in the group, or if there
2437 * is a group stop in progress and we are the last to stop,
2438 * report to the parent.
2440 if (task_participate_group_stop(current))
2441 notify = CLD_STOPPED;
2443 set_special_state(TASK_STOPPED);
2444 spin_unlock_irq(&current->sighand->siglock);
2447 * Notify the parent of the group stop completion. Because
2448 * we're not holding either the siglock or tasklist_lock
2449 * here, the ptracer may attach in between; however, this is for
2450 * group stop and should always be delivered to the real
2451 * parent of the group leader. The new ptracer will get
2452 * its notification when this task transitions into
2456 read_lock(&tasklist_lock);
2457 do_notify_parent_cldstop(current, false, notify);
2458 read_unlock(&tasklist_lock);
2461 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2462 cgroup_enter_frozen();
2463 freezable_schedule();
2467 * While ptraced, group stop is handled by STOP trap.
2468 * Schedule it and let the caller deal with it.
2470 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2476 * do_jobctl_trap - take care of ptrace jobctl traps
2478 * When PT_SEIZED, it's used for both group stop and explicit
2479 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2480 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2481 * the stop signal; otherwise, %SIGTRAP.
2483 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2484 * number as exit_code and no siginfo.
2487 * Must be called with @current->sighand->siglock held, which may be
2488 * released and re-acquired before returning with intervening sleep.
2490 static void do_jobctl_trap(void)
2492 struct signal_struct *signal = current->signal;
2493 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2495 if (current->ptrace & PT_SEIZED) {
2496 if (!signal->group_stop_count &&
2497 !(signal->flags & SIGNAL_STOP_STOPPED))
2499 WARN_ON_ONCE(!signr);
2500 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2503 WARN_ON_ONCE(!signr);
2504 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2505 current->exit_code = 0;
2510 * do_freezer_trap - handle the freezer jobctl trap
2512 * Puts the task into the frozen state, unless the task is about to quit;
2513 * in that case it drops JOBCTL_TRAP_FREEZE.
2516 * Must be called with @current->sighand->siglock held,
2517 * which is always released before returning.
2519 static void do_freezer_trap(void)
2520 __releases(&current->sighand->siglock)
2523 * If there are trap bits pending other than JOBCTL_TRAP_FREEZE,
2524 * make another loop to give them a chance to be handled first.
2525 * In any case, we'll come back here.
2527 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2528 JOBCTL_TRAP_FREEZE) {
2529 spin_unlock_irq(&current->sighand->siglock);
2534 * Now we're sure that there is no pending fatal signal and no
2535 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2536 * immediately (if there is a non-fatal signal pending), and
2537 * put the task into sleep.
2539 __set_current_state(TASK_INTERRUPTIBLE);
2540 clear_thread_flag(TIF_SIGPENDING);
2541 spin_unlock_irq(&current->sighand->siglock);
2542 cgroup_enter_frozen();
2543 freezable_schedule();
2546 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2549 * We do not check sig_kernel_stop(signr) but set this marker
2550 * unconditionally because we do not know whether debugger will
2551 * change signr. This flag has no meaning unless we are going
2552 * to stop after return from ptrace_stop(). In this case it will
2553 * be checked in do_signal_stop(), we should only stop if it was
2554 * not cleared by SIGCONT while we were sleeping. See also the
2555 * comment in dequeue_signal().
2557 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2558 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2560 /* We're back. Did the debugger cancel the sig? */
2561 signr = current->exit_code;
2565 current->exit_code = 0;
2568 * Update the siginfo structure if the signal has
2569 * changed. If the debugger wanted something
2570 * specific in the siginfo structure then it should
2571 * have updated *info via PTRACE_SETSIGINFO.
2573 if (signr != info->si_signo) {
2574 clear_siginfo(info);
2575 info->si_signo = signr;
2577 info->si_code = SI_USER;
2579 info->si_pid = task_pid_vnr(current->parent);
2580 info->si_uid = from_kuid_munged(current_user_ns(),
2581 task_uid(current->parent));
2585 /* If the (new) signal is now blocked, requeue it. */
2586 if (sigismember(&current->blocked, signr)) {
2587 send_signal(signr, info, current, PIDTYPE_PID);
2594 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2596 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2598 case SIL_FAULT_TRAPNO:
2599 case SIL_FAULT_MCEERR:
2600 case SIL_FAULT_BNDERR:
2601 case SIL_FAULT_PKUERR:
2602 case SIL_PERF_EVENT:
2603 ksig->info.si_addr = arch_untagged_si_addr(
2604 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2616 bool get_signal(struct ksignal *ksig)
2618 struct sighand_struct *sighand = current->sighand;
2619 struct signal_struct *signal = current->signal;
2622 if (unlikely(current->task_works))
2626 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2627 * that the arch handlers don't all have to do it. If we get here
2628 * without TIF_SIGPENDING, just exit after running signal work.
2630 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2631 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2632 tracehook_notify_signal();
2633 if (!task_sigpending(current))
2637 if (unlikely(uprobe_deny_signal()))
2641 * Do this once, we can't return to user-mode if freezing() == T.
2642 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2643 * thus do not need another check after return.
2648 spin_lock_irq(&sighand->siglock);
2651 * Every stopped thread goes here after wakeup. Check to see if
2652 * we should notify the parent; prepare_signal(SIGCONT) encodes
2653 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2655 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2658 if (signal->flags & SIGNAL_CLD_CONTINUED)
2659 why = CLD_CONTINUED;
2663 signal->flags &= ~SIGNAL_CLD_MASK;
2665 spin_unlock_irq(&sighand->siglock);
2668 * Notify the parent that we're continuing. This event is
2669 * always per-process and doesn't make a whole lot of sense
2670 * for ptracers, who shouldn't consume the state via
2671 * wait(2) either, but, for backward compatibility, notify
2672 * the ptracer of the group leader too unless it's gonna be
2675 read_lock(&tasklist_lock);
2676 do_notify_parent_cldstop(current, false, why);
2678 if (ptrace_reparented(current->group_leader))
2679 do_notify_parent_cldstop(current->group_leader,
2681 read_unlock(&tasklist_lock);
2686 /* Has this task already been marked for death? */
2687 if (signal_group_exit(signal)) {
2688 ksig->info.si_signo = signr = SIGKILL;
2689 sigdelset(&current->pending.signal, SIGKILL);
2690 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2691 &sighand->action[SIGKILL - 1]);
2692 recalc_sigpending();
2697 struct k_sigaction *ka;
2699 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2703 if (unlikely(current->jobctl &
2704 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2705 if (current->jobctl & JOBCTL_TRAP_MASK) {
2707 spin_unlock_irq(&sighand->siglock);
2708 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2715 * If the task is leaving the frozen state, let's update
2716 * cgroup counters and reset the frozen bit.
2718 if (unlikely(cgroup_task_frozen(current))) {
2719 spin_unlock_irq(&sighand->siglock);
2720 cgroup_leave_frozen(false);
2725 * Signals generated by the execution of an instruction
2726 * need to be delivered before any other pending signals
2727 * so that the instruction pointer in the signal stack
2728 * frame points to the faulting instruction.
2730 signr = dequeue_synchronous_signal(&ksig->info);
2732 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2735 break; /* will return 0 */
2737 if (unlikely(current->ptrace) && signr != SIGKILL) {
2738 signr = ptrace_signal(signr, &ksig->info);
2743 ka = &sighand->action[signr-1];
2745 /* Trace actually delivered signals. */
2746 trace_signal_deliver(signr, &ksig->info, ka);
2748 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2750 if (ka->sa.sa_handler != SIG_DFL) {
2751 /* Run the handler. */
2754 if (ka->sa.sa_flags & SA_ONESHOT)
2755 ka->sa.sa_handler = SIG_DFL;
2757 break; /* will return non-zero "signr" value */
2761 * Now we are doing the default action for this signal.
2763 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2767 * Global init gets no signals it doesn't want.
2768 * Container-init gets no signals it doesn't want from same
2769 * container.
2771 * Note that if global/container-init sees a sig_kernel_only()
2772 * signal here, the signal must have been generated internally
2773 * or must have come from an ancestor namespace. In either
2774 * case, the signal cannot be dropped.
2776 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2777 !sig_kernel_only(signr))
2780 if (sig_kernel_stop(signr)) {
2782 * The default action is to stop all threads in
2783 * the thread group. The job control signals
2784 * do nothing in an orphaned pgrp, but SIGSTOP
2785 * always works. Note that siglock needs to be
2786 * dropped during the call to is_orphaned_pgrp()
2787 * because of lock ordering with tasklist_lock.
2788 * This allows an intervening SIGCONT to be posted.
2789 * We need to check for that and bail out if necessary.
2791 if (signr != SIGSTOP) {
2792 spin_unlock_irq(&sighand->siglock);
2794 /* signals can be posted during this window */
2796 if (is_current_pgrp_orphaned())
2799 spin_lock_irq(&sighand->siglock);
2802 if (likely(do_signal_stop(ksig->info.si_signo))) {
2803 /* It released the siglock. */
2808 * We didn't actually stop, due to a race
2809 * with SIGCONT or something like that.
2815 spin_unlock_irq(&sighand->siglock);
2816 if (unlikely(cgroup_task_frozen(current)))
2817 cgroup_leave_frozen(true);
2820 * Anything else is fatal, maybe with a core dump.
2822 current->flags |= PF_SIGNALED;
2824 if (sig_kernel_coredump(signr)) {
2825 if (print_fatal_signals)
2826 print_fatal_signal(ksig->info.si_signo);
2827 proc_coredump_connector(current);
2829 * If it was able to dump core, this kills all
2830 * other threads in the group and synchronizes with
2831 * their demise. If we lost the race with another
2832 * thread getting here, it set group_exit_code
2833 * first and our do_group_exit call below will use
2834 * that value and ignore the one we pass it.
2836 do_coredump(&ksig->info);
2840 * PF_IO_WORKER threads will catch and exit on fatal signals
2841 * themselves. They have cleanup that must be performed, so
2842 * we cannot call do_exit() on their behalf.
2844 if (current->flags & PF_IO_WORKER)
2848 * Death signals, no core dump.
2850 do_group_exit(ksig->info.si_signo);
2853 spin_unlock_irq(&sighand->siglock);
2857 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2858 hide_si_addr_tag_bits(ksig);
2860 return ksig->sig > 0;
2864 * signal_delivered - report that a signal was delivered
2865 * @ksig: kernel signal struct
2866 * @stepping: nonzero if debugger single-step or block-step in use
2868 * This function should be called when a signal has successfully been
2869 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2870 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2871 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2873 static void signal_delivered(struct ksignal *ksig, int stepping)
2877 /* A signal was successfully delivered, and the
2878 saved sigmask was stored on the signal frame,
2879 and will be restored by sigreturn. So we can
2880 simply clear the restore sigmask flag. */
2881 clear_restore_sigmask();
2883 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2884 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2885 sigaddset(&blocked, ksig->sig);
2886 set_current_blocked(&blocked);
2887 tracehook_signal_handler(stepping);
2890 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2893 force_sigsegv(ksig->sig);
2895 signal_delivered(ksig, stepping);
2899 * It could be that complete_signal() picked us to notify about the
2900 * group-wide signal. Other threads should be notified now to take
2901 * the shared signals in @which since we will not.
2903 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2906 struct task_struct *t;
2908 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2909 if (sigisemptyset(&retarget))
2913 while_each_thread(tsk, t) {
2914 if (t->flags & PF_EXITING)
2917 if (!has_pending_signals(&retarget, &t->blocked))
2919 /* Remove the signals this thread can handle. */
2920 sigandsets(&retarget, &retarget, &t->blocked);
2922 if (!task_sigpending(t))
2923 signal_wake_up(t, 0);
2925 if (sigisemptyset(&retarget))
2930 void exit_signals(struct task_struct *tsk)
2936 * @tsk is about to have PF_EXITING set - lock out users which
2937 * expect stable threadgroup.
2939 cgroup_threadgroup_change_begin(tsk);
2941 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2942 tsk->flags |= PF_EXITING;
2943 cgroup_threadgroup_change_end(tsk);
2947 spin_lock_irq(&tsk->sighand->siglock);
2949 * From now this task is not visible for group-wide signals,
2950 * see wants_signal(), do_signal_stop().
2952 tsk->flags |= PF_EXITING;
2954 cgroup_threadgroup_change_end(tsk);
2956 if (!task_sigpending(tsk))
2959 unblocked = tsk->blocked;
2960 signotset(&unblocked);
2961 retarget_shared_pending(tsk, &unblocked);
2963 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2964 task_participate_group_stop(tsk))
2965 group_stop = CLD_STOPPED;
2967 spin_unlock_irq(&tsk->sighand->siglock);
2970 * If group stop has completed, deliver the notification. This
2971 * should always go to the real parent of the group leader.
2973 if (unlikely(group_stop)) {
2974 read_lock(&tasklist_lock);
2975 do_notify_parent_cldstop(tsk, false, group_stop);
2976 read_unlock(&tasklist_lock);
2981 * System call entry points.
2985 * sys_restart_syscall - restart a system call
2987 SYSCALL_DEFINE0(restart_syscall)
2989 struct restart_block *restart = &current->restart_block;
2990 return restart->fn(restart);
2993 long do_no_restart_syscall(struct restart_block *param)
2998 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3000 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3001 sigset_t newblocked;
3002 /* A set of now blocked but previously unblocked signals. */
3003 sigandnsets(&newblocked, newset, &current->blocked);
3004 retarget_shared_pending(tsk, &newblocked);
3006 tsk->blocked = *newset;
3007 recalc_sigpending();
3011 * set_current_blocked - change current->blocked mask
3014 * It is wrong to change ->blocked directly, this helper should be used
3015 * to ensure the process can't miss a shared signal we are going to block.
3017 void set_current_blocked(sigset_t *newset)
3019 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3020 __set_current_blocked(newset);
3023 void __set_current_blocked(const sigset_t *newset)
3025 struct task_struct *tsk = current;
3028 * In case the signal mask hasn't changed, there is nothing we need
3029 * to do. The current->blocked shouldn't be modified by another task.
3031 if (sigequalsets(&tsk->blocked, newset))
3034 spin_lock_irq(&tsk->sighand->siglock);
3035 __set_task_blocked(tsk, newset);
3036 spin_unlock_irq(&tsk->sighand->siglock);
3040 * This is also useful for kernel threads that want to temporarily
3041 * (or permanently) block certain signals.
3043 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3044 * interface happily blocks "unblockable" signals like SIGKILL
3045 * and friends.
3047 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3049 struct task_struct *tsk = current;
3052 /* Lockless, only current can change ->blocked, never from irq */
3054 *oldset = tsk->blocked;
3058 sigorsets(&newset, &tsk->blocked, set);
3061 sigandnsets(&newset, &tsk->blocked, set);
3070 __set_current_blocked(&newset);
3073 EXPORT_SYMBOL(sigprocmask);
3076 * The API helps set app-provided sigmasks.
3078 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3079 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3081 * Note that it does set_restore_sigmask() in advance, so it must be always
3082 * paired with restore_saved_sigmask_unless() before return from syscall.
3084 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3090 if (sigsetsize != sizeof(sigset_t))
3092 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3095 set_restore_sigmask();
3096 current->saved_sigmask = current->blocked;
3097 set_current_blocked(&kmask);
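/*
 * Editor's note: an illustrative userspace sketch (not kernel code) of the
 * pattern set_user_sigmask() exists to support. SIGINT stays blocked except
 * while sleeping in ppoll(), so the kernel swaps the mask in, sleeps, and
 * restores it atomically via saved_sigmask; function and variable names
 * here are hypothetical.
 */
#if 0
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <stddef.h>

static int wait_readable(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	sigset_t blockint, during_wait;

	sigemptyset(&blockint);
	sigaddset(&blockint, SIGINT);
	sigprocmask(SIG_BLOCK, &blockint, NULL);

	sigemptyset(&during_wait);
	/* SIGINT can only interrupt us inside this one call */
	return ppoll(&pfd, 1, NULL, &during_wait);
}
#endif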
3102 #ifdef CONFIG_COMPAT
3103 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3110 if (sigsetsize != sizeof(compat_sigset_t))
3112 if (get_compat_sigset(&kmask, umask))
3115 set_restore_sigmask();
3116 current->saved_sigmask = current->blocked;
3117 set_current_blocked(&kmask);
3124 * sys_rt_sigprocmask - change the list of currently blocked signals
3125 * @how: whether to add, remove, or set signals
3126 * @nset: new signal set to apply according to @how, or NULL for no change
3127 * @oset: previous value of signal mask if non-null
3128 * @sigsetsize: size of sigset_t type
3130 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3131 sigset_t __user *, oset, size_t, sigsetsize)
3133 sigset_t old_set, new_set;
3136 /* XXX: Don't preclude handling different sized sigset_t's. */
3137 if (sigsetsize != sizeof(sigset_t))
3140 old_set = current->blocked;
3143 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3145 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3147 error = sigprocmask(how, &new_set, NULL);
3153 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
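/*
 * Editor's note: a minimal userspace sketch (not kernel code) of the libc
 * sigprocmask() wrapper that lands in this syscall; names are illustrative.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static int block_sigterm(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGTERM);
	if (sigprocmask(SIG_BLOCK, &set, &old) < 0)
		return -1;
	printf("SIGTERM was%s blocked before\n",
	       sigismember(&old, SIGTERM) ? "" : " not");
	return 0;
}
#endif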
3160 #ifdef CONFIG_COMPAT
3161 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3162 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3164 sigset_t old_set = current->blocked;
3166 /* XXX: Don't preclude handling different sized sigset_t's. */
3167 if (sigsetsize != sizeof(sigset_t))
3173 if (get_compat_sigset(&new_set, nset))
3175 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3177 error = sigprocmask(how, &new_set, NULL);
3181 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3185 static void do_sigpending(sigset_t *set)
3187 spin_lock_irq(&current->sighand->siglock);
3188 sigorsets(set, &current->pending.signal,
3189 &current->signal->shared_pending.signal);
3190 spin_unlock_irq(&current->sighand->siglock);
3192 /* Outside the lock because only this thread touches it. */
3193 sigandsets(set, &current->blocked, set);
3197 * sys_rt_sigpending - examine a pending signal that has been raised
3198 * while blocked
3199 * @uset: stores pending signals
3200 * @sigsetsize: size of sigset_t type or larger
3202 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3206 if (sigsetsize > sizeof(*uset))
3209 do_sigpending(&set);
3211 if (copy_to_user(uset, &set, sigsetsize))
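/*
 * Editor's note: an illustrative userspace sketch (not kernel code). With
 * SIGUSR1 blocked, a raised instance shows up in the set that
 * do_sigpending() computes above.
 */
#if 0
#include <signal.h>

static int usr1_pending_after_raise(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);
	raise(SIGUSR1);			/* queued, not delivered */
	if (sigpending(&pending) < 0)
		return -1;
	return sigismember(&pending, SIGUSR1);	/* returns 1 */
}
#endif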
3217 #ifdef CONFIG_COMPAT
3218 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3219 compat_size_t, sigsetsize)
3223 if (sigsetsize > sizeof(*uset))
3226 do_sigpending(&set);
3228 return put_compat_sigset(uset, &set, sigsetsize);
3232 static const struct {
3233 unsigned char limit, layout;
3234 } sig_sicodes[] = {
3235 [SIGILL] = { NSIGILL, SIL_FAULT },
3236 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3237 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3238 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3239 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3241 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3243 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3244 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3245 [SIGSYS] = { NSIGSYS, SIL_SYS },
3248 static bool known_siginfo_layout(unsigned sig, int si_code)
3250 if (si_code == SI_KERNEL)
3252 else if (si_code > SI_USER) {
3253 if (sig_specific_sicodes(sig)) {
3254 if (si_code <= sig_sicodes[sig].limit)
3257 else if (si_code <= NSIGPOLL)
3260 else if (si_code >= SI_DETHREAD)
3262 else if (si_code == SI_ASYNCNL)
3267 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3269 enum siginfo_layout layout = SIL_KILL;
3270 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3271 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3272 (si_code <= sig_sicodes[sig].limit)) {
3273 layout = sig_sicodes[sig].layout;
3274 /* Handle the exceptions */
3275 if ((sig == SIGBUS) &&
3276 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3277 layout = SIL_FAULT_MCEERR;
3278 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3279 layout = SIL_FAULT_BNDERR;
3281 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3282 layout = SIL_FAULT_PKUERR;
3284 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3285 layout = SIL_PERF_EVENT;
3286 #ifdef __ARCH_SI_TRAPNO
3287 else if (layout == SIL_FAULT)
3288 layout = SIL_FAULT_TRAPNO;
3291 else if (si_code <= NSIGPOLL)
3294 if (si_code == SI_TIMER)
3296 else if (si_code == SI_SIGIO)
3298 else if (si_code < 0)
3304 static inline char __user *si_expansion(const siginfo_t __user *info)
3306 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3309 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3311 char __user *expansion = si_expansion(to);
3312 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3314 if (clear_user(expansion, SI_EXPANSION_SIZE))
3319 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3320 const siginfo_t __user *from)
3322 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3323 char __user *expansion = si_expansion(from);
3324 char buf[SI_EXPANSION_SIZE];
3327 * An unknown si_code might need more than
3328 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3329 * extra bytes are 0. This guarantees copy_siginfo_to_user
3330 * will return this data to userspace exactly.
3332 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3334 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3342 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3343 const siginfo_t __user *from)
3345 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3347 to->si_signo = signo;
3348 return post_copy_siginfo_from_user(to, from);
3351 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3353 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3355 return post_copy_siginfo_from_user(to, from);
3358 #ifdef CONFIG_COMPAT
3360 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3361 * @to: compat siginfo destination
3362 * @from: kernel siginfo source
3364 * Note: This function does not work properly for SIGCHLD on x32, but
3365 * fortunately it doesn't have to. The only valid callers for this function are
3366 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3367 * The latter does not care because SIGCHLD will never cause a coredump.
3369 void copy_siginfo_to_external32(struct compat_siginfo *to,
3370 const struct kernel_siginfo *from)
3372 memset(to, 0, sizeof(*to));
3374 to->si_signo = from->si_signo;
3375 to->si_errno = from->si_errno;
3376 to->si_code = from->si_code;
3377 switch(siginfo_layout(from->si_signo, from->si_code)) {
3379 to->si_pid = from->si_pid;
3380 to->si_uid = from->si_uid;
3383 to->si_tid = from->si_tid;
3384 to->si_overrun = from->si_overrun;
3385 to->si_int = from->si_int;
3388 to->si_band = from->si_band;
3389 to->si_fd = from->si_fd;
3392 to->si_addr = ptr_to_compat(from->si_addr);
3394 case SIL_FAULT_TRAPNO:
3395 to->si_addr = ptr_to_compat(from->si_addr);
3396 to->si_trapno = from->si_trapno;
3398 case SIL_FAULT_MCEERR:
3399 to->si_addr = ptr_to_compat(from->si_addr);
3400 to->si_addr_lsb = from->si_addr_lsb;
3402 case SIL_FAULT_BNDERR:
3403 to->si_addr = ptr_to_compat(from->si_addr);
3404 to->si_lower = ptr_to_compat(from->si_lower);
3405 to->si_upper = ptr_to_compat(from->si_upper);
3407 case SIL_FAULT_PKUERR:
3408 to->si_addr = ptr_to_compat(from->si_addr);
3409 to->si_pkey = from->si_pkey;
3411 case SIL_PERF_EVENT:
3412 to->si_addr = ptr_to_compat(from->si_addr);
3413 to->si_perf_data = from->si_perf_data;
3414 to->si_perf_type = from->si_perf_type;
3417 to->si_pid = from->si_pid;
3418 to->si_uid = from->si_uid;
3419 to->si_status = from->si_status;
3420 to->si_utime = from->si_utime;
3421 to->si_stime = from->si_stime;
3424 to->si_pid = from->si_pid;
3425 to->si_uid = from->si_uid;
3426 to->si_int = from->si_int;
3429 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3430 to->si_syscall = from->si_syscall;
3431 to->si_arch = from->si_arch;
3436 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3437 const struct kernel_siginfo *from)
3439 struct compat_siginfo new;
3441 copy_siginfo_to_external32(&new, from);
3442 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3447 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3448 const struct compat_siginfo *from)
3451 to->si_signo = from->si_signo;
3452 to->si_errno = from->si_errno;
3453 to->si_code = from->si_code;
3454 switch(siginfo_layout(from->si_signo, from->si_code)) {
3456 to->si_pid = from->si_pid;
3457 to->si_uid = from->si_uid;
3460 to->si_tid = from->si_tid;
3461 to->si_overrun = from->si_overrun;
3462 to->si_int = from->si_int;
3465 to->si_band = from->si_band;
3466 to->si_fd = from->si_fd;
3469 to->si_addr = compat_ptr(from->si_addr);
3471 case SIL_FAULT_TRAPNO:
3472 to->si_addr = compat_ptr(from->si_addr);
3473 to->si_trapno = from->si_trapno;
3475 case SIL_FAULT_MCEERR:
3476 to->si_addr = compat_ptr(from->si_addr);
3477 to->si_addr_lsb = from->si_addr_lsb;
3479 case SIL_FAULT_BNDERR:
3480 to->si_addr = compat_ptr(from->si_addr);
3481 to->si_lower = compat_ptr(from->si_lower);
3482 to->si_upper = compat_ptr(from->si_upper);
3484 case SIL_FAULT_PKUERR:
3485 to->si_addr = compat_ptr(from->si_addr);
3486 to->si_pkey = from->si_pkey;
3488 case SIL_PERF_EVENT:
3489 to->si_addr = compat_ptr(from->si_addr);
3490 to->si_perf_data = from->si_perf_data;
3491 to->si_perf_type = from->si_perf_type;
3494 to->si_pid = from->si_pid;
3495 to->si_uid = from->si_uid;
3496 to->si_status = from->si_status;
3497 #ifdef CONFIG_X86_X32_ABI
3498 if (in_x32_syscall()) {
3499 to->si_utime = from->_sifields._sigchld_x32._utime;
3500 to->si_stime = from->_sifields._sigchld_x32._stime;
3504 to->si_utime = from->si_utime;
3505 to->si_stime = from->si_stime;
3509 to->si_pid = from->si_pid;
3510 to->si_uid = from->si_uid;
3511 to->si_int = from->si_int;
3514 to->si_call_addr = compat_ptr(from->si_call_addr);
3515 to->si_syscall = from->si_syscall;
3516 to->si_arch = from->si_arch;
3522 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3523 const struct compat_siginfo __user *ufrom)
3525 struct compat_siginfo from;
3527 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3530 from.si_signo = signo;
3531 return post_copy_siginfo_from_user32(to, &from);
3534 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3535 const struct compat_siginfo __user *ufrom)
3537 struct compat_siginfo from;
3539 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3542 return post_copy_siginfo_from_user32(to, &from);
3544 #endif /* CONFIG_COMPAT */
3547 * do_sigtimedwait - wait for queued signals specified in @which
3548 * @which: queued signals to wait for
3549 * @info: if non-null, the signal's siginfo is returned here
3550 * @ts: upper bound on process time suspension
3552 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3553 const struct timespec64 *ts)
3555 ktime_t *to = NULL, timeout = KTIME_MAX;
3556 struct task_struct *tsk = current;
3557 sigset_t mask = *which;
3561 if (!timespec64_valid(ts))
3563 timeout = timespec64_to_ktime(*ts);
3568 * Invert the set of allowed signals to get those we want to block.
3570 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3573 spin_lock_irq(&tsk->sighand->siglock);
3574 sig = dequeue_signal(tsk, &mask, info);
3575 if (!sig && timeout) {
3577 * None ready; temporarily unblock those we're interested in
3578 * while we are sleeping, so that we'll be awakened when
3579 * they arrive. Unblocking is always fine, we can avoid
3580 * set_current_blocked().
3582 tsk->real_blocked = tsk->blocked;
3583 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3584 recalc_sigpending();
3585 spin_unlock_irq(&tsk->sighand->siglock);
3587 __set_current_state(TASK_INTERRUPTIBLE);
3588 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3590 spin_lock_irq(&tsk->sighand->siglock);
3591 __set_task_blocked(tsk, &tsk->real_blocked);
3592 sigemptyset(&tsk->real_blocked);
3593 sig = dequeue_signal(tsk, &mask, info);
3595 spin_unlock_irq(&tsk->sighand->siglock);
3599 return ret ? -EINTR : -EAGAIN;
3603 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3604 * in @uthese
3605 * @uthese: queued signals to wait for
3606 * @uinfo: if non-null, the signal's siginfo is returned here
3607 * @uts: upper bound on process time suspension
3608 * @sigsetsize: size of sigset_t type
3610 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3611 siginfo_t __user *, uinfo,
3612 const struct __kernel_timespec __user *, uts,
3616 struct timespec64 ts;
3617 kernel_siginfo_t info;
3620 /* XXX: Don't preclude handling different sized sigset_t's. */
3621 if (sigsetsize != sizeof(sigset_t))
3624 if (copy_from_user(&these, uthese, sizeof(these)))
3628 if (get_timespec64(&ts, uts))
3632 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3634 if (ret > 0 && uinfo) {
3635 if (copy_siginfo_to_user(uinfo, &info))
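/*
 * Editor's note: a userspace sketch (not kernel code) of the libc
 * sigtimedwait() wrapper over this syscall. The signal must be blocked
 * first, otherwise it may be delivered asynchronously instead of being
 * picked up by dequeue_signal() in do_sigtimedwait().
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <time.h>

static int wait_usr1(siginfo_t *si)
{
	sigset_t set;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	return sigtimedwait(&set, si, &ts); /* -1 with EAGAIN on timeout */
}
#endif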
3642 #ifdef CONFIG_COMPAT_32BIT_TIME
3643 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3644 siginfo_t __user *, uinfo,
3645 const struct old_timespec32 __user *, uts,
3649 struct timespec64 ts;
3650 kernel_siginfo_t info;
3653 if (sigsetsize != sizeof(sigset_t))
3656 if (copy_from_user(&these, uthese, sizeof(these)))
3660 if (get_old_timespec32(&ts, uts))
3664 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3666 if (ret > 0 && uinfo) {
3667 if (copy_siginfo_to_user(uinfo, &info))
3675 #ifdef CONFIG_COMPAT
3676 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3677 struct compat_siginfo __user *, uinfo,
3678 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3681 struct timespec64 t;
3682 kernel_siginfo_t info;
3685 if (sigsetsize != sizeof(sigset_t))
3688 if (get_compat_sigset(&s, uthese))
3692 if (get_timespec64(&t, uts))
3696 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3698 if (ret > 0 && uinfo) {
3699 if (copy_siginfo_to_user32(uinfo, &info))
3706 #ifdef CONFIG_COMPAT_32BIT_TIME
3707 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3708 struct compat_siginfo __user *, uinfo,
3709 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3712 struct timespec64 t;
3713 kernel_siginfo_t info;
3716 if (sigsetsize != sizeof(sigset_t))
3719 if (get_compat_sigset(&s, uthese))
3723 if (get_old_timespec32(&t, uts))
3727 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3729 if (ret > 0 && uinfo) {
3730 if (copy_siginfo_to_user32(uinfo, &info))
3739 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3741 clear_siginfo(info);
3742 info->si_signo = sig;
3744 info->si_code = SI_USER;
3745 info->si_pid = task_tgid_vnr(current);
3746 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3750 * sys_kill - send a signal to a process
3751 * @pid: the PID of the process
3752 * @sig: signal to be sent
3754 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3756 struct kernel_siginfo info;
3758 prepare_kill_siginfo(sig, &info);
3760 return kill_something_info(sig, &info, pid);
3764 * Verify that the signaler and signalee either are in the same pid namespace
3765 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3766 * namespace.
3768 static bool access_pidfd_pidns(struct pid *pid)
3770 struct pid_namespace *active = task_active_pid_ns(current);
3771 struct pid_namespace *p = ns_of_pid(pid);
3784 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3785 siginfo_t __user *info)
3787 #ifdef CONFIG_COMPAT
3789 * Avoid hooking up compat syscalls and instead handle necessary
3790 * conversions here. Note, this is a stop-gap measure and should not be
3791 * considered a generic solution.
3793 if (in_compat_syscall())
3794 return copy_siginfo_from_user32(
3795 kinfo, (struct compat_siginfo __user *)info);
3797 return copy_siginfo_from_user(kinfo, info);
3800 static struct pid *pidfd_to_pid(const struct file *file)
3804 pid = pidfd_pid(file);
3808 return tgid_pidfd_to_pid(file);
3812 * sys_pidfd_send_signal - Signal a process through a pidfd
3813 * @pidfd: file descriptor of the process
3814 * @sig: signal to send
3815 * @info: signal info
3816 * @flags: future flags
3818 * The syscall currently only signals via PIDTYPE_PID which covers
3819 * kill(<positive-pid>, <signal>). It does not signal threads or process
3820 * groups.
3821 * In order to extend the syscall to threads and process groups the @flags
3822 * argument should be used. In essence, the @flags argument will determine
3823 * what is signaled and not the file descriptor itself. Put another way,
3824 * grouping is a property of the @flags argument, not a property of the file
3825 * descriptor.
3827 * Return: 0 on success, negative errno on failure
3829 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3830 siginfo_t __user *, info, unsigned int, flags)
3835 kernel_siginfo_t kinfo;
3837 /* Enforce that flags is 0 until we add an extension. */
3845 /* Is this a pidfd? */
3846 pid = pidfd_to_pid(f.file);
3853 if (!access_pidfd_pidns(pid))
3857 ret = copy_siginfo_from_user_any(&kinfo, info);
3862 if (unlikely(sig != kinfo.si_signo))
3865 /* Only allow sending arbitrary signals to yourself. */
3867 if ((task_pid(current) != pid) &&
3868 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3871 prepare_kill_siginfo(sig, &kinfo);
3874 ret = kill_pid_info(sig, &kinfo, pid);
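/*
 * Editor's note: a userspace sketch (not kernel code). glibc provides no
 * wrapper for this syscall, so the raw syscall numbers are used; both are
 * assumed to be defined by the installed kernel headers. Passing a NULL
 * info makes the kernel fill in a kill()-style siginfo via
 * prepare_kill_siginfo() above, and flags must be 0.
 */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int kill_via_pidfd(pid_t pid)
{
	int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);

	if (pidfd < 0)
		return -1;
	ret = syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
	close(pidfd);
	return ret;
}
#endif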
3882 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3884 struct task_struct *p;
3888 p = find_task_by_vpid(pid);
3889 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3890 error = check_kill_permission(sig, info, p);
3892 * The null signal is a permissions and process existence
3893 * probe. No signal is actually delivered.
3895 if (!error && sig) {
3896 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3898 * If lock_task_sighand() failed we pretend the task
3899 * dies after receiving the signal. The window is tiny,
3900 * and the signal is private anyway.
3902 if (unlikely(error == -ESRCH))
3911 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3913 struct kernel_siginfo info;
3915 clear_siginfo(&info);
3916 info.si_signo = sig;
3918 info.si_code = SI_TKILL;
3919 info.si_pid = task_tgid_vnr(current);
3920 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3922 return do_send_specific(tgid, pid, sig, &info);
3926 * sys_tgkill - send signal to one specific thread
3927 * @tgid: the thread group ID of the thread
3928 * @pid: the PID of the thread
3929 * @sig: signal to be sent
3931 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3932 * exists but no longer belongs to the target process. This
3933 * method solves the problem of threads exiting and PIDs getting reused.
3935 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3937 /* This is only valid for single tasks */
3938 if (pid <= 0 || tgid <= 0)
3941 return do_tkill(tgid, pid, sig);
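/*
 * Editor's note: a userspace sketch (not kernel code). Pairing the thread
 * ID with its owning tgid is what makes tgkill() safe against TID reuse,
 * per the comment above; the helper name is illustrative.
 */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int signal_thread(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}
#endif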
3945 * sys_tkill - send signal to one specific task
3946 * @pid: the PID of the task
3947 * @sig: signal to be sent
3949 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3951 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3953 /* This is only valid for single tasks */
3957 return do_tkill(0, pid, sig);
3960 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3962 /* Not even root can pretend to send signals from the kernel.
3963 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3965 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3966 (task_pid_vnr(current) != pid))
3969 /* POSIX.1b doesn't mention process groups. */
3970 return kill_proc_info(sig, info, pid);
3974 * sys_rt_sigqueueinfo - send signal information to a process
3975 * @pid: the PID of the process
3976 * @sig: signal to be sent
3977 * @uinfo: signal info to be sent
3979 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3980 siginfo_t __user *, uinfo)
3982 kernel_siginfo_t info;
3983 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3986 return do_rt_sigqueueinfo(pid, sig, &info);
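/*
 * Editor's note: a userspace sketch (not kernel code). The libc sigqueue()
 * wrapper reaches this syscall with si_code set to SI_QUEUE (negative),
 * which is why the si_code >= 0 check above lets it through; the sigval
 * payload arrives in the receiver's si_int/si_ptr.
 */
#if 0
#include <signal.h>

static int send_value(pid_t pid, int value)
{
	union sigval sv = { .sival_int = value };

	return sigqueue(pid, SIGRTMIN, sv);
}
#endif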
3989 #ifdef CONFIG_COMPAT
3990 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3993 struct compat_siginfo __user *, uinfo)
3995 kernel_siginfo_t info;
3996 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3999 return do_rt_sigqueueinfo(pid, sig, &info);
4003 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4005 /* This is only valid for single tasks */
4006 if (pid <= 0 || tgid <= 0)
4009 /* Not even root can pretend to send signals from the kernel.
4010 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4012 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4013 (task_pid_vnr(current) != pid))
4016 return do_send_specific(tgid, pid, sig, info);
4019 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4020 siginfo_t __user *, uinfo)
4022 kernel_siginfo_t info;
4023 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4026 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4029 #ifdef CONFIG_COMPAT
4030 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4034 struct compat_siginfo __user *, uinfo)
4036 kernel_siginfo_t info;
4037 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4040 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4045 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4047 void kernel_sigaction(int sig, __sighandler_t action)
4049 spin_lock_irq(&current->sighand->siglock);
4050 current->sighand->action[sig - 1].sa.sa_handler = action;
4051 if (action == SIG_IGN) {
4055 sigaddset(&mask, sig);
4057 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4058 flush_sigqueue_mask(&mask, &current->pending);
4059 recalc_sigpending();
4061 spin_unlock_irq(&current->sighand->siglock);
4063 EXPORT_SYMBOL(kernel_sigaction);
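/*
 * Editor's note: a sketch, under stated assumptions, of the usual consumer
 * (not from this file): a kthread opting in to a signal through
 * allow_signal(), which is a thin inline wrapper around kernel_sigaction().
 * The thread function and its loop are hypothetical.
 */
#if 0
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>

static int my_kthread(void *unused)
{
	allow_signal(SIGTERM);
	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(HZ);
		if (signal_pending(current))
			break;		/* asked to terminate */
	}
	return 0;
}
#endif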
4065 void __weak sigaction_compat_abi(struct k_sigaction *act,
4066 struct k_sigaction *oact)
4070 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4072 struct task_struct *p = current, *t;
4073 struct k_sigaction *k;
4076 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4079 k = &p->sighand->action[sig-1];
4081 spin_lock_irq(&p->sighand->siglock);
4086 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4087 * e.g. by having an architecture use the bit in their uapi.
4089 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4092 * Clear unknown flag bits in order to allow userspace to detect missing
4093 * support for flag bits and to allow the kernel to use non-uapi bits
4097 act->sa.sa_flags &= UAPI_SA_FLAGS;
4099 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4101 sigaction_compat_abi(act, oact);
4104 sigdelsetmask(&act->sa.sa_mask,
4105 sigmask(SIGKILL) | sigmask(SIGSTOP));
4109 * "Setting a signal action to SIG_IGN for a signal that is
4110 * pending shall cause the pending signal to be discarded,
4111 * whether or not it is blocked."
4113 * "Setting a signal action to SIG_DFL for a signal that is
4114 * pending and whose default action is to ignore the signal
4115 * (for example, SIGCHLD), shall cause the pending signal to
4116 * be discarded, whether or not it is blocked"
4118 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4120 sigaddset(&mask, sig);
4121 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4122 for_each_thread(p, t)
4123 flush_sigqueue_mask(&mask, &t->pending);
4127 spin_unlock_irq(&p->sighand->siglock);
4132 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4135 struct task_struct *t = current;
4138 memset(oss, 0, sizeof(stack_t));
4139 oss->ss_sp = (void __user *) t->sas_ss_sp;
4140 oss->ss_size = t->sas_ss_size;
4141 oss->ss_flags = sas_ss_flags(sp) |
4142 (current->sas_ss_flags & SS_FLAG_BITS);
4146 void __user *ss_sp = ss->ss_sp;
4147 size_t ss_size = ss->ss_size;
4148 unsigned ss_flags = ss->ss_flags;
4151 if (unlikely(on_sig_stack(sp)))
4154 ss_mode = ss_flags & ~SS_FLAG_BITS;
4155 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4159 if (ss_mode == SS_DISABLE) {
4163 if (unlikely(ss_size < min_ss_size))
4167 t->sas_ss_sp = (unsigned long) ss_sp;
4168 t->sas_ss_size = ss_size;
4169 t->sas_ss_flags = ss_flags;
4174 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4178 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4180 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4181 current_user_stack_pointer(),
4183 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
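/*
 * Editor's note: a userspace sketch (not kernel code). A SIGSEGV handler
 * for stack overflow can only run on an alternate stack; the handler
 * itself must then be installed with SA_ONSTACK. Names are illustrative.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static int setup_altstack(void)
{
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
		.ss_flags = 0,
	};

	if (!ss.ss_sp)
		return -1;
	return sigaltstack(&ss, NULL);
}
#endif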
4188 int restore_altstack(const stack_t __user *uss)
4191 if (copy_from_user(&new, uss, sizeof(stack_t)))
4193 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4195 /* squash all but EFAULT for now */
4199 int __save_altstack(stack_t __user *uss, unsigned long sp)
4201 struct task_struct *t = current;
4202 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4203 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4204 __put_user(t->sas_ss_size, &uss->ss_size);
4207 if (t->sas_ss_flags & SS_AUTODISARM)
4212 #ifdef CONFIG_COMPAT
4213 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4214 compat_stack_t __user *uoss_ptr)
4220 compat_stack_t uss32;
4221 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4223 uss.ss_sp = compat_ptr(uss32.ss_sp);
4224 uss.ss_flags = uss32.ss_flags;
4225 uss.ss_size = uss32.ss_size;
4227 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4228 compat_user_stack_pointer(),
4229 COMPAT_MINSIGSTKSZ);
4230 if (ret >= 0 && uoss_ptr) {
4232 memset(&old, 0, sizeof(old));
4233 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4234 old.ss_flags = uoss.ss_flags;
4235 old.ss_size = uoss.ss_size;
4236 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4242 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4243 const compat_stack_t __user *, uss_ptr,
4244 compat_stack_t __user *, uoss_ptr)
4246 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4249 int compat_restore_altstack(const compat_stack_t __user *uss)
4251 int err = do_compat_sigaltstack(uss, NULL);
4252 /* squash all but -EFAULT for now */
4253 return err == -EFAULT ? err : 0;
4256 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4259 struct task_struct *t = current;
4260 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4262 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4263 __put_user(t->sas_ss_size, &uss->ss_size);
4266 if (t->sas_ss_flags & SS_AUTODISARM)
4272 #ifdef __ARCH_WANT_SYS_SIGPENDING
4275 * sys_sigpending - examine pending signals
4276 * @uset: where the mask of pending signals is returned
4278 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4282 if (sizeof(old_sigset_t) > sizeof(*uset))
4285 do_sigpending(&set);
4287 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4293 #ifdef CONFIG_COMPAT
4294 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4298 do_sigpending(&set);
4300 return put_user(set.sig[0], set32);
4306 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4308 * sys_sigprocmask - examine and change blocked signals
4309 * @how: whether to add, remove, or set signals
4310 * @nset: signals to add or remove (if non-null)
4311 * @oset: previous value of signal mask if non-null
4313 * Some platforms have their own version with special arguments;
4314 * others support only sys_rt_sigprocmask.
4317 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4318 old_sigset_t __user *, oset)
4320 old_sigset_t old_set, new_set;
4321 sigset_t new_blocked;
4323 old_set = current->blocked.sig[0];
4326 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4329 new_blocked = current->blocked;
4333 sigaddsetmask(&new_blocked, new_set);
4336 sigdelsetmask(&new_blocked, new_set);
4339 new_blocked.sig[0] = new_set;
4345 set_current_blocked(&new_blocked);
4349 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4355 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4357 #ifndef CONFIG_ODD_RT_SIGACTION
4359 * sys_rt_sigaction - alter an action taken by a process
4360 * @sig: signal to be sent
4361 * @act: new sigaction
4362 * @oact: used to save the previous sigaction
4363 * @sigsetsize: size of sigset_t type
4365 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4366 const struct sigaction __user *, act,
4367 struct sigaction __user *, oact,
4370 struct k_sigaction new_sa, old_sa;
4373 /* XXX: Don't preclude handling different sized sigset_t's. */
4374 if (sigsetsize != sizeof(sigset_t))
4377 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4380 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4384 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
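/*
 * Editor's note: a userspace sketch (not kernel code) of the libc
 * sigaction() wrapper over this syscall. With SA_SIGINFO the handler
 * receives the siginfo that copy_siginfo_to_user() formats, and the
 * sa_mask set here is applied during delivery by signal_delivered() above.
 */
#if 0
#include <signal.h>
#include <string.h>

static void on_segv(int sig, siginfo_t *si, void *ucontext)
{
	(void)sig; (void)si; (void)ucontext;
	/* si->si_addr holds the faulting address (SIL_FAULT layout) */
}

static int install_segv_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_segv;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}
#endif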
4389 #ifdef CONFIG_COMPAT
4390 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4391 const struct compat_sigaction __user *, act,
4392 struct compat_sigaction __user *, oact,
4393 compat_size_t, sigsetsize)
4395 struct k_sigaction new_ka, old_ka;
4396 #ifdef __ARCH_HAS_SA_RESTORER
4397 compat_uptr_t restorer;
4401 /* XXX: Don't preclude handling different sized sigset_t's. */
4402 if (sigsetsize != sizeof(compat_sigset_t))
4406 compat_uptr_t handler;
4407 ret = get_user(handler, &act->sa_handler);
4408 new_ka.sa.sa_handler = compat_ptr(handler);
4409 #ifdef __ARCH_HAS_SA_RESTORER
4410 ret |= get_user(restorer, &act->sa_restorer);
4411 new_ka.sa.sa_restorer = compat_ptr(restorer);
4413 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4414 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4419 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4421 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4423 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4424 sizeof(oact->sa_mask));
4425 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4426 #ifdef __ARCH_HAS_SA_RESTORER
4427 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4428 &oact->sa_restorer);
4434 #endif /* !CONFIG_ODD_RT_SIGACTION */
4436 #ifdef CONFIG_OLD_SIGACTION
4437 SYSCALL_DEFINE3(sigaction, int, sig,
4438 const struct old_sigaction __user *, act,
4439 struct old_sigaction __user *, oact)
4441 struct k_sigaction new_ka, old_ka;
4446 if (!access_ok(act, sizeof(*act)) ||
4447 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4448 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4449 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4450 __get_user(mask, &act->sa_mask))
4452 #ifdef __ARCH_HAS_KA_RESTORER
4453 new_ka.ka_restorer = NULL;
4455 siginitset(&new_ka.sa.sa_mask, mask);
4458 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4461 if (!access_ok(oact, sizeof(*oact)) ||
4462 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4463 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4464 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4465 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4472 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4473 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4474 const struct compat_old_sigaction __user *, act,
4475 struct compat_old_sigaction __user *, oact)
4477 struct k_sigaction new_ka, old_ka;
4479 compat_old_sigset_t mask;
4480 compat_uptr_t handler, restorer;
4483 if (!access_ok(act, sizeof(*act)) ||
4484 __get_user(handler, &act->sa_handler) ||
4485 __get_user(restorer, &act->sa_restorer) ||
4486 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4487 __get_user(mask, &act->sa_mask))
4490 #ifdef __ARCH_HAS_KA_RESTORER
4491 new_ka.ka_restorer = NULL;
4493 new_ka.sa.sa_handler = compat_ptr(handler);
4494 new_ka.sa.sa_restorer = compat_ptr(restorer);
4495 siginitset(&new_ka.sa.sa_mask, mask);
4498 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4501 if (!access_ok(oact, sizeof(*oact)) ||
4502 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4503 &oact->sa_handler) ||
4504 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4505 &oact->sa_restorer) ||
4506 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4507 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4514 #ifdef CONFIG_SGETMASK_SYSCALL
4517 * For backwards compatibility. Functionality superseded by sigprocmask.
4519 SYSCALL_DEFINE0(sgetmask)
4522 return current->blocked.sig[0];
4525 SYSCALL_DEFINE1(ssetmask, int, newmask)
4527 int old = current->blocked.sig[0];
4530 siginitset(&newset, newmask);
4531 set_current_blocked(&newset);
4535 #endif /* CONFIG_SGETMASK_SYSCALL */
4537 #ifdef __ARCH_WANT_SYS_SIGNAL
4539 * For backwards compatibility. Functionality superseded by sigaction.
4541 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4543 struct k_sigaction new_sa, old_sa;
4546 new_sa.sa.sa_handler = handler;
4547 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4548 sigemptyset(&new_sa.sa.sa_mask);
4550 ret = do_sigaction(sig, &new_sa, &old_sa);
4552 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4554 #endif /* __ARCH_WANT_SYS_SIGNAL */
4556 #ifdef __ARCH_WANT_SYS_PAUSE
4558 SYSCALL_DEFINE0(pause)
4560 while (!signal_pending(current)) {
4561 __set_current_state(TASK_INTERRUPTIBLE);
4564 return -ERESTARTNOHAND;
4569 static int sigsuspend(sigset_t *set)
4571 current->saved_sigmask = current->blocked;
4572 set_current_blocked(set);
4574 while (!signal_pending(current)) {
4575 __set_current_state(TASK_INTERRUPTIBLE);
4578 set_restore_sigmask();
4579 return -ERESTARTNOHAND;
4583 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4584 * until a signal is received
4585 * @unewset: new signal mask value
4586 * @sigsetsize: size of sigset_t type
4588 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4592 /* XXX: Don't preclude handling different sized sigset_t's. */
4593 if (sigsetsize != sizeof(sigset_t))
4596 if (copy_from_user(&newset, unewset, sizeof(newset)))
4598 return sigsuspend(&newset);
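/*
 * Editor's note: a userspace sketch (not kernel code). Doing sigprocmask()
 * and pause() separately would leave a window in which the signal is lost;
 * sigsuspend() closes it exactly the way the saved_sigmask handling above
 * does.
 */
#if 0
#include <signal.h>

static void wait_for_sigchld(void)
{
	sigset_t waitmask;

	sigfillset(&waitmask);
	sigdelset(&waitmask, SIGCHLD);
	sigsuspend(&waitmask);		/* returns -1 with errno EINTR */
}
#endif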
4601 #ifdef CONFIG_COMPAT
4602 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4606 /* XXX: Don't preclude handling different sized sigset_t's. */
4607 if (sigsetsize != sizeof(sigset_t))
4610 if (get_compat_sigset(&newset, unewset))
4612 return sigsuspend(&newset);
4616 #ifdef CONFIG_OLD_SIGSUSPEND
4617 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4620 siginitset(&blocked, mask);
4621 return sigsuspend(&blocked);
4624 #ifdef CONFIG_OLD_SIGSUSPEND3
4625 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4628 siginitset(&blocked, mask);
4629 return sigsuspend(&blocked);
4633 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4638 static inline void siginfo_buildtime_checks(void)
4640 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4642 /* Verify the offsets in the two siginfos match */
4643 #define CHECK_OFFSET(field) \
4644 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4647 CHECK_OFFSET(si_pid);
4648 CHECK_OFFSET(si_uid);
4651 CHECK_OFFSET(si_tid);
4652 CHECK_OFFSET(si_overrun);
4653 CHECK_OFFSET(si_value);
4656 CHECK_OFFSET(si_pid);
4657 CHECK_OFFSET(si_uid);
4658 CHECK_OFFSET(si_value);
4661 CHECK_OFFSET(si_pid);
4662 CHECK_OFFSET(si_uid);
4663 CHECK_OFFSET(si_status);
4664 CHECK_OFFSET(si_utime);
4665 CHECK_OFFSET(si_stime);
4668 CHECK_OFFSET(si_addr);
4669 CHECK_OFFSET(si_trapno);
4670 CHECK_OFFSET(si_addr_lsb);
4671 CHECK_OFFSET(si_lower);
4672 CHECK_OFFSET(si_upper);
4673 CHECK_OFFSET(si_pkey);
4674 CHECK_OFFSET(si_perf_data);
4675 CHECK_OFFSET(si_perf_type);
4678 CHECK_OFFSET(si_band);
4679 CHECK_OFFSET(si_fd);
4682 CHECK_OFFSET(si_call_addr);
4683 CHECK_OFFSET(si_syscall);
4684 CHECK_OFFSET(si_arch);
4688 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4689 offsetof(struct siginfo, si_addr));
4690 if (sizeof(int) == sizeof(void __user *)) {
4691 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4692 sizeof(void __user *));
4694 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4695 sizeof_field(struct siginfo, si_uid)) !=
4696 sizeof(void __user *));
4697 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4698 offsetof(struct siginfo, si_uid));
4700 #ifdef CONFIG_COMPAT
4701 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4702 offsetof(struct compat_siginfo, si_addr));
4703 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4704 sizeof(compat_uptr_t));
4705 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4706 sizeof_field(struct siginfo, si_pid));
4710 void __init signals_init(void)
4712 siginfo_buildtime_checks();
4714 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4717 #ifdef CONFIG_KGDB_KDB
4718 #include <linux/kdb.h>
4720 * kdb_send_sig - Allows kdb to send signals without exposing
4721 * signal internals. This function checks if the required locks are
4722 * available before calling the main signal code, to avoid kdb
4723 * deadlocks.
4725 void kdb_send_sig(struct task_struct *t, int sig)
4727 static struct task_struct *kdb_prev_t;
4729 if (!spin_trylock(&t->sighand->siglock)) {
4730 kdb_printf("Can't do kill command now.\n"
4731 "The sigmask lock is held somewhere else in "
4732 "kernel, try again later\n");
4735 new_t = kdb_prev_t != t;
4737 if (t->state != TASK_RUNNING && new_t) {
4738 spin_unlock(&t->sighand->siglock);
4739 kdb_printf("Process is not RUNNING, sending a signal from "
4740 "kdb risks deadlock\n"
4741 "on the run queue locks. "
4742 "The signal has _not_ been sent.\n"
4743 "Reissue the kill command if you want to risk "
4747 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4748 spin_unlock(&t->sighand->siglock);
4750 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4753 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4755 #endif /* CONFIG_KGDB_KDB */