1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/kernel/signal.c
5 * Copyright (C) 1991, 1992 Linus Torvalds
7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
9 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 * Changes to use preallocated sigqueue structures
11 * to allow signals to be sent reliably.
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
26 #include <linux/proc_fs.h>
27 #include <linux/tty.h>
28 #include <linux/binfmts.h>
29 #include <linux/coredump.h>
30 #include <linux/security.h>
31 #include <linux/syscalls.h>
32 #include <linux/ptrace.h>
33 #include <linux/signal.h>
34 #include <linux/signalfd.h>
35 #include <linux/ratelimit.h>
36 #include <linux/task_work.h>
37 #include <linux/capability.h>
38 #include <linux/freezer.h>
39 #include <linux/pid_namespace.h>
40 #include <linux/nsproxy.h>
41 #include <linux/user_namespace.h>
42 #include <linux/uprobes.h>
43 #include <linux/compat.h>
44 #include <linux/cn_proc.h>
45 #include <linux/compiler.h>
46 #include <linux/posix-timers.h>
47 #include <linux/cgroup.h>
48 #include <linux/audit.h>
49 #include <linux/sysctl.h>
51 #define CREATE_TRACE_POINTS
52 #include <trace/events/signal.h>
54 #include <asm/param.h>
55 #include <linux/uaccess.h>
56 #include <asm/unistd.h>
57 #include <asm/siginfo.h>
58 #include <asm/cacheflush.h>
59 #include <asm/syscall.h> /* for syscall_get_* */
62 * SLAB caches for signal bits.
65 static struct kmem_cache *sigqueue_cachep;
67 int print_fatal_signals __read_mostly;
69 static void __user *sig_handler(struct task_struct *t, int sig)
71 return t->sighand->action[sig - 1].sa.sa_handler;
74 static inline bool sig_handler_ignored(void __user *handler, int sig)
76 /* Is it explicitly or implicitly ignored? */
77 return handler == SIG_IGN ||
78 (handler == SIG_DFL && sig_kernel_ignore(sig));
81 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
85 handler = sig_handler(t, sig);
87 /* SIGKILL and SIGSTOP may not be sent to the global init */
88 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
91 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
92 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
95 /* Only allow kernel generated signals to this kthread */
96 if (unlikely((t->flags & PF_KTHREAD) &&
97 (handler == SIG_KTHREAD_KERNEL) && !force))
100 return sig_handler_ignored(handler, sig);
103 static bool sig_ignored(struct task_struct *t, int sig, bool force)
106 * Blocked signals are never ignored, since the
107 * signal handler may change by the time it is
110 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
114 * Tracers may want to know about even ignored signal unless it
115 * is SIGKILL which can't be reported anyway but can be ignored
116 * by SIGNAL_UNKILLABLE task.
118 if (t->ptrace && sig != SIGKILL)
121 return sig_task_ignored(t, sig, force);
125 * Re-calculate pending state from the set of locally pending
126 * signals, globally pending signals, and blocked signals.
128 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
133 switch (_NSIG_WORDS) {
135 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
136 ready |= signal->sig[i] &~ blocked->sig[i];
139 case 4: ready = signal->sig[3] &~ blocked->sig[3];
140 ready |= signal->sig[2] &~ blocked->sig[2];
141 ready |= signal->sig[1] &~ blocked->sig[1];
142 ready |= signal->sig[0] &~ blocked->sig[0];
145 case 2: ready = signal->sig[1] &~ blocked->sig[1];
146 ready |= signal->sig[0] &~ blocked->sig[0];
149 case 1: ready = signal->sig[0] &~ blocked->sig[0];
154 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
156 static bool recalc_sigpending_tsk(struct task_struct *t)
158 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
159 PENDING(&t->pending, &t->blocked) ||
160 PENDING(&t->signal->shared_pending, &t->blocked) ||
161 cgroup_task_frozen(t)) {
162 set_tsk_thread_flag(t, TIF_SIGPENDING);
167 * We must never clear the flag in another thread, or in current
168 * when it's possible the current syscall is returning -ERESTART*.
169 * So we don't clear it here, and only callers who know they should do.
175 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
176 * This is superfluous when called on current, the wakeup is a harmless no-op.
178 void recalc_sigpending_and_wake(struct task_struct *t)
180 if (recalc_sigpending_tsk(t))
181 signal_wake_up(t, 0);
184 void recalc_sigpending(void)
186 if (!recalc_sigpending_tsk(current) && !freezing(current))
187 clear_thread_flag(TIF_SIGPENDING);
190 EXPORT_SYMBOL(recalc_sigpending);
192 void calculate_sigpending(void)
194 /* Have any signals or users of TIF_SIGPENDING been delayed
197 spin_lock_irq(¤t->sighand->siglock);
198 set_tsk_thread_flag(current, TIF_SIGPENDING);
200 spin_unlock_irq(¤t->sighand->siglock);
203 /* Given the mask, find the first available signal that should be serviced. */
205 #define SYNCHRONOUS_MASK \
206 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
207 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
209 int next_signal(struct sigpending *pending, sigset_t *mask)
211 unsigned long i, *s, *m, x;
214 s = pending->signal.sig;
218 * Handle the first word specially: it contains the
219 * synchronous signals that need to be dequeued first.
223 if (x & SYNCHRONOUS_MASK)
224 x &= SYNCHRONOUS_MASK;
229 switch (_NSIG_WORDS) {
231 for (i = 1; i < _NSIG_WORDS; ++i) {
235 sig = ffz(~x) + i*_NSIG_BPW + 1;
244 sig = ffz(~x) + _NSIG_BPW + 1;
255 static inline void print_dropped_signal(int sig)
257 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
259 if (!print_fatal_signals)
262 if (!__ratelimit(&ratelimit_state))
265 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
266 current->comm, current->pid, sig);
270 * task_set_jobctl_pending - set jobctl pending bits
272 * @mask: pending bits to set
274 * Clear @mask from @task->jobctl. @mask must be subset of
275 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
276 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
277 * cleared. If @task is already being killed or exiting, this function
281 * Must be called with @task->sighand->siglock held.
284 * %true if @mask is set, %false if made noop because @task was dying.
286 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
288 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
289 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
290 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
292 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
295 if (mask & JOBCTL_STOP_SIGMASK)
296 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
298 task->jobctl |= mask;
303 * task_clear_jobctl_trapping - clear jobctl trapping bit
306 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
307 * Clear it and wake up the ptracer. Note that we don't need any further
308 * locking. @task->siglock guarantees that @task->parent points to the
312 * Must be called with @task->sighand->siglock held.
314 void task_clear_jobctl_trapping(struct task_struct *task)
316 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
317 task->jobctl &= ~JOBCTL_TRAPPING;
318 smp_mb(); /* advised by wake_up_bit() */
319 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
324 * task_clear_jobctl_pending - clear jobctl pending bits
326 * @mask: pending bits to clear
328 * Clear @mask from @task->jobctl. @mask must be subset of
329 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
330 * STOP bits are cleared together.
332 * If clearing of @mask leaves no stop or trap pending, this function calls
333 * task_clear_jobctl_trapping().
336 * Must be called with @task->sighand->siglock held.
338 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
340 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
342 if (mask & JOBCTL_STOP_PENDING)
343 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
345 task->jobctl &= ~mask;
347 if (!(task->jobctl & JOBCTL_PENDING_MASK))
348 task_clear_jobctl_trapping(task);
352 * task_participate_group_stop - participate in a group stop
353 * @task: task participating in a group stop
355 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
356 * Group stop states are cleared and the group stop count is consumed if
357 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
358 * stop, the appropriate `SIGNAL_*` flags are set.
361 * Must be called with @task->sighand->siglock held.
364 * %true if group stop completion should be notified to the parent, %false
367 static bool task_participate_group_stop(struct task_struct *task)
369 struct signal_struct *sig = task->signal;
370 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
372 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
374 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
379 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
380 sig->group_stop_count--;
383 * Tell the caller to notify completion iff we are entering into a
384 * fresh group stop. Read comment in do_signal_stop() for details.
386 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
387 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
393 void task_join_group_stop(struct task_struct *task)
395 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
396 struct signal_struct *sig = current->signal;
398 if (sig->group_stop_count) {
399 sig->group_stop_count++;
400 mask |= JOBCTL_STOP_CONSUME;
401 } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
404 /* Have the new thread join an on-going signal group stop */
405 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
409 * allocate a new signal queue record
410 * - this may be called without locks if and only if t == current, otherwise an
411 * appropriate lock must be held to stop the target task from exiting
413 static struct sigqueue *
414 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
415 int override_rlimit, const unsigned int sigqueue_flags)
417 struct sigqueue *q = NULL;
418 struct ucounts *ucounts = NULL;
422 * Protect access to @t credentials. This can go away when all
423 * callers hold rcu read lock.
425 * NOTE! A pending signal will hold on to the user refcount,
426 * and we get/put the refcount only when the sigpending count
427 * changes from/to zero.
430 ucounts = task_ucounts(t);
431 sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
436 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
437 q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
439 print_dropped_signal(sig);
442 if (unlikely(q == NULL)) {
443 dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
445 INIT_LIST_HEAD(&q->list);
446 q->flags = sigqueue_flags;
447 q->ucounts = ucounts;
452 static void __sigqueue_free(struct sigqueue *q)
454 if (q->flags & SIGQUEUE_PREALLOC)
457 dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
460 kmem_cache_free(sigqueue_cachep, q);
463 void flush_sigqueue(struct sigpending *queue)
467 sigemptyset(&queue->signal);
468 while (!list_empty(&queue->list)) {
469 q = list_entry(queue->list.next, struct sigqueue , list);
470 list_del_init(&q->list);
476 * Flush all pending signals for this kthread.
478 void flush_signals(struct task_struct *t)
482 spin_lock_irqsave(&t->sighand->siglock, flags);
483 clear_tsk_thread_flag(t, TIF_SIGPENDING);
484 flush_sigqueue(&t->pending);
485 flush_sigqueue(&t->signal->shared_pending);
486 spin_unlock_irqrestore(&t->sighand->siglock, flags);
488 EXPORT_SYMBOL(flush_signals);
490 #ifdef CONFIG_POSIX_TIMERS
491 static void __flush_itimer_signals(struct sigpending *pending)
493 sigset_t signal, retain;
494 struct sigqueue *q, *n;
496 signal = pending->signal;
497 sigemptyset(&retain);
499 list_for_each_entry_safe(q, n, &pending->list, list) {
500 int sig = q->info.si_signo;
502 if (likely(q->info.si_code != SI_TIMER)) {
503 sigaddset(&retain, sig);
505 sigdelset(&signal, sig);
506 list_del_init(&q->list);
511 sigorsets(&pending->signal, &signal, &retain);
514 void flush_itimer_signals(void)
516 struct task_struct *tsk = current;
519 spin_lock_irqsave(&tsk->sighand->siglock, flags);
520 __flush_itimer_signals(&tsk->pending);
521 __flush_itimer_signals(&tsk->signal->shared_pending);
522 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
526 void ignore_signals(struct task_struct *t)
530 for (i = 0; i < _NSIG; ++i)
531 t->sighand->action[i].sa.sa_handler = SIG_IGN;
537 * Flush all handlers for a task.
541 flush_signal_handlers(struct task_struct *t, int force_default)
544 struct k_sigaction *ka = &t->sighand->action[0];
545 for (i = _NSIG ; i != 0 ; i--) {
546 if (force_default || ka->sa.sa_handler != SIG_IGN)
547 ka->sa.sa_handler = SIG_DFL;
549 #ifdef __ARCH_HAS_SA_RESTORER
550 ka->sa.sa_restorer = NULL;
552 sigemptyset(&ka->sa.sa_mask);
557 bool unhandled_signal(struct task_struct *tsk, int sig)
559 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
560 if (is_global_init(tsk))
563 if (handler != SIG_IGN && handler != SIG_DFL)
566 /* If dying, we handle all new signals by ignoring them */
567 if (fatal_signal_pending(tsk))
570 /* if ptraced, let the tracer determine */
574 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
577 struct sigqueue *q, *first = NULL;
580 * Collect the siginfo appropriate to this signal. Check if
581 * there is another siginfo for the same signal.
583 list_for_each_entry(q, &list->list, list) {
584 if (q->info.si_signo == sig) {
591 sigdelset(&list->signal, sig);
595 list_del_init(&first->list);
596 copy_siginfo(info, &first->info);
599 (first->flags & SIGQUEUE_PREALLOC) &&
600 (info->si_code == SI_TIMER) &&
601 (info->si_sys_private);
603 __sigqueue_free(first);
606 * Ok, it wasn't in the queue. This must be
607 * a fast-pathed signal or we must have been
608 * out of queue space. So zero out the info.
611 info->si_signo = sig;
613 info->si_code = SI_USER;
619 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
620 kernel_siginfo_t *info, bool *resched_timer)
622 int sig = next_signal(pending, mask);
625 collect_signal(sig, pending, info, resched_timer);
630 * Dequeue a signal and return the element to the caller, which is
631 * expected to free it.
633 * All callers have to hold the siglock.
635 int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
636 kernel_siginfo_t *info, enum pid_type *type)
638 bool resched_timer = false;
641 /* We only dequeue private signals from ourselves, we don't let
642 * signalfd steal them
645 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
647 *type = PIDTYPE_TGID;
648 signr = __dequeue_signal(&tsk->signal->shared_pending,
649 mask, info, &resched_timer);
650 #ifdef CONFIG_POSIX_TIMERS
654 * itimers are process shared and we restart periodic
655 * itimers in the signal delivery path to prevent DoS
656 * attacks in the high resolution timer case. This is
657 * compliant with the old way of self-restarting
658 * itimers, as the SIGALRM is a legacy signal and only
659 * queued once. Changing the restart behaviour to
660 * restart the timer in the signal dequeue path is
661 * reducing the timer noise on heavy loaded !highres
664 if (unlikely(signr == SIGALRM)) {
665 struct hrtimer *tmr = &tsk->signal->real_timer;
667 if (!hrtimer_is_queued(tmr) &&
668 tsk->signal->it_real_incr != 0) {
669 hrtimer_forward(tmr, tmr->base->get_time(),
670 tsk->signal->it_real_incr);
671 hrtimer_restart(tmr);
681 if (unlikely(sig_kernel_stop(signr))) {
683 * Set a marker that we have dequeued a stop signal. Our
684 * caller might release the siglock and then the pending
685 * stop signal it is about to process is no longer in the
686 * pending bitmasks, but must still be cleared by a SIGCONT
687 * (and overruled by a SIGKILL). So those cases clear this
688 * shared flag after we've set it. Note that this flag may
689 * remain set after the signal we return is ignored or
690 * handled. That doesn't matter because its only purpose
691 * is to alert stop-signal processing code when another
692 * processor has come along and cleared the flag.
694 current->jobctl |= JOBCTL_STOP_DEQUEUED;
696 #ifdef CONFIG_POSIX_TIMERS
699 * Release the siglock to ensure proper locking order
700 * of timer locks outside of siglocks. Note, we leave
701 * irqs disabled here, since the posix-timers code is
702 * about to disable them again anyway.
704 spin_unlock(&tsk->sighand->siglock);
705 posixtimer_rearm(info);
706 spin_lock(&tsk->sighand->siglock);
708 /* Don't expose the si_sys_private value to userspace */
709 info->si_sys_private = 0;
714 EXPORT_SYMBOL_GPL(dequeue_signal);
716 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
718 struct task_struct *tsk = current;
719 struct sigpending *pending = &tsk->pending;
720 struct sigqueue *q, *sync = NULL;
723 * Might a synchronous signal be in the queue?
725 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
729 * Return the first synchronous signal in the queue.
731 list_for_each_entry(q, &pending->list, list) {
732 /* Synchronous signals have a positive si_code */
733 if ((q->info.si_code > SI_USER) &&
734 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
742 * Check if there is another siginfo for the same signal.
744 list_for_each_entry_continue(q, &pending->list, list) {
745 if (q->info.si_signo == sync->info.si_signo)
749 sigdelset(&pending->signal, sync->info.si_signo);
752 list_del_init(&sync->list);
753 copy_siginfo(info, &sync->info);
754 __sigqueue_free(sync);
755 return info->si_signo;
759 * Tell a process that it has a new active signal..
761 * NOTE! we rely on the previous spin_lock to
762 * lock interrupts for us! We can only be called with
763 * "siglock" held, and the local interrupt must
764 * have been disabled when that got acquired!
766 * No need to set need_resched since signal event passing
767 * goes through ->blocked
769 void signal_wake_up_state(struct task_struct *t, unsigned int state)
771 lockdep_assert_held(&t->sighand->siglock);
773 set_tsk_thread_flag(t, TIF_SIGPENDING);
776 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
777 * case. We don't check t->state here because there is a race with it
778 * executing another processor and just now entering stopped state.
779 * By using wake_up_state, we ensure the process will wake up and
780 * handle its death signal.
782 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
787 * Remove signals in mask from the pending set and queue.
788 * Returns 1 if any signals were found.
790 * All callers must be holding the siglock.
792 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
794 struct sigqueue *q, *n;
797 sigandsets(&m, mask, &s->signal);
798 if (sigisemptyset(&m))
801 sigandnsets(&s->signal, &s->signal, mask);
802 list_for_each_entry_safe(q, n, &s->list, list) {
803 if (sigismember(mask, q->info.si_signo)) {
804 list_del_init(&q->list);
810 static inline int is_si_special(const struct kernel_siginfo *info)
812 return info <= SEND_SIG_PRIV;
815 static inline bool si_fromuser(const struct kernel_siginfo *info)
817 return info == SEND_SIG_NOINFO ||
818 (!is_si_special(info) && SI_FROMUSER(info));
822 * called with RCU read lock from check_kill_permission()
824 static bool kill_ok_by_cred(struct task_struct *t)
826 const struct cred *cred = current_cred();
827 const struct cred *tcred = __task_cred(t);
829 return uid_eq(cred->euid, tcred->suid) ||
830 uid_eq(cred->euid, tcred->uid) ||
831 uid_eq(cred->uid, tcred->suid) ||
832 uid_eq(cred->uid, tcred->uid) ||
833 ns_capable(tcred->user_ns, CAP_KILL);
837 * Bad permissions for sending the signal
838 * - the caller must hold the RCU read lock
840 static int check_kill_permission(int sig, struct kernel_siginfo *info,
841 struct task_struct *t)
846 if (!valid_signal(sig))
849 if (!si_fromuser(info))
852 error = audit_signal_info(sig, t); /* Let audit system see the signal */
856 if (!same_thread_group(current, t) &&
857 !kill_ok_by_cred(t)) {
860 sid = task_session(t);
862 * We don't return the error if sid == NULL. The
863 * task was unhashed, the caller must notice this.
865 if (!sid || sid == task_session(current))
873 return security_task_kill(t, info, sig, NULL);
877 * ptrace_trap_notify - schedule trap to notify ptracer
878 * @t: tracee wanting to notify tracer
880 * This function schedules sticky ptrace trap which is cleared on the next
881 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
884 * If @t is running, STOP trap will be taken. If trapped for STOP and
885 * ptracer is listening for events, tracee is woken up so that it can
886 * re-trap for the new event. If trapped otherwise, STOP trap will be
887 * eventually taken without returning to userland after the existing traps
888 * are finished by PTRACE_CONT.
891 * Must be called with @task->sighand->siglock held.
893 static void ptrace_trap_notify(struct task_struct *t)
895 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
896 lockdep_assert_held(&t->sighand->siglock);
898 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
899 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
903 * Handle magic process-wide effects of stop/continue signals. Unlike
904 * the signal actions, these happen immediately at signal-generation
905 * time regardless of blocking, ignoring, or handling. This does the
906 * actual continuing for SIGCONT, but not the actual stopping for stop
907 * signals. The process stop is done as a signal action for SIG_DFL.
909 * Returns true if the signal should be actually delivered, otherwise
910 * it should be dropped.
912 static bool prepare_signal(int sig, struct task_struct *p, bool force)
914 struct signal_struct *signal = p->signal;
915 struct task_struct *t;
918 if (signal->flags & SIGNAL_GROUP_EXIT) {
919 if (signal->core_state)
920 return sig == SIGKILL;
922 * The process is in the middle of dying, drop the signal.
925 } else if (sig_kernel_stop(sig)) {
927 * This is a stop signal. Remove SIGCONT from all queues.
929 siginitset(&flush, sigmask(SIGCONT));
930 flush_sigqueue_mask(&flush, &signal->shared_pending);
931 for_each_thread(p, t)
932 flush_sigqueue_mask(&flush, &t->pending);
933 } else if (sig == SIGCONT) {
936 * Remove all stop signals from all queues, wake all threads.
938 siginitset(&flush, SIG_KERNEL_STOP_MASK);
939 flush_sigqueue_mask(&flush, &signal->shared_pending);
940 for_each_thread(p, t) {
941 flush_sigqueue_mask(&flush, &t->pending);
942 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
943 if (likely(!(t->ptrace & PT_SEIZED))) {
944 t->jobctl &= ~JOBCTL_STOPPED;
945 wake_up_state(t, __TASK_STOPPED);
947 ptrace_trap_notify(t);
951 * Notify the parent with CLD_CONTINUED if we were stopped.
953 * If we were in the middle of a group stop, we pretend it
954 * was already finished, and then continued. Since SIGCHLD
955 * doesn't queue we report only CLD_STOPPED, as if the next
956 * CLD_CONTINUED was dropped.
959 if (signal->flags & SIGNAL_STOP_STOPPED)
960 why |= SIGNAL_CLD_CONTINUED;
961 else if (signal->group_stop_count)
962 why |= SIGNAL_CLD_STOPPED;
966 * The first thread which returns from do_signal_stop()
967 * will take ->siglock, notice SIGNAL_CLD_MASK, and
968 * notify its parent. See get_signal().
970 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
971 signal->group_stop_count = 0;
972 signal->group_exit_code = 0;
976 return !sig_ignored(p, sig, force);
980 * Test if P wants to take SIG. After we've checked all threads with this,
981 * it's equivalent to finding no threads not blocking SIG. Any threads not
982 * blocking SIG were ruled out because they are not running and already
983 * have pending signals. Such threads will dequeue from the shared queue
984 * as soon as they're available, so putting the signal on the shared queue
985 * will be equivalent to sending it to one such thread.
987 static inline bool wants_signal(int sig, struct task_struct *p)
989 if (sigismember(&p->blocked, sig))
992 if (p->flags & PF_EXITING)
998 if (task_is_stopped_or_traced(p))
1001 return task_curr(p) || !task_sigpending(p);
1004 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
1006 struct signal_struct *signal = p->signal;
1007 struct task_struct *t;
1010 * Now find a thread we can wake up to take the signal off the queue.
1012 * Try the suggested task first (may or may not be the main thread).
1014 if (wants_signal(sig, p))
1016 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1018 * There is just one thread and it does not need to be woken.
1019 * It will dequeue unblocked signals before it runs again.
1024 * Otherwise try to find a suitable thread.
1026 t = signal->curr_target;
1027 while (!wants_signal(sig, t)) {
1029 if (t == signal->curr_target)
1031 * No thread needs to be woken.
1032 * Any eligible threads will see
1033 * the signal in the queue soon.
1037 signal->curr_target = t;
1041 * Found a killable thread. If the signal will be fatal,
1042 * then start taking the whole group down immediately.
1044 if (sig_fatal(p, sig) &&
1045 (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1046 !sigismember(&t->real_blocked, sig) &&
1047 (sig == SIGKILL || !p->ptrace)) {
1049 * This signal will be fatal to the whole group.
1051 if (!sig_kernel_coredump(sig)) {
1053 * Start a group exit and wake everybody up.
1054 * This way we don't have other threads
1055 * running and doing things after a slower
1056 * thread has the fatal signal pending.
1058 signal->flags = SIGNAL_GROUP_EXIT;
1059 signal->group_exit_code = sig;
1060 signal->group_stop_count = 0;
1063 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1064 sigaddset(&t->pending.signal, SIGKILL);
1065 signal_wake_up(t, 1);
1066 } while_each_thread(p, t);
1072 * The signal is already in the shared-pending queue.
1073 * Tell the chosen thread to wake up and dequeue it.
1075 signal_wake_up(t, sig == SIGKILL);
1079 static inline bool legacy_queue(struct sigpending *signals, int sig)
1081 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1084 static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1085 struct task_struct *t, enum pid_type type, bool force)
1087 struct sigpending *pending;
1089 int override_rlimit;
1090 int ret = 0, result;
1092 lockdep_assert_held(&t->sighand->siglock);
1094 result = TRACE_SIGNAL_IGNORED;
1095 if (!prepare_signal(sig, t, force))
1098 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1100 * Short-circuit ignored signals and support queuing
1101 * exactly one non-rt signal, so that we can get more
1102 * detailed information about the cause of the signal.
1104 result = TRACE_SIGNAL_ALREADY_PENDING;
1105 if (legacy_queue(pending, sig))
1108 result = TRACE_SIGNAL_DELIVERED;
1110 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1112 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1116 * Real-time signals must be queued if sent by sigqueue, or
1117 * some other real-time mechanism. It is implementation
1118 * defined whether kill() does so. We attempt to do so, on
1119 * the principle of least surprise, but since kill is not
1120 * allowed to fail with EAGAIN when low on memory we just
1121 * make sure at least one signal gets delivered and don't
1122 * pass on the info struct.
1125 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1127 override_rlimit = 0;
1129 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1132 list_add_tail(&q->list, &pending->list);
1133 switch ((unsigned long) info) {
1134 case (unsigned long) SEND_SIG_NOINFO:
1135 clear_siginfo(&q->info);
1136 q->info.si_signo = sig;
1137 q->info.si_errno = 0;
1138 q->info.si_code = SI_USER;
1139 q->info.si_pid = task_tgid_nr_ns(current,
1140 task_active_pid_ns(t));
1143 from_kuid_munged(task_cred_xxx(t, user_ns),
1147 case (unsigned long) SEND_SIG_PRIV:
1148 clear_siginfo(&q->info);
1149 q->info.si_signo = sig;
1150 q->info.si_errno = 0;
1151 q->info.si_code = SI_KERNEL;
1156 copy_siginfo(&q->info, info);
1159 } else if (!is_si_special(info) &&
1160 sig >= SIGRTMIN && info->si_code != SI_USER) {
1162 * Queue overflow, abort. We may abort if the
1163 * signal was rt and sent by user using something
1164 * other than kill().
1166 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1171 * This is a silent loss of information. We still
1172 * send the signal, but the *info bits are lost.
1174 result = TRACE_SIGNAL_LOSE_INFO;
1178 signalfd_notify(t, sig);
1179 sigaddset(&pending->signal, sig);
1181 /* Let multiprocess signals appear after on-going forks */
1182 if (type > PIDTYPE_TGID) {
1183 struct multiprocess_signals *delayed;
1184 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1185 sigset_t *signal = &delayed->signal;
1186 /* Can't queue both a stop and a continue signal */
1188 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1189 else if (sig_kernel_stop(sig))
1190 sigdelset(signal, SIGCONT);
1191 sigaddset(signal, sig);
1195 complete_signal(sig, t, type);
1197 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1201 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1204 switch (siginfo_layout(info->si_signo, info->si_code)) {
1213 case SIL_FAULT_TRAPNO:
1214 case SIL_FAULT_MCEERR:
1215 case SIL_FAULT_BNDERR:
1216 case SIL_FAULT_PKUERR:
1217 case SIL_FAULT_PERF_EVENT:
1225 int send_signal_locked(int sig, struct kernel_siginfo *info,
1226 struct task_struct *t, enum pid_type type)
1228 /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1231 if (info == SEND_SIG_NOINFO) {
1232 /* Force if sent from an ancestor pid namespace */
1233 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1234 } else if (info == SEND_SIG_PRIV) {
1235 /* Don't ignore kernel generated signals */
1237 } else if (has_si_pid_and_uid(info)) {
1238 /* SIGKILL and SIGSTOP is special or has ids */
1239 struct user_namespace *t_user_ns;
1242 t_user_ns = task_cred_xxx(t, user_ns);
1243 if (current_user_ns() != t_user_ns) {
1244 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1245 info->si_uid = from_kuid_munged(t_user_ns, uid);
1249 /* A kernel generated signal? */
1250 force = (info->si_code == SI_KERNEL);
1252 /* From an ancestor pid namespace? */
1253 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1258 return __send_signal_locked(sig, info, t, type, force);
1261 static void print_fatal_signal(int signr)
1263 struct pt_regs *regs = task_pt_regs(current);
1264 struct file *exe_file;
1266 exe_file = get_task_exe_file(current);
1268 pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
1269 exe_file, current->comm, signr);
1272 pr_info("%s: potentially unexpected fatal signal %d.\n",
1273 current->comm, signr);
1276 #if defined(__i386__) && !defined(__arch_um__)
1277 pr_info("code at %08lx: ", regs->ip);
1280 for (i = 0; i < 16; i++) {
1283 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1285 pr_cont("%02x ", insn);
1295 static int __init setup_print_fatal_signals(char *str)
1297 get_option (&str, &print_fatal_signals);
1302 __setup("print-fatal-signals=", setup_print_fatal_signals);
1304 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1307 unsigned long flags;
1310 if (lock_task_sighand(p, &flags)) {
1311 ret = send_signal_locked(sig, info, p, type);
1312 unlock_task_sighand(p, &flags);
1319 HANDLER_CURRENT, /* If reachable use the current handler */
1320 HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1321 HANDLER_EXIT, /* Only visible as the process exit code */
1325 * Force a signal that the process can't ignore: if necessary
1326 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1328 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1329 * since we do not want to have a signal handler that was blocked
1330 * be invoked when user space had explicitly blocked it.
1332 * We don't want to have recursive SIGSEGV's etc, for example,
1333 * that is why we also clear SIGNAL_UNKILLABLE.
1336 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1337 enum sig_handler handler)
1339 unsigned long int flags;
1340 int ret, blocked, ignored;
1341 struct k_sigaction *action;
1342 int sig = info->si_signo;
1344 spin_lock_irqsave(&t->sighand->siglock, flags);
1345 action = &t->sighand->action[sig-1];
1346 ignored = action->sa.sa_handler == SIG_IGN;
1347 blocked = sigismember(&t->blocked, sig);
1348 if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1349 action->sa.sa_handler = SIG_DFL;
1350 if (handler == HANDLER_EXIT)
1351 action->sa.sa_flags |= SA_IMMUTABLE;
1353 sigdelset(&t->blocked, sig);
1354 recalc_sigpending_and_wake(t);
1358 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1359 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1361 if (action->sa.sa_handler == SIG_DFL &&
1362 (!t->ptrace || (handler == HANDLER_EXIT)))
1363 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1364 ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1365 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1370 int force_sig_info(struct kernel_siginfo *info)
1372 return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1376 * Nuke all other threads in the group.
1378 int zap_other_threads(struct task_struct *p)
1380 struct task_struct *t = p;
1383 p->signal->group_stop_count = 0;
1385 while_each_thread(p, t) {
1386 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1387 /* Don't require de_thread to wait for the vhost_worker */
1388 if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
1391 /* Don't bother with already dead threads */
1394 sigaddset(&t->pending.signal, SIGKILL);
1395 signal_wake_up(t, 1);
1401 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1402 unsigned long *flags)
1404 struct sighand_struct *sighand;
1408 sighand = rcu_dereference(tsk->sighand);
1409 if (unlikely(sighand == NULL))
1413 * This sighand can be already freed and even reused, but
1414 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1415 * initializes ->siglock: this slab can't go away, it has
1416 * the same object type, ->siglock can't be reinitialized.
1418 * We need to ensure that tsk->sighand is still the same
1419 * after we take the lock, we can race with de_thread() or
1420 * __exit_signal(). In the latter case the next iteration
1421 * must see ->sighand == NULL.
1423 spin_lock_irqsave(&sighand->siglock, *flags);
1424 if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1426 spin_unlock_irqrestore(&sighand->siglock, *flags);
1433 #ifdef CONFIG_LOCKDEP
1434 void lockdep_assert_task_sighand_held(struct task_struct *task)
1436 struct sighand_struct *sighand;
1439 sighand = rcu_dereference(task->sighand);
1441 lockdep_assert_held(&sighand->siglock);
1449 * send signal info to all the members of a group
1451 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1452 struct task_struct *p, enum pid_type type)
1457 ret = check_kill_permission(sig, info, p);
1461 ret = do_send_sig_info(sig, info, p, type);
1467 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1468 * control characters do (^C, ^Z etc)
1469 * - the caller must hold at least a readlock on tasklist_lock
1471 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1473 struct task_struct *p = NULL;
1474 int retval, success;
1478 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1479 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1482 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1483 return success ? 0 : retval;
1486 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1489 struct task_struct *p;
1493 p = pid_task(pid, PIDTYPE_PID);
1495 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1497 if (likely(!p || error != -ESRCH))
1501 * The task was unhashed in between, try again. If it
1502 * is dead, pid_task() will return NULL, if we race with
1503 * de_thread() it will find the new leader.
1508 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1512 error = kill_pid_info(sig, info, find_vpid(pid));
1517 static inline bool kill_as_cred_perm(const struct cred *cred,
1518 struct task_struct *target)
1520 const struct cred *pcred = __task_cred(target);
1522 return uid_eq(cred->euid, pcred->suid) ||
1523 uid_eq(cred->euid, pcred->uid) ||
1524 uid_eq(cred->uid, pcred->suid) ||
1525 uid_eq(cred->uid, pcred->uid);
1529 * The usb asyncio usage of siginfo is wrong. The glibc support
1530 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1531 * AKA after the generic fields:
1532 * kernel_pid_t si_pid;
1533 * kernel_uid32_t si_uid;
1534 * sigval_t si_value;
1536 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1537 * after the generic fields is:
1538 * void __user *si_addr;
1540 * This is a practical problem when there is a 64bit big endian kernel
1541 * and a 32bit userspace. As the 32bit address will encoded in the low
1542 * 32bits of the pointer. Those low 32bits will be stored at higher
1543 * address than appear in a 32 bit pointer. So userspace will not
1544 * see the address it was expecting for it's completions.
1546 * There is nothing in the encoding that can allow
1547 * copy_siginfo_to_user32 to detect this confusion of formats, so
1548 * handle this by requiring the caller of kill_pid_usb_asyncio to
1549 * notice when this situration takes place and to store the 32bit
1550 * pointer in sival_int, instead of sival_addr of the sigval_t addr
1553 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1554 struct pid *pid, const struct cred *cred)
1556 struct kernel_siginfo info;
1557 struct task_struct *p;
1558 unsigned long flags;
1561 if (!valid_signal(sig))
1564 clear_siginfo(&info);
1565 info.si_signo = sig;
1566 info.si_errno = errno;
1567 info.si_code = SI_ASYNCIO;
1568 *((sigval_t *)&info.si_pid) = addr;
1571 p = pid_task(pid, PIDTYPE_PID);
1576 if (!kill_as_cred_perm(cred, p)) {
1580 ret = security_task_kill(p, &info, sig, cred);
1585 if (lock_task_sighand(p, &flags)) {
1586 ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1587 unlock_task_sighand(p, &flags);
1595 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1598 * kill_something_info() interprets pid in interesting ways just like kill(2).
1600 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1601 * is probably wrong. Should make it like BSD or SYSV.
1604 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1609 return kill_proc_info(sig, info, pid);
1611 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1615 read_lock(&tasklist_lock);
1617 ret = __kill_pgrp_info(sig, info,
1618 pid ? find_vpid(-pid) : task_pgrp(current));
1620 int retval = 0, count = 0;
1621 struct task_struct * p;
1623 for_each_process(p) {
1624 if (task_pid_vnr(p) > 1 &&
1625 !same_thread_group(p, current)) {
1626 int err = group_send_sig_info(sig, info, p,
1633 ret = count ? retval : -ESRCH;
1635 read_unlock(&tasklist_lock);
1641 * These are for backward compatibility with the rest of the kernel source.
1644 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1647 * Make sure legacy kernel users don't send in bad values
1648 * (normal paths check this in check_kill_permission).
1650 if (!valid_signal(sig))
1653 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1655 EXPORT_SYMBOL(send_sig_info);
1657 #define __si_special(priv) \
1658 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1661 send_sig(int sig, struct task_struct *p, int priv)
1663 return send_sig_info(sig, __si_special(priv), p);
1665 EXPORT_SYMBOL(send_sig);
1667 void force_sig(int sig)
1669 struct kernel_siginfo info;
1671 clear_siginfo(&info);
1672 info.si_signo = sig;
1674 info.si_code = SI_KERNEL;
1677 force_sig_info(&info);
1679 EXPORT_SYMBOL(force_sig);
1681 void force_fatal_sig(int sig)
1683 struct kernel_siginfo info;
1685 clear_siginfo(&info);
1686 info.si_signo = sig;
1688 info.si_code = SI_KERNEL;
1691 force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1694 void force_exit_sig(int sig)
1696 struct kernel_siginfo info;
1698 clear_siginfo(&info);
1699 info.si_signo = sig;
1701 info.si_code = SI_KERNEL;
1704 force_sig_info_to_task(&info, current, HANDLER_EXIT);
1708 * When things go south during signal handling, we
1709 * will force a SIGSEGV. And if the signal that caused
1710 * the problem was already a SIGSEGV, we'll want to
1711 * make sure we don't even try to deliver the signal..
1713 void force_sigsegv(int sig)
1716 force_fatal_sig(SIGSEGV);
1721 int force_sig_fault_to_task(int sig, int code, void __user *addr
1722 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1723 , struct task_struct *t)
1725 struct kernel_siginfo info;
1727 clear_siginfo(&info);
1728 info.si_signo = sig;
1730 info.si_code = code;
1731 info.si_addr = addr;
1734 info.si_flags = flags;
1737 return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1740 int force_sig_fault(int sig, int code, void __user *addr
1741 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1743 return force_sig_fault_to_task(sig, code, addr
1744 ___ARCH_SI_IA64(imm, flags, isr), current);
1747 int send_sig_fault(int sig, int code, void __user *addr
1748 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1749 , struct task_struct *t)
1751 struct kernel_siginfo info;
1753 clear_siginfo(&info);
1754 info.si_signo = sig;
1756 info.si_code = code;
1757 info.si_addr = addr;
1760 info.si_flags = flags;
1763 return send_sig_info(info.si_signo, &info, t);
1766 int force_sig_mceerr(int code, void __user *addr, short lsb)
1768 struct kernel_siginfo info;
1770 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1771 clear_siginfo(&info);
1772 info.si_signo = SIGBUS;
1774 info.si_code = code;
1775 info.si_addr = addr;
1776 info.si_addr_lsb = lsb;
1777 return force_sig_info(&info);
1780 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1782 struct kernel_siginfo info;
1784 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1785 clear_siginfo(&info);
1786 info.si_signo = SIGBUS;
1788 info.si_code = code;
1789 info.si_addr = addr;
1790 info.si_addr_lsb = lsb;
1791 return send_sig_info(info.si_signo, &info, t);
1793 EXPORT_SYMBOL(send_sig_mceerr);
1795 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1797 struct kernel_siginfo info;
1799 clear_siginfo(&info);
1800 info.si_signo = SIGSEGV;
1802 info.si_code = SEGV_BNDERR;
1803 info.si_addr = addr;
1804 info.si_lower = lower;
1805 info.si_upper = upper;
1806 return force_sig_info(&info);
1810 int force_sig_pkuerr(void __user *addr, u32 pkey)
1812 struct kernel_siginfo info;
1814 clear_siginfo(&info);
1815 info.si_signo = SIGSEGV;
1817 info.si_code = SEGV_PKUERR;
1818 info.si_addr = addr;
1819 info.si_pkey = pkey;
1820 return force_sig_info(&info);
1824 int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1826 struct kernel_siginfo info;
1828 clear_siginfo(&info);
1829 info.si_signo = SIGTRAP;
1831 info.si_code = TRAP_PERF;
1832 info.si_addr = addr;
1833 info.si_perf_data = sig_data;
1834 info.si_perf_type = type;
1837 * Signals generated by perf events should not terminate the whole
1838 * process if SIGTRAP is blocked, however, delivering the signal
1839 * asynchronously is better than not delivering at all. But tell user
1840 * space if the signal was asynchronous, so it can clearly be
1841 * distinguished from normal synchronous ones.
1843 info.si_perf_flags = sigismember(¤t->blocked, info.si_signo) ?
1844 TRAP_PERF_FLAG_ASYNC :
1847 return send_sig_info(info.si_signo, &info, current);
1851 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1852 * @syscall: syscall number to send to userland
1853 * @reason: filter-supplied reason code to send to userland (via si_errno)
1854 * @force_coredump: true to trigger a coredump
1856 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1858 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1860 struct kernel_siginfo info;
1862 clear_siginfo(&info);
1863 info.si_signo = SIGSYS;
1864 info.si_code = SYS_SECCOMP;
1865 info.si_call_addr = (void __user *)KSTK_EIP(current);
1866 info.si_errno = reason;
1867 info.si_arch = syscall_get_arch(current);
1868 info.si_syscall = syscall;
1869 return force_sig_info_to_task(&info, current,
1870 force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1873 /* For the crazy architectures that include trap information in
1874 * the errno field, instead of an actual errno value.
1876 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1878 struct kernel_siginfo info;
1880 clear_siginfo(&info);
1881 info.si_signo = SIGTRAP;
1882 info.si_errno = errno;
1883 info.si_code = TRAP_HWBKPT;
1884 info.si_addr = addr;
1885 return force_sig_info(&info);
1888 /* For the rare architectures that include trap information using
1891 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1893 struct kernel_siginfo info;
1895 clear_siginfo(&info);
1896 info.si_signo = sig;
1898 info.si_code = code;
1899 info.si_addr = addr;
1900 info.si_trapno = trapno;
1901 return force_sig_info(&info);
1904 /* For the rare architectures that include trap information using
1907 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1908 struct task_struct *t)
1910 struct kernel_siginfo info;
1912 clear_siginfo(&info);
1913 info.si_signo = sig;
1915 info.si_code = code;
1916 info.si_addr = addr;
1917 info.si_trapno = trapno;
1918 return send_sig_info(info.si_signo, &info, t);
1921 int kill_pgrp(struct pid *pid, int sig, int priv)
1925 read_lock(&tasklist_lock);
1926 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1927 read_unlock(&tasklist_lock);
1931 EXPORT_SYMBOL(kill_pgrp);
1933 int kill_pid(struct pid *pid, int sig, int priv)
1935 return kill_pid_info(sig, __si_special(priv), pid);
1937 EXPORT_SYMBOL(kill_pid);
1940 * These functions support sending signals using preallocated sigqueue
1941 * structures. This is needed "because realtime applications cannot
1942 * afford to lose notifications of asynchronous events, like timer
1943 * expirations or I/O completions". In the case of POSIX Timers
1944 * we allocate the sigqueue structure from the timer_create. If this
1945 * allocation fails we are able to report the failure to the application
1946 * with an EAGAIN error.
1948 struct sigqueue *sigqueue_alloc(void)
1950 return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1953 void sigqueue_free(struct sigqueue *q)
1955 unsigned long flags;
1956 spinlock_t *lock = ¤t->sighand->siglock;
1958 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1960 * We must hold ->siglock while testing q->list
1961 * to serialize with collect_signal() or with
1962 * __exit_signal()->flush_sigqueue().
1964 spin_lock_irqsave(lock, flags);
1965 q->flags &= ~SIGQUEUE_PREALLOC;
1967 * If it is queued it will be freed when dequeued,
1968 * like the "regular" sigqueue.
1970 if (!list_empty(&q->list))
1972 spin_unlock_irqrestore(lock, flags);
1978 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1980 int sig = q->info.si_signo;
1981 struct sigpending *pending;
1982 struct task_struct *t;
1983 unsigned long flags;
1986 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1992 * This function is used by POSIX timers to deliver a timer signal.
1993 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1994 * set), the signal must be delivered to the specific thread (queues
1997 * Where type is not PIDTYPE_PID, signals must be delivered to the
1998 * process. In this case, prefer to deliver to current if it is in
1999 * the same thread group as the target process, which avoids
2000 * unnecessarily waking up a potentially idle task.
2002 t = pid_task(pid, type);
2005 if (type != PIDTYPE_PID && same_thread_group(t, current))
2007 if (!likely(lock_task_sighand(t, &flags)))
2010 ret = 1; /* the signal is ignored */
2011 result = TRACE_SIGNAL_IGNORED;
2012 if (!prepare_signal(sig, t, false))
2016 if (unlikely(!list_empty(&q->list))) {
2018 * If an SI_TIMER entry is already queue just increment
2019 * the overrun count.
2021 BUG_ON(q->info.si_code != SI_TIMER);
2022 q->info.si_overrun++;
2023 result = TRACE_SIGNAL_ALREADY_PENDING;
2026 q->info.si_overrun = 0;
2028 signalfd_notify(t, sig);
2029 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
2030 list_add_tail(&q->list, &pending->list);
2031 sigaddset(&pending->signal, sig);
2032 complete_signal(sig, t, type);
2033 result = TRACE_SIGNAL_DELIVERED;
2035 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2036 unlock_task_sighand(t, &flags);
2042 static void do_notify_pidfd(struct task_struct *task)
2046 WARN_ON(task->exit_state == 0);
2047 pid = task_pid(task);
2048 wake_up_all(&pid->wait_pidfd);
2052 * Let a parent know about the death of a child.
2053 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2055 * Returns true if our parent ignored us and so we've switched to
2058 bool do_notify_parent(struct task_struct *tsk, int sig)
2060 struct kernel_siginfo info;
2061 unsigned long flags;
2062 struct sighand_struct *psig;
2063 bool autoreap = false;
2066 WARN_ON_ONCE(sig == -1);
2068 /* do_notify_parent_cldstop should have been called instead. */
2069 WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2071 WARN_ON_ONCE(!tsk->ptrace &&
2072 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2074 /* Wake up all pidfd waiters */
2075 do_notify_pidfd(tsk);
2077 if (sig != SIGCHLD) {
2079 * This is only possible if parent == real_parent.
2080 * Check if it has changed security domain.
2082 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2086 clear_siginfo(&info);
2087 info.si_signo = sig;
2090 * We are under tasklist_lock here so our parent is tied to
2091 * us and cannot change.
2093 * task_active_pid_ns will always return the same pid namespace
2094 * until a task passes through release_task.
2096 * write_lock() currently calls preempt_disable() which is the
2097 * same as rcu_read_lock(), but according to Oleg, this is not
2098 * correct to rely on this
2101 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2102 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2106 task_cputime(tsk, &utime, &stime);
2107 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2108 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2110 info.si_status = tsk->exit_code & 0x7f;
2111 if (tsk->exit_code & 0x80)
2112 info.si_code = CLD_DUMPED;
2113 else if (tsk->exit_code & 0x7f)
2114 info.si_code = CLD_KILLED;
2116 info.si_code = CLD_EXITED;
2117 info.si_status = tsk->exit_code >> 8;
2120 psig = tsk->parent->sighand;
2121 spin_lock_irqsave(&psig->siglock, flags);
2122 if (!tsk->ptrace && sig == SIGCHLD &&
2123 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2124 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2126 * We are exiting and our parent doesn't care. POSIX.1
2127 * defines special semantics for setting SIGCHLD to SIG_IGN
2128 * or setting the SA_NOCLDWAIT flag: we should be reaped
2129 * automatically and not left for our parent's wait4 call.
2130 * Rather than having the parent do it as a magic kind of
2131 * signal handler, we just set this to tell do_exit that we
2132 * can be cleaned up without becoming a zombie. Note that
2133 * we still call __wake_up_parent in this case, because a
2134 * blocked sys_wait4 might now return -ECHILD.
2136 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2137 * is implementation-defined: we do (if you don't want
2138 * it, just use SIG_IGN instead).
2141 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2145 * Send with __send_signal as si_pid and si_uid are in the
2146 * parent's namespaces.
2148 if (valid_signal(sig) && sig)
2149 __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2150 __wake_up_parent(tsk, tsk->parent);
2151 spin_unlock_irqrestore(&psig->siglock, flags);
2157 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2158 * @tsk: task reporting the state change
2159 * @for_ptracer: the notification is for ptracer
2160 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2162 * Notify @tsk's parent that the stopped/continued state has changed. If
2163 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2164 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2167 * Must be called with tasklist_lock at least read locked.
2169 static void do_notify_parent_cldstop(struct task_struct *tsk,
2170 bool for_ptracer, int why)
2172 struct kernel_siginfo info;
2173 unsigned long flags;
2174 struct task_struct *parent;
2175 struct sighand_struct *sighand;
2179 parent = tsk->parent;
2181 tsk = tsk->group_leader;
2182 parent = tsk->real_parent;
2185 clear_siginfo(&info);
2186 info.si_signo = SIGCHLD;
2189 * see comment in do_notify_parent() about the following 4 lines
2192 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2193 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2196 task_cputime(tsk, &utime, &stime);
2197 info.si_utime = nsec_to_clock_t(utime);
2198 info.si_stime = nsec_to_clock_t(stime);
2203 info.si_status = SIGCONT;
2206 info.si_status = tsk->signal->group_exit_code & 0x7f;
2209 info.si_status = tsk->exit_code & 0x7f;
2215 sighand = parent->sighand;
2216 spin_lock_irqsave(&sighand->siglock, flags);
2217 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2218 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2219 send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2221 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2223 __wake_up_parent(tsk, parent);
2224 spin_unlock_irqrestore(&sighand->siglock, flags);
2228 * This must be called with current->sighand->siglock held.
2230 * This should be the path for all ptrace stops.
2231 * We always set current->last_siginfo while stopped here.
2232 * That makes it a way to test a stopped process for
2233 * being ptrace-stopped vs being job-control-stopped.
2235 * Returns the signal the ptracer requested the code resume
2236 * with. If the code did not stop because the tracer is gone,
2237 * the stop signal remains unchanged unless clear_code.
2239 static int ptrace_stop(int exit_code, int why, unsigned long message,
2240 kernel_siginfo_t *info)
2241 __releases(¤t->sighand->siglock)
2242 __acquires(¤t->sighand->siglock)
2244 bool gstop_done = false;
2246 if (arch_ptrace_stop_needed()) {
2248 * The arch code has something special to do before a
2249 * ptrace stop. This is allowed to block, e.g. for faults
2250 * on user stack pages. We can't keep the siglock while
2251 * calling arch_ptrace_stop, so we must release it now.
2252 * To preserve proper semantics, we must do this before
2253 * any signal bookkeeping like checking group_stop_count.
2255 spin_unlock_irq(¤t->sighand->siglock);
2257 spin_lock_irq(¤t->sighand->siglock);
2261 * After this point ptrace_signal_wake_up or signal_wake_up
2262 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2263 * signal comes in. Handle previous ptrace_unlinks and fatal
2264 * signals here to prevent ptrace_stop sleeping in schedule.
2266 if (!current->ptrace || __fatal_signal_pending(current))
2269 set_special_state(TASK_TRACED);
2270 current->jobctl |= JOBCTL_TRACED;
2273 * We're committing to trapping. TRACED should be visible before
2274 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2275 * Also, transition to TRACED and updates to ->jobctl should be
2276 * atomic with respect to siglock and should be done after the arch
2277 * hook as siglock is released and regrabbed across it.
2282 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2284 * set_current_state() smp_wmb();
2286 * wait_task_stopped()
2287 * task_stopped_code()
2288 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2292 current->ptrace_message = message;
2293 current->last_siginfo = info;
2294 current->exit_code = exit_code;
2297 * If @why is CLD_STOPPED, we're trapping to participate in a group
2298 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
2299 * across siglock relocks since INTERRUPT was scheduled, PENDING
2300 * could be clear now. We act as if SIGCONT is received after
2301 * TASK_TRACED is entered - ignore it.
2303 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2304 gstop_done = task_participate_group_stop(current);
2306 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2307 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2308 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2309 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2311 /* entering a trap, clear TRAPPING */
2312 task_clear_jobctl_trapping(current);
2314 spin_unlock_irq(¤t->sighand->siglock);
2315 read_lock(&tasklist_lock);
2317 * Notify parents of the stop.
2319 * While ptraced, there are two parents - the ptracer and
2320 * the real_parent of the group_leader. The ptracer should
2321 * know about every stop while the real parent is only
2322 * interested in the completion of group stop. The states
2323 * for the two don't interact with each other. Notify
2324 * separately unless they're gonna be duplicates.
2326 if (current->ptrace)
2327 do_notify_parent_cldstop(current, true, why);
2328 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2329 do_notify_parent_cldstop(current, false, why);
2332 * The previous do_notify_parent_cldstop() invocation woke ptracer.
2333 * One a PREEMPTION kernel this can result in preemption requirement
2334 * which will be fulfilled after read_unlock() and the ptracer will be
2336 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2337 * this task wait in schedule(). If this task gets preempted then it
2338 * remains enqueued on the runqueue. The ptracer will observe this and
2339 * then sleep for a delay of one HZ tick. In the meantime this task
2340 * gets scheduled, enters schedule() and will wait for the ptracer.
2342 * This preemption point is not bad from a correctness point of
2343 * view but extends the runtime by one HZ tick time due to the
2344 * ptracer's sleep. The preempt-disable section ensures that there
2345 * will be no preemption between unlock and schedule() and so
2346 * improving the performance since the ptracer will observe that
2347 * the tracee is scheduled out once it gets on the CPU.
2349 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2350 * Therefore the task can be preempted after do_notify_parent_cldstop()
2351 * before unlocking tasklist_lock so there is no benefit in doing this.
2353 * In fact disabling preemption is harmful on PREEMPT_RT because
2354 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2355 * with preemption disabled due to the 'sleeping' spinlock
2356 * substitution of RT.
2358 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2360 read_unlock(&tasklist_lock);
2361 cgroup_enter_frozen();
2362 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2363 preempt_enable_no_resched();
2365 cgroup_leave_frozen(true);
2368 * We are back. Now reacquire the siglock before touching
2369 * last_siginfo, so that we are sure to have synchronized with
2370 * any signal-sending on another CPU that wants to examine it.
2372 spin_lock_irq(¤t->sighand->siglock);
2373 exit_code = current->exit_code;
2374 current->last_siginfo = NULL;
2375 current->ptrace_message = 0;
2376 current->exit_code = 0;
2378 /* LISTENING can be set only during STOP traps, clear it */
2379 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2382 * Queued signals ignored us while we were stopped for tracing.
2383 * So check for any that we should take before resuming user mode.
2384 * This sets TIF_SIGPENDING, but never clears it.
2386 recalc_sigpending_tsk(current);
2390 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2392 kernel_siginfo_t info;
2394 clear_siginfo(&info);
2395 info.si_signo = signr;
2396 info.si_code = exit_code;
2397 info.si_pid = task_pid_vnr(current);
2398 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2400 /* Let the debugger run. */
2401 return ptrace_stop(exit_code, why, message, &info);
2404 int ptrace_notify(int exit_code, unsigned long message)
2408 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2409 if (unlikely(task_work_pending(current)))
2412 spin_lock_irq(¤t->sighand->siglock);
2413 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2414 spin_unlock_irq(¤t->sighand->siglock);
2419 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2420 * @signr: signr causing group stop if initiating
2422 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2423 * and participate in it. If already set, participate in the existing
2424 * group stop. If participated in a group stop (and thus slept), %true is
2425 * returned with siglock released.
2427 * If ptraced, this function doesn't handle stop itself. Instead,
2428 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2429 * untouched. The caller must ensure that INTERRUPT trap handling takes
2430 * places afterwards.
2433 * Must be called with @current->sighand->siglock held, which is released
2437 * %false if group stop is already cancelled or ptrace trap is scheduled.
2438 * %true if participated in group stop.
2440 static bool do_signal_stop(int signr)
2441 __releases(¤t->sighand->siglock)
2443 struct signal_struct *sig = current->signal;
2445 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2446 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2447 struct task_struct *t;
2449 /* signr will be recorded in task->jobctl for retries */
2450 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2452 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2453 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2454 unlikely(sig->group_exec_task))
2457 * There is no group stop already in progress. We must
2460 * While ptraced, a task may be resumed while group stop is
2461 * still in effect and then receive a stop signal and
2462 * initiate another group stop. This deviates from the
2463 * usual behavior as two consecutive stop signals can't
2464 * cause two group stops when !ptraced. That is why we
2465 * also check !task_is_stopped(t) below.
2467 * The condition can be distinguished by testing whether
2468 * SIGNAL_STOP_STOPPED is already set. Don't generate
2469 * group_exit_code in such case.
2471 * This is not necessary for SIGNAL_STOP_CONTINUED because
2472 * an intervening stop signal is required to cause two
2473 * continued events regardless of ptrace.
2475 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2476 sig->group_exit_code = signr;
2478 sig->group_stop_count = 0;
2480 if (task_set_jobctl_pending(current, signr | gstop))
2481 sig->group_stop_count++;
2484 while_each_thread(current, t) {
2486 * Setting state to TASK_STOPPED for a group
2487 * stop is always done with the siglock held,
2488 * so this check has no races.
2490 if (!task_is_stopped(t) &&
2491 task_set_jobctl_pending(t, signr | gstop)) {
2492 sig->group_stop_count++;
2493 if (likely(!(t->ptrace & PT_SEIZED)))
2494 signal_wake_up(t, 0);
2496 ptrace_trap_notify(t);
2501 if (likely(!current->ptrace)) {
2505 * If there are no other threads in the group, or if there
2506 * is a group stop in progress and we are the last to stop,
2507 * report to the parent.
2509 if (task_participate_group_stop(current))
2510 notify = CLD_STOPPED;
2512 current->jobctl |= JOBCTL_STOPPED;
2513 set_special_state(TASK_STOPPED);
2514 spin_unlock_irq(¤t->sighand->siglock);
2517 * Notify the parent of the group stop completion. Because
2518 * we're not holding either the siglock or tasklist_lock
2519 * here, ptracer may attach inbetween; however, this is for
2520 * group stop and should always be delivered to the real
2521 * parent of the group leader. The new ptracer will get
2522 * its notification when this task transitions into
2526 read_lock(&tasklist_lock);
2527 do_notify_parent_cldstop(current, false, notify);
2528 read_unlock(&tasklist_lock);
2531 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2532 cgroup_enter_frozen();
2537 * While ptraced, group stop is handled by STOP trap.
2538 * Schedule it and let the caller deal with it.
2540 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2546 * do_jobctl_trap - take care of ptrace jobctl traps
2548 * When PT_SEIZED, it's used for both group stop and explicit
2549 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2550 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2551 * the stop signal; otherwise, %SIGTRAP.
2553 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2554 * number as exit_code and no siginfo.
2557 * Must be called with @current->sighand->siglock held, which may be
2558 * released and re-acquired before returning with intervening sleep.
2560 static void do_jobctl_trap(void)
2562 struct signal_struct *signal = current->signal;
2563 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2565 if (current->ptrace & PT_SEIZED) {
2566 if (!signal->group_stop_count &&
2567 !(signal->flags & SIGNAL_STOP_STOPPED))
2569 WARN_ON_ONCE(!signr);
2570 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2573 WARN_ON_ONCE(!signr);
2574 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2579 * do_freezer_trap - handle the freezer jobctl trap
2581 * Puts the task into frozen state, if only the task is not about to quit.
2582 * In this case it drops JOBCTL_TRAP_FREEZE.
2585 * Must be called with @current->sighand->siglock held,
2586 * which is always released before returning.
2588 static void do_freezer_trap(void)
2589 __releases(¤t->sighand->siglock)
2592 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2593 * let's make another loop to give it a chance to be handled.
2594 * In any case, we'll return back.
2596 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2597 JOBCTL_TRAP_FREEZE) {
2598 spin_unlock_irq(¤t->sighand->siglock);
2603 * Now we're sure that there is no pending fatal signal and no
2604 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2605 * immediately (if there is a non-fatal signal pending), and
2606 * put the task into sleep.
2608 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2609 clear_thread_flag(TIF_SIGPENDING);
2610 spin_unlock_irq(¤t->sighand->siglock);
2611 cgroup_enter_frozen();
2615 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2618 * We do not check sig_kernel_stop(signr) but set this marker
2619 * unconditionally because we do not know whether debugger will
2620 * change signr. This flag has no meaning unless we are going
2621 * to stop after return from ptrace_stop(). In this case it will
2622 * be checked in do_signal_stop(), we should only stop if it was
2623 * not cleared by SIGCONT while we were sleeping. See also the
2624 * comment in dequeue_signal().
2626 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2627 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2629 /* We're back. Did the debugger cancel the sig? */
2634 * Update the siginfo structure if the signal has
2635 * changed. If the debugger wanted something
2636 * specific in the siginfo structure then it should
2637 * have updated *info via PTRACE_SETSIGINFO.
2639 if (signr != info->si_signo) {
2640 clear_siginfo(info);
2641 info->si_signo = signr;
2643 info->si_code = SI_USER;
2645 info->si_pid = task_pid_vnr(current->parent);
2646 info->si_uid = from_kuid_munged(current_user_ns(),
2647 task_uid(current->parent));
2651 /* If the (new) signal is now blocked, requeue it. */
2652 if (sigismember(¤t->blocked, signr) ||
2653 fatal_signal_pending(current)) {
2654 send_signal_locked(signr, info, current, type);
2661 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2663 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2665 case SIL_FAULT_TRAPNO:
2666 case SIL_FAULT_MCEERR:
2667 case SIL_FAULT_BNDERR:
2668 case SIL_FAULT_PKUERR:
2669 case SIL_FAULT_PERF_EVENT:
2670 ksig->info.si_addr = arch_untagged_si_addr(
2671 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2683 bool get_signal(struct ksignal *ksig)
2685 struct sighand_struct *sighand = current->sighand;
2686 struct signal_struct *signal = current->signal;
2689 clear_notify_signal();
2690 if (unlikely(task_work_pending(current)))
2693 if (!task_sigpending(current))
2696 if (unlikely(uprobe_deny_signal()))
2700 * Do this once, we can't return to user-mode if freezing() == T.
2701 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2702 * thus do not need another check after return.
2707 spin_lock_irq(&sighand->siglock);
2710 * Every stopped thread goes here after wakeup. Check to see if
2711 * we should notify the parent, prepare_signal(SIGCONT) encodes
2712 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2714 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2717 if (signal->flags & SIGNAL_CLD_CONTINUED)
2718 why = CLD_CONTINUED;
2722 signal->flags &= ~SIGNAL_CLD_MASK;
2724 spin_unlock_irq(&sighand->siglock);
2727 * Notify the parent that we're continuing. This event is
2728 * always per-process and doesn't make whole lot of sense
2729 * for ptracers, who shouldn't consume the state via
2730 * wait(2) either, but, for backward compatibility, notify
2731 * the ptracer of the group leader too unless it's gonna be
2734 read_lock(&tasklist_lock);
2735 do_notify_parent_cldstop(current, false, why);
2737 if (ptrace_reparented(current->group_leader))
2738 do_notify_parent_cldstop(current->group_leader,
2740 read_unlock(&tasklist_lock);
2746 struct k_sigaction *ka;
2749 /* Has this task already been marked for death? */
2750 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2751 signal->group_exec_task) {
2752 clear_siginfo(&ksig->info);
2753 ksig->info.si_signo = signr = SIGKILL;
2754 sigdelset(¤t->pending.signal, SIGKILL);
2755 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2756 &sighand->action[SIGKILL - 1]);
2757 recalc_sigpending();
2761 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2765 if (unlikely(current->jobctl &
2766 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2767 if (current->jobctl & JOBCTL_TRAP_MASK) {
2769 spin_unlock_irq(&sighand->siglock);
2770 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2777 * If the task is leaving the frozen state, let's update
2778 * cgroup counters and reset the frozen bit.
2780 if (unlikely(cgroup_task_frozen(current))) {
2781 spin_unlock_irq(&sighand->siglock);
2782 cgroup_leave_frozen(false);
2787 * Signals generated by the execution of an instruction
2788 * need to be delivered before any other pending signals
2789 * so that the instruction pointer in the signal stack
2790 * frame points to the faulting instruction.
2793 signr = dequeue_synchronous_signal(&ksig->info);
2795 signr = dequeue_signal(current, ¤t->blocked,
2796 &ksig->info, &type);
2799 break; /* will return 0 */
2801 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2802 !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
2803 signr = ptrace_signal(signr, &ksig->info, type);
2808 ka = &sighand->action[signr-1];
2810 /* Trace actually delivered signals. */
2811 trace_signal_deliver(signr, &ksig->info, ka);
2813 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2815 if (ka->sa.sa_handler != SIG_DFL) {
2816 /* Run the handler. */
2819 if (ka->sa.sa_flags & SA_ONESHOT)
2820 ka->sa.sa_handler = SIG_DFL;
2822 break; /* will return non-zero "signr" value */
2826 * Now we are doing the default action for this signal.
2828 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2832 * Global init gets no signals it doesn't want.
2833 * Container-init gets no signals it doesn't want from same
2836 * Note that if global/container-init sees a sig_kernel_only()
2837 * signal here, the signal must have been generated internally
2838 * or must have come from an ancestor namespace. In either
2839 * case, the signal cannot be dropped.
2841 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2842 !sig_kernel_only(signr))
2845 if (sig_kernel_stop(signr)) {
2847 * The default action is to stop all threads in
2848 * the thread group. The job control signals
2849 * do nothing in an orphaned pgrp, but SIGSTOP
2850 * always works. Note that siglock needs to be
2851 * dropped during the call to is_orphaned_pgrp()
2852 * because of lock ordering with tasklist_lock.
2853 * This allows an intervening SIGCONT to be posted.
2854 * We need to check for that and bail out if necessary.
2856 if (signr != SIGSTOP) {
2857 spin_unlock_irq(&sighand->siglock);
2859 /* signals can be posted during this window */
2861 if (is_current_pgrp_orphaned())
2864 spin_lock_irq(&sighand->siglock);
2867 if (likely(do_signal_stop(ksig->info.si_signo))) {
2868 /* It released the siglock. */
2873 * We didn't actually stop, due to a race
2874 * with SIGCONT or something like that.
2880 spin_unlock_irq(&sighand->siglock);
2881 if (unlikely(cgroup_task_frozen(current)))
2882 cgroup_leave_frozen(true);
2885 * Anything else is fatal, maybe with a core dump.
2887 current->flags |= PF_SIGNALED;
2889 if (sig_kernel_coredump(signr)) {
2890 if (print_fatal_signals)
2891 print_fatal_signal(ksig->info.si_signo);
2892 proc_coredump_connector(current);
2894 * If it was able to dump core, this kills all
2895 * other threads in the group and synchronizes with
2896 * their demise. If we lost the race with another
2897 * thread getting here, it set group_exit_code
2898 * first and our do_group_exit call below will use
2899 * that value and ignore the one we pass it.
2901 do_coredump(&ksig->info);
2905 * PF_USER_WORKER threads will catch and exit on fatal signals
2906 * themselves. They have cleanup that must be performed, so
2907 * we cannot call do_exit() on their behalf.
2909 if (current->flags & PF_USER_WORKER)
2913 * Death signals, no core dump.
2915 do_group_exit(ksig->info.si_signo);
2918 spin_unlock_irq(&sighand->siglock);
2922 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2923 hide_si_addr_tag_bits(ksig);
2925 return ksig->sig > 0;
2929 * signal_delivered - called after signal delivery to update blocked signals
2930 * @ksig: kernel signal struct
2931 * @stepping: nonzero if debugger single-step or block-step in use
2933 * This function should be called when a signal has successfully been
2934 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2935 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2936 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2938 static void signal_delivered(struct ksignal *ksig, int stepping)
2942 /* A signal was successfully delivered, and the
2943 saved sigmask was stored on the signal frame,
2944 and will be restored by sigreturn. So we can
2945 simply clear the restore sigmask flag. */
2946 clear_restore_sigmask();
2948 sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask);
2949 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2950 sigaddset(&blocked, ksig->sig);
2951 set_current_blocked(&blocked);
2952 if (current->sas_ss_flags & SS_AUTODISARM)
2953 sas_ss_reset(current);
2955 ptrace_notify(SIGTRAP, 0);
2958 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2961 force_sigsegv(ksig->sig);
2963 signal_delivered(ksig, stepping);
2967 * It could be that complete_signal() picked us to notify about the
2968 * group-wide signal. Other threads should be notified now to take
2969 * the shared signals in @which since we will not.
2971 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2974 struct task_struct *t;
2976 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2977 if (sigisemptyset(&retarget))
2981 while_each_thread(tsk, t) {
2982 if (t->flags & PF_EXITING)
2985 if (!has_pending_signals(&retarget, &t->blocked))
2987 /* Remove the signals this thread can handle. */
2988 sigandsets(&retarget, &retarget, &t->blocked);
2990 if (!task_sigpending(t))
2991 signal_wake_up(t, 0);
2993 if (sigisemptyset(&retarget))
2998 void exit_signals(struct task_struct *tsk)
3004 * @tsk is about to have PF_EXITING set - lock out users which
3005 * expect stable threadgroup.
3007 cgroup_threadgroup_change_begin(tsk);
3009 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
3010 sched_mm_cid_exit_signals(tsk);
3011 tsk->flags |= PF_EXITING;
3012 cgroup_threadgroup_change_end(tsk);
3016 spin_lock_irq(&tsk->sighand->siglock);
3018 * From now this task is not visible for group-wide signals,
3019 * see wants_signal(), do_signal_stop().
3021 sched_mm_cid_exit_signals(tsk);
3022 tsk->flags |= PF_EXITING;
3024 cgroup_threadgroup_change_end(tsk);
3026 if (!task_sigpending(tsk))
3029 unblocked = tsk->blocked;
3030 signotset(&unblocked);
3031 retarget_shared_pending(tsk, &unblocked);
3033 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3034 task_participate_group_stop(tsk))
3035 group_stop = CLD_STOPPED;
3037 spin_unlock_irq(&tsk->sighand->siglock);
3040 * If group stop has completed, deliver the notification. This
3041 * should always go to the real parent of the group leader.
3043 if (unlikely(group_stop)) {
3044 read_lock(&tasklist_lock);
3045 do_notify_parent_cldstop(tsk, false, group_stop);
3046 read_unlock(&tasklist_lock);
3051 * System call entry points.
3055 * sys_restart_syscall - restart a system call
3057 SYSCALL_DEFINE0(restart_syscall)
3059 struct restart_block *restart = ¤t->restart_block;
3060 return restart->fn(restart);
3063 long do_no_restart_syscall(struct restart_block *param)
3068 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3070 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3071 sigset_t newblocked;
3072 /* A set of now blocked but previously unblocked signals. */
3073 sigandnsets(&newblocked, newset, ¤t->blocked);
3074 retarget_shared_pending(tsk, &newblocked);
3076 tsk->blocked = *newset;
3077 recalc_sigpending();
3081 * set_current_blocked - change current->blocked mask
3084 * It is wrong to change ->blocked directly, this helper should be used
3085 * to ensure the process can't miss a shared signal we are going to block.
3087 void set_current_blocked(sigset_t *newset)
3089 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3090 __set_current_blocked(newset);
3093 void __set_current_blocked(const sigset_t *newset)
3095 struct task_struct *tsk = current;
3098 * In case the signal mask hasn't changed, there is nothing we need
3099 * to do. The current->blocked shouldn't be modified by other task.
3101 if (sigequalsets(&tsk->blocked, newset))
3104 spin_lock_irq(&tsk->sighand->siglock);
3105 __set_task_blocked(tsk, newset);
3106 spin_unlock_irq(&tsk->sighand->siglock);
3110 * This is also useful for kernel threads that want to temporarily
3111 * (or permanently) block certain signals.
3113 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3114 * interface happily blocks "unblockable" signals like SIGKILL
3117 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3119 struct task_struct *tsk = current;
3122 /* Lockless, only current can change ->blocked, never from irq */
3124 *oldset = tsk->blocked;
3128 sigorsets(&newset, &tsk->blocked, set);
3131 sigandnsets(&newset, &tsk->blocked, set);
3140 __set_current_blocked(&newset);
3143 EXPORT_SYMBOL(sigprocmask);
3146 * The api helps set app-provided sigmasks.
3148 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3149 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3151 * Note that it does set_restore_sigmask() in advance, so it must be always
3152 * paired with restore_saved_sigmask_unless() before return from syscall.
3154 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3160 if (sigsetsize != sizeof(sigset_t))
3162 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3165 set_restore_sigmask();
3166 current->saved_sigmask = current->blocked;
3167 set_current_blocked(&kmask);
3172 #ifdef CONFIG_COMPAT
3173 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3180 if (sigsetsize != sizeof(compat_sigset_t))
3182 if (get_compat_sigset(&kmask, umask))
3185 set_restore_sigmask();
3186 current->saved_sigmask = current->blocked;
3187 set_current_blocked(&kmask);
3194 * sys_rt_sigprocmask - change the list of currently blocked signals
3195 * @how: whether to add, remove, or set signals
3196 * @nset: stores pending signals
3197 * @oset: previous value of signal mask if non-null
3198 * @sigsetsize: size of sigset_t type
3200 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3201 sigset_t __user *, oset, size_t, sigsetsize)
3203 sigset_t old_set, new_set;
3206 /* XXX: Don't preclude handling different sized sigset_t's. */
3207 if (sigsetsize != sizeof(sigset_t))
3210 old_set = current->blocked;
3213 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3215 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3217 error = sigprocmask(how, &new_set, NULL);
3223 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3230 #ifdef CONFIG_COMPAT
3231 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3232 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3234 sigset_t old_set = current->blocked;
3236 /* XXX: Don't preclude handling different sized sigset_t's. */
3237 if (sigsetsize != sizeof(sigset_t))
3243 if (get_compat_sigset(&new_set, nset))
3245 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3247 error = sigprocmask(how, &new_set, NULL);
3251 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3255 static void do_sigpending(sigset_t *set)
3257 spin_lock_irq(¤t->sighand->siglock);
3258 sigorsets(set, ¤t->pending.signal,
3259 ¤t->signal->shared_pending.signal);
3260 spin_unlock_irq(¤t->sighand->siglock);
3262 /* Outside the lock because only this thread touches it. */
3263 sigandsets(set, ¤t->blocked, set);
3267 * sys_rt_sigpending - examine a pending signal that has been raised
3269 * @uset: stores pending signals
3270 * @sigsetsize: size of sigset_t type or larger
3272 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3276 if (sigsetsize > sizeof(*uset))
3279 do_sigpending(&set);
3281 if (copy_to_user(uset, &set, sigsetsize))
3287 #ifdef CONFIG_COMPAT
3288 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3289 compat_size_t, sigsetsize)
3293 if (sigsetsize > sizeof(*uset))
3296 do_sigpending(&set);
3298 return put_compat_sigset(uset, &set, sigsetsize);
3302 static const struct {
3303 unsigned char limit, layout;
3305 [SIGILL] = { NSIGILL, SIL_FAULT },
3306 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3307 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3308 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3309 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3311 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3313 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3314 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3315 [SIGSYS] = { NSIGSYS, SIL_SYS },
3318 static bool known_siginfo_layout(unsigned sig, int si_code)
3320 if (si_code == SI_KERNEL)
3322 else if ((si_code > SI_USER)) {
3323 if (sig_specific_sicodes(sig)) {
3324 if (si_code <= sig_sicodes[sig].limit)
3327 else if (si_code <= NSIGPOLL)
3330 else if (si_code >= SI_DETHREAD)
3332 else if (si_code == SI_ASYNCNL)
3337 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3339 enum siginfo_layout layout = SIL_KILL;
3340 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3341 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3342 (si_code <= sig_sicodes[sig].limit)) {
3343 layout = sig_sicodes[sig].layout;
3344 /* Handle the exceptions */
3345 if ((sig == SIGBUS) &&
3346 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3347 layout = SIL_FAULT_MCEERR;
3348 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3349 layout = SIL_FAULT_BNDERR;
3351 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3352 layout = SIL_FAULT_PKUERR;
3354 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3355 layout = SIL_FAULT_PERF_EVENT;
3356 else if (IS_ENABLED(CONFIG_SPARC) &&
3357 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3358 layout = SIL_FAULT_TRAPNO;
3359 else if (IS_ENABLED(CONFIG_ALPHA) &&
3361 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3362 layout = SIL_FAULT_TRAPNO;
3364 else if (si_code <= NSIGPOLL)
3367 if (si_code == SI_TIMER)
3369 else if (si_code == SI_SIGIO)
3371 else if (si_code < 0)
3377 static inline char __user *si_expansion(const siginfo_t __user *info)
3379 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3382 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3384 char __user *expansion = si_expansion(to);
3385 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3387 if (clear_user(expansion, SI_EXPANSION_SIZE))
3392 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3393 const siginfo_t __user *from)
3395 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3396 char __user *expansion = si_expansion(from);
3397 char buf[SI_EXPANSION_SIZE];
3400 * An unknown si_code might need more than
3401 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3402 * extra bytes are 0. This guarantees copy_siginfo_to_user
3403 * will return this data to userspace exactly.
3405 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3407 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3415 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3416 const siginfo_t __user *from)
3418 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3420 to->si_signo = signo;
3421 return post_copy_siginfo_from_user(to, from);
3424 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3426 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3428 return post_copy_siginfo_from_user(to, from);
3431 #ifdef CONFIG_COMPAT
3433 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3434 * @to: compat siginfo destination
3435 * @from: kernel siginfo source
3437 * Note: This function does not work properly for the SIGCHLD on x32, but
3438 * fortunately it doesn't have to. The only valid callers for this function are
3439 * copy_siginfo_to_user32, which is overriden for x32 and the coredump code.
3440 * The latter does not care because SIGCHLD will never cause a coredump.
3442 void copy_siginfo_to_external32(struct compat_siginfo *to,
3443 const struct kernel_siginfo *from)
3445 memset(to, 0, sizeof(*to));
3447 to->si_signo = from->si_signo;
3448 to->si_errno = from->si_errno;
3449 to->si_code = from->si_code;
3450 switch(siginfo_layout(from->si_signo, from->si_code)) {
3452 to->si_pid = from->si_pid;
3453 to->si_uid = from->si_uid;
3456 to->si_tid = from->si_tid;
3457 to->si_overrun = from->si_overrun;
3458 to->si_int = from->si_int;
3461 to->si_band = from->si_band;
3462 to->si_fd = from->si_fd;
3465 to->si_addr = ptr_to_compat(from->si_addr);
3467 case SIL_FAULT_TRAPNO:
3468 to->si_addr = ptr_to_compat(from->si_addr);
3469 to->si_trapno = from->si_trapno;
3471 case SIL_FAULT_MCEERR:
3472 to->si_addr = ptr_to_compat(from->si_addr);
3473 to->si_addr_lsb = from->si_addr_lsb;
3475 case SIL_FAULT_BNDERR:
3476 to->si_addr = ptr_to_compat(from->si_addr);
3477 to->si_lower = ptr_to_compat(from->si_lower);
3478 to->si_upper = ptr_to_compat(from->si_upper);
3480 case SIL_FAULT_PKUERR:
3481 to->si_addr = ptr_to_compat(from->si_addr);
3482 to->si_pkey = from->si_pkey;
3484 case SIL_FAULT_PERF_EVENT:
3485 to->si_addr = ptr_to_compat(from->si_addr);
3486 to->si_perf_data = from->si_perf_data;
3487 to->si_perf_type = from->si_perf_type;
3488 to->si_perf_flags = from->si_perf_flags;
3491 to->si_pid = from->si_pid;
3492 to->si_uid = from->si_uid;
3493 to->si_status = from->si_status;
3494 to->si_utime = from->si_utime;
3495 to->si_stime = from->si_stime;
3498 to->si_pid = from->si_pid;
3499 to->si_uid = from->si_uid;
3500 to->si_int = from->si_int;
3503 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3504 to->si_syscall = from->si_syscall;
3505 to->si_arch = from->si_arch;
3510 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3511 const struct kernel_siginfo *from)
3513 struct compat_siginfo new;
3515 copy_siginfo_to_external32(&new, from);
3516 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3521 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3522 const struct compat_siginfo *from)
3525 to->si_signo = from->si_signo;
3526 to->si_errno = from->si_errno;
3527 to->si_code = from->si_code;
3528 switch(siginfo_layout(from->si_signo, from->si_code)) {
3530 to->si_pid = from->si_pid;
3531 to->si_uid = from->si_uid;
3534 to->si_tid = from->si_tid;
3535 to->si_overrun = from->si_overrun;
3536 to->si_int = from->si_int;
3539 to->si_band = from->si_band;
3540 to->si_fd = from->si_fd;
3543 to->si_addr = compat_ptr(from->si_addr);
3545 case SIL_FAULT_TRAPNO:
3546 to->si_addr = compat_ptr(from->si_addr);
3547 to->si_trapno = from->si_trapno;
3549 case SIL_FAULT_MCEERR:
3550 to->si_addr = compat_ptr(from->si_addr);
3551 to->si_addr_lsb = from->si_addr_lsb;
3553 case SIL_FAULT_BNDERR:
3554 to->si_addr = compat_ptr(from->si_addr);
3555 to->si_lower = compat_ptr(from->si_lower);
3556 to->si_upper = compat_ptr(from->si_upper);
3558 case SIL_FAULT_PKUERR:
3559 to->si_addr = compat_ptr(from->si_addr);
3560 to->si_pkey = from->si_pkey;
3562 case SIL_FAULT_PERF_EVENT:
3563 to->si_addr = compat_ptr(from->si_addr);
3564 to->si_perf_data = from->si_perf_data;
3565 to->si_perf_type = from->si_perf_type;
3566 to->si_perf_flags = from->si_perf_flags;
3569 to->si_pid = from->si_pid;
3570 to->si_uid = from->si_uid;
3571 to->si_status = from->si_status;
3572 #ifdef CONFIG_X86_X32_ABI
3573 if (in_x32_syscall()) {
3574 to->si_utime = from->_sifields._sigchld_x32._utime;
3575 to->si_stime = from->_sifields._sigchld_x32._stime;
3579 to->si_utime = from->si_utime;
3580 to->si_stime = from->si_stime;
3584 to->si_pid = from->si_pid;
3585 to->si_uid = from->si_uid;
3586 to->si_int = from->si_int;
3589 to->si_call_addr = compat_ptr(from->si_call_addr);
3590 to->si_syscall = from->si_syscall;
3591 to->si_arch = from->si_arch;
3597 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3598 const struct compat_siginfo __user *ufrom)
3600 struct compat_siginfo from;
3602 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3605 from.si_signo = signo;
3606 return post_copy_siginfo_from_user32(to, &from);
3609 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3610 const struct compat_siginfo __user *ufrom)
3612 struct compat_siginfo from;
3614 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3617 return post_copy_siginfo_from_user32(to, &from);
3619 #endif /* CONFIG_COMPAT */
3622 * do_sigtimedwait - wait for queued signals specified in @which
3623 * @which: queued signals to wait for
3624 * @info: if non-null, the signal's siginfo is returned here
3625 * @ts: upper bound on process time suspension
3627 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3628 const struct timespec64 *ts)
3630 ktime_t *to = NULL, timeout = KTIME_MAX;
3631 struct task_struct *tsk = current;
3632 sigset_t mask = *which;
3637 if (!timespec64_valid(ts))
3639 timeout = timespec64_to_ktime(*ts);
3644 * Invert the set of allowed signals to get those we want to block.
3646 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3649 spin_lock_irq(&tsk->sighand->siglock);
3650 sig = dequeue_signal(tsk, &mask, info, &type);
3651 if (!sig && timeout) {
3653 * None ready, temporarily unblock those we're interested
3654 * while we are sleeping in so that we'll be awakened when
3655 * they arrive. Unblocking is always fine, we can avoid
3656 * set_current_blocked().
3658 tsk->real_blocked = tsk->blocked;
3659 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3660 recalc_sigpending();
3661 spin_unlock_irq(&tsk->sighand->siglock);
3663 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3664 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3666 spin_lock_irq(&tsk->sighand->siglock);
3667 __set_task_blocked(tsk, &tsk->real_blocked);
3668 sigemptyset(&tsk->real_blocked);
3669 sig = dequeue_signal(tsk, &mask, info, &type);
3671 spin_unlock_irq(&tsk->sighand->siglock);
3675 return ret ? -EINTR : -EAGAIN;
3679 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3681 * @uthese: queued signals to wait for
3682 * @uinfo: if non-null, the signal's siginfo is returned here
3683 * @uts: upper bound on process time suspension
3684 * @sigsetsize: size of sigset_t type
3686 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3687 siginfo_t __user *, uinfo,
3688 const struct __kernel_timespec __user *, uts,
3692 struct timespec64 ts;
3693 kernel_siginfo_t info;
3696 /* XXX: Don't preclude handling different sized sigset_t's. */
3697 if (sigsetsize != sizeof(sigset_t))
3700 if (copy_from_user(&these, uthese, sizeof(these)))
3704 if (get_timespec64(&ts, uts))
3708 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3710 if (ret > 0 && uinfo) {
3711 if (copy_siginfo_to_user(uinfo, &info))
3718 #ifdef CONFIG_COMPAT_32BIT_TIME
3719 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3720 siginfo_t __user *, uinfo,
3721 const struct old_timespec32 __user *, uts,
3725 struct timespec64 ts;
3726 kernel_siginfo_t info;
3729 if (sigsetsize != sizeof(sigset_t))
3732 if (copy_from_user(&these, uthese, sizeof(these)))
3736 if (get_old_timespec32(&ts, uts))
3740 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3742 if (ret > 0 && uinfo) {
3743 if (copy_siginfo_to_user(uinfo, &info))
3751 #ifdef CONFIG_COMPAT
3752 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3753 struct compat_siginfo __user *, uinfo,
3754 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3757 struct timespec64 t;
3758 kernel_siginfo_t info;
3761 if (sigsetsize != sizeof(sigset_t))
3764 if (get_compat_sigset(&s, uthese))
3768 if (get_timespec64(&t, uts))
3772 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3774 if (ret > 0 && uinfo) {
3775 if (copy_siginfo_to_user32(uinfo, &info))
3782 #ifdef CONFIG_COMPAT_32BIT_TIME
3783 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3784 struct compat_siginfo __user *, uinfo,
3785 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3788 struct timespec64 t;
3789 kernel_siginfo_t info;
3792 if (sigsetsize != sizeof(sigset_t))
3795 if (get_compat_sigset(&s, uthese))
3799 if (get_old_timespec32(&t, uts))
3803 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3805 if (ret > 0 && uinfo) {
3806 if (copy_siginfo_to_user32(uinfo, &info))
3815 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3817 clear_siginfo(info);
3818 info->si_signo = sig;
3820 info->si_code = SI_USER;
3821 info->si_pid = task_tgid_vnr(current);
3822 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3826 * sys_kill - send a signal to a process
3827 * @pid: the PID of the process
3828 * @sig: signal to be sent
3830 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3832 struct kernel_siginfo info;
3834 prepare_kill_siginfo(sig, &info);
3836 return kill_something_info(sig, &info, pid);
3840 * Verify that the signaler and signalee either are in the same pid namespace
3841 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3844 static bool access_pidfd_pidns(struct pid *pid)
3846 struct pid_namespace *active = task_active_pid_ns(current);
3847 struct pid_namespace *p = ns_of_pid(pid);
3860 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3861 siginfo_t __user *info)
3863 #ifdef CONFIG_COMPAT
3865 * Avoid hooking up compat syscalls and instead handle necessary
3866 * conversions here. Note, this is a stop-gap measure and should not be
3867 * considered a generic solution.
3869 if (in_compat_syscall())
3870 return copy_siginfo_from_user32(
3871 kinfo, (struct compat_siginfo __user *)info);
3873 return copy_siginfo_from_user(kinfo, info);
3876 static struct pid *pidfd_to_pid(const struct file *file)
3880 pid = pidfd_pid(file);
3884 return tgid_pidfd_to_pid(file);
3888 * sys_pidfd_send_signal - Signal a process through a pidfd
3889 * @pidfd: file descriptor of the process
3890 * @sig: signal to send
3891 * @info: signal info
3892 * @flags: future flags
3894 * The syscall currently only signals via PIDTYPE_PID which covers
3895 * kill(<positive-pid>, <signal>. It does not signal threads or process
3897 * In order to extend the syscall to threads and process groups the @flags
3898 * argument should be used. In essence, the @flags argument will determine
3899 * what is signaled and not the file descriptor itself. Put in other words,
3900 * grouping is a property of the flags argument not a property of the file
3903 * Return: 0 on success, negative errno on failure
3905 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3906 siginfo_t __user *, info, unsigned int, flags)
3911 kernel_siginfo_t kinfo;
3913 /* Enforce flags be set to 0 until we add an extension. */
3921 /* Is this a pidfd? */
3922 pid = pidfd_to_pid(f.file);
3929 if (!access_pidfd_pidns(pid))
3933 ret = copy_siginfo_from_user_any(&kinfo, info);
3938 if (unlikely(sig != kinfo.si_signo))
3941 /* Only allow sending arbitrary signals to yourself. */
3943 if ((task_pid(current) != pid) &&
3944 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3947 prepare_kill_siginfo(sig, &kinfo);
3950 ret = kill_pid_info(sig, &kinfo, pid);
3958 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3960 struct task_struct *p;
3964 p = find_task_by_vpid(pid);
3965 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3966 error = check_kill_permission(sig, info, p);
3968 * The null signal is a permissions and process existence
3969 * probe. No signal is actually delivered.
3971 if (!error && sig) {
3972 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3974 * If lock_task_sighand() failed we pretend the task
3975 * dies after receiving the signal. The window is tiny,
3976 * and the signal is private anyway.
3978 if (unlikely(error == -ESRCH))
3987 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3989 struct kernel_siginfo info;
3991 clear_siginfo(&info);
3992 info.si_signo = sig;
3994 info.si_code = SI_TKILL;
3995 info.si_pid = task_tgid_vnr(current);
3996 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3998 return do_send_specific(tgid, pid, sig, &info);
4002 * sys_tgkill - send signal to one specific thread
4003 * @tgid: the thread group ID of the thread
4004 * @pid: the PID of the thread
4005 * @sig: signal to be sent
4007 * This syscall also checks the @tgid and returns -ESRCH even if the PID
4008 * exists but it's not belonging to the target process anymore. This
4009 * method solves the problem of threads exiting and PIDs getting reused.
4011 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
4013 /* This is only valid for single tasks */
4014 if (pid <= 0 || tgid <= 0)
4017 return do_tkill(tgid, pid, sig);
4021 * sys_tkill - send signal to one specific task
4022 * @pid: the PID of the task
4023 * @sig: signal to be sent
4025 * Send a signal to only one task, even if it's a CLONE_THREAD task.
4027 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4029 /* This is only valid for single tasks */
4033 return do_tkill(0, pid, sig);
4036 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4038 /* Not even root can pretend to send signals from the kernel.
4039 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4041 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4042 (task_pid_vnr(current) != pid))
4045 /* POSIX.1b doesn't mention process groups. */
4046 return kill_proc_info(sig, info, pid);
4050 * sys_rt_sigqueueinfo - send signal information to a signal
4051 * @pid: the PID of the thread
4052 * @sig: signal to be sent
4053 * @uinfo: signal info to be sent
4055 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4056 siginfo_t __user *, uinfo)
4058 kernel_siginfo_t info;
4059 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4062 return do_rt_sigqueueinfo(pid, sig, &info);
4065 #ifdef CONFIG_COMPAT
4066 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4069 struct compat_siginfo __user *, uinfo)
4071 kernel_siginfo_t info;
4072 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4075 return do_rt_sigqueueinfo(pid, sig, &info);
4079 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4081 /* This is only valid for single tasks */
4082 if (pid <= 0 || tgid <= 0)
4085 /* Not even root can pretend to send signals from the kernel.
4086 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4088 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4089 (task_pid_vnr(current) != pid))
4092 return do_send_specific(tgid, pid, sig, info);
4095 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4096 siginfo_t __user *, uinfo)
4098 kernel_siginfo_t info;
4099 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4102 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4105 #ifdef CONFIG_COMPAT
4106 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4110 struct compat_siginfo __user *, uinfo)
4112 kernel_siginfo_t info;
4113 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4116 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4121 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4123 void kernel_sigaction(int sig, __sighandler_t action)
4125 spin_lock_irq(¤t->sighand->siglock);
4126 current->sighand->action[sig - 1].sa.sa_handler = action;
4127 if (action == SIG_IGN) {
4131 sigaddset(&mask, sig);
4133 flush_sigqueue_mask(&mask, ¤t->signal->shared_pending);
4134 flush_sigqueue_mask(&mask, ¤t->pending);
4135 recalc_sigpending();
4137 spin_unlock_irq(¤t->sighand->siglock);
4139 EXPORT_SYMBOL(kernel_sigaction);
4141 void __weak sigaction_compat_abi(struct k_sigaction *act,
4142 struct k_sigaction *oact)
4146 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4148 struct task_struct *p = current, *t;
4149 struct k_sigaction *k;
4152 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4155 k = &p->sighand->action[sig-1];
4157 spin_lock_irq(&p->sighand->siglock);
4158 if (k->sa.sa_flags & SA_IMMUTABLE) {
4159 spin_unlock_irq(&p->sighand->siglock);
4166 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4167 * e.g. by having an architecture use the bit in their uapi.
4169 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4172 * Clear unknown flag bits in order to allow userspace to detect missing
4173 * support for flag bits and to allow the kernel to use non-uapi bits
4177 act->sa.sa_flags &= UAPI_SA_FLAGS;
4179 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4181 sigaction_compat_abi(act, oact);
4184 sigdelsetmask(&act->sa.sa_mask,
4185 sigmask(SIGKILL) | sigmask(SIGSTOP));
4189 * "Setting a signal action to SIG_IGN for a signal that is
4190 * pending shall cause the pending signal to be discarded,
4191 * whether or not it is blocked."
4193 * "Setting a signal action to SIG_DFL for a signal that is
4194 * pending and whose default action is to ignore the signal
4195 * (for example, SIGCHLD), shall cause the pending signal to
4196 * be discarded, whether or not it is blocked"
4198 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4200 sigaddset(&mask, sig);
4201 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4202 for_each_thread(p, t)
4203 flush_sigqueue_mask(&mask, &t->pending);
4207 spin_unlock_irq(&p->sighand->siglock);
4211 #ifdef CONFIG_DYNAMIC_SIGFRAME
4212 static inline void sigaltstack_lock(void)
4213 __acquires(¤t->sighand->siglock)
4215 spin_lock_irq(¤t->sighand->siglock);
4218 static inline void sigaltstack_unlock(void)
4219 __releases(¤t->sighand->siglock)
4221 spin_unlock_irq(¤t->sighand->siglock);
4224 static inline void sigaltstack_lock(void) { }
4225 static inline void sigaltstack_unlock(void) { }
4229 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4232 struct task_struct *t = current;
4236 memset(oss, 0, sizeof(stack_t));
4237 oss->ss_sp = (void __user *) t->sas_ss_sp;
4238 oss->ss_size = t->sas_ss_size;
4239 oss->ss_flags = sas_ss_flags(sp) |
4240 (current->sas_ss_flags & SS_FLAG_BITS);
4244 void __user *ss_sp = ss->ss_sp;
4245 size_t ss_size = ss->ss_size;
4246 unsigned ss_flags = ss->ss_flags;
4249 if (unlikely(on_sig_stack(sp)))
4252 ss_mode = ss_flags & ~SS_FLAG_BITS;
4253 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4258 * Return before taking any locks if no actual
4259 * sigaltstack changes were requested.
4261 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4262 t->sas_ss_size == ss_size &&
4263 t->sas_ss_flags == ss_flags)
4267 if (ss_mode == SS_DISABLE) {
4271 if (unlikely(ss_size < min_ss_size))
4273 if (!sigaltstack_size_valid(ss_size))
4277 t->sas_ss_sp = (unsigned long) ss_sp;
4278 t->sas_ss_size = ss_size;
4279 t->sas_ss_flags = ss_flags;
4281 sigaltstack_unlock();
4286 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4290 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4292 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4293 current_user_stack_pointer(),
4295 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4300 int restore_altstack(const stack_t __user *uss)
4303 if (copy_from_user(&new, uss, sizeof(stack_t)))
4305 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4307 /* squash all but EFAULT for now */
4311 int __save_altstack(stack_t __user *uss, unsigned long sp)
4313 struct task_struct *t = current;
4314 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4315 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4316 __put_user(t->sas_ss_size, &uss->ss_size);
4320 #ifdef CONFIG_COMPAT
4321 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4322 compat_stack_t __user *uoss_ptr)
4328 compat_stack_t uss32;
4329 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4331 uss.ss_sp = compat_ptr(uss32.ss_sp);
4332 uss.ss_flags = uss32.ss_flags;
4333 uss.ss_size = uss32.ss_size;
4335 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4336 compat_user_stack_pointer(),
4337 COMPAT_MINSIGSTKSZ);
4338 if (ret >= 0 && uoss_ptr) {
4340 memset(&old, 0, sizeof(old));
4341 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4342 old.ss_flags = uoss.ss_flags;
4343 old.ss_size = uoss.ss_size;
4344 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4350 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4351 const compat_stack_t __user *, uss_ptr,
4352 compat_stack_t __user *, uoss_ptr)
4354 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4357 int compat_restore_altstack(const compat_stack_t __user *uss)
4359 int err = do_compat_sigaltstack(uss, NULL);
4360 /* squash all but -EFAULT for now */
4361 return err == -EFAULT ? err : 0;
4364 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4367 struct task_struct *t = current;
4368 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4370 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4371 __put_user(t->sas_ss_size, &uss->ss_size);
4376 #ifdef __ARCH_WANT_SYS_SIGPENDING
4379 * sys_sigpending - examine pending signals
4380 * @uset: where mask of pending signal is returned
4382 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4386 if (sizeof(old_sigset_t) > sizeof(*uset))
4389 do_sigpending(&set);
4391 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4397 #ifdef CONFIG_COMPAT
4398 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4402 do_sigpending(&set);
4404 return put_user(set.sig[0], set32);
4410 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4412 * sys_sigprocmask - examine and change blocked signals
4413 * @how: whether to add, remove, or set signals
4414 * @nset: signals to add or remove (if non-null)
4415 * @oset: previous value of signal mask if non-null
4417 * Some platforms have their own version with special arguments;
4418 * others support only sys_rt_sigprocmask.
4421 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4422 old_sigset_t __user *, oset)
4424 old_sigset_t old_set, new_set;
4425 sigset_t new_blocked;
4427 old_set = current->blocked.sig[0];
4430 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4433 new_blocked = current->blocked;
4437 sigaddsetmask(&new_blocked, new_set);
4440 sigdelsetmask(&new_blocked, new_set);
4443 new_blocked.sig[0] = new_set;
4449 set_current_blocked(&new_blocked);
4453 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4459 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4461 #ifndef CONFIG_ODD_RT_SIGACTION
4463 * sys_rt_sigaction - alter an action taken by a process
4464 * @sig: signal to be sent
4465 * @act: new sigaction
4466 * @oact: used to save the previous sigaction
4467 * @sigsetsize: size of sigset_t type
4469 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4470 const struct sigaction __user *, act,
4471 struct sigaction __user *, oact,
4474 struct k_sigaction new_sa, old_sa;
4477 /* XXX: Don't preclude handling different sized sigset_t's. */
4478 if (sigsetsize != sizeof(sigset_t))
4481 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4484 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4488 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4493 #ifdef CONFIG_COMPAT
4494 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4495 const struct compat_sigaction __user *, act,
4496 struct compat_sigaction __user *, oact,
4497 compat_size_t, sigsetsize)
4499 struct k_sigaction new_ka, old_ka;
4500 #ifdef __ARCH_HAS_SA_RESTORER
4501 compat_uptr_t restorer;
4505 /* XXX: Don't preclude handling different sized sigset_t's. */
4506 if (sigsetsize != sizeof(compat_sigset_t))
4510 compat_uptr_t handler;
4511 ret = get_user(handler, &act->sa_handler);
4512 new_ka.sa.sa_handler = compat_ptr(handler);
4513 #ifdef __ARCH_HAS_SA_RESTORER
4514 ret |= get_user(restorer, &act->sa_restorer);
4515 new_ka.sa.sa_restorer = compat_ptr(restorer);
4517 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4518 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4523 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4525 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4527 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4528 sizeof(oact->sa_mask));
4529 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4530 #ifdef __ARCH_HAS_SA_RESTORER
4531 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4532 &oact->sa_restorer);
4538 #endif /* !CONFIG_ODD_RT_SIGACTION */
4540 #ifdef CONFIG_OLD_SIGACTION
4541 SYSCALL_DEFINE3(sigaction, int, sig,
4542 const struct old_sigaction __user *, act,
4543 struct old_sigaction __user *, oact)
4545 struct k_sigaction new_ka, old_ka;
4550 if (!access_ok(act, sizeof(*act)) ||
4551 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4552 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4553 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4554 __get_user(mask, &act->sa_mask))
4556 #ifdef __ARCH_HAS_KA_RESTORER
4557 new_ka.ka_restorer = NULL;
4559 siginitset(&new_ka.sa.sa_mask, mask);
4562 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4565 if (!access_ok(oact, sizeof(*oact)) ||
4566 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4567 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4568 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4569 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4576 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4577 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4578 const struct compat_old_sigaction __user *, act,
4579 struct compat_old_sigaction __user *, oact)
4581 struct k_sigaction new_ka, old_ka;
4583 compat_old_sigset_t mask;
4584 compat_uptr_t handler, restorer;
4587 if (!access_ok(act, sizeof(*act)) ||
4588 __get_user(handler, &act->sa_handler) ||
4589 __get_user(restorer, &act->sa_restorer) ||
4590 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4591 __get_user(mask, &act->sa_mask))
4594 #ifdef __ARCH_HAS_KA_RESTORER
4595 new_ka.ka_restorer = NULL;
4597 new_ka.sa.sa_handler = compat_ptr(handler);
4598 new_ka.sa.sa_restorer = compat_ptr(restorer);
4599 siginitset(&new_ka.sa.sa_mask, mask);
4602 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4605 if (!access_ok(oact, sizeof(*oact)) ||
4606 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4607 &oact->sa_handler) ||
4608 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4609 &oact->sa_restorer) ||
4610 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4611 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4618 #ifdef CONFIG_SGETMASK_SYSCALL
4621 * For backwards compatibility. Functionality superseded by sigprocmask.
4623 SYSCALL_DEFINE0(sgetmask)
4626 return current->blocked.sig[0];
4629 SYSCALL_DEFINE1(ssetmask, int, newmask)
4631 int old = current->blocked.sig[0];
4634 siginitset(&newset, newmask);
4635 set_current_blocked(&newset);
4639 #endif /* CONFIG_SGETMASK_SYSCALL */
4641 #ifdef __ARCH_WANT_SYS_SIGNAL
4643 * For backwards compatibility. Functionality superseded by sigaction.
4645 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4647 struct k_sigaction new_sa, old_sa;
4650 new_sa.sa.sa_handler = handler;
4651 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4652 sigemptyset(&new_sa.sa.sa_mask);
4654 ret = do_sigaction(sig, &new_sa, &old_sa);
4656 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4658 #endif /* __ARCH_WANT_SYS_SIGNAL */
4660 #ifdef __ARCH_WANT_SYS_PAUSE
4662 SYSCALL_DEFINE0(pause)
4664 while (!signal_pending(current)) {
4665 __set_current_state(TASK_INTERRUPTIBLE);
4668 return -ERESTARTNOHAND;
4673 static int sigsuspend(sigset_t *set)
4675 current->saved_sigmask = current->blocked;
4676 set_current_blocked(set);
4678 while (!signal_pending(current)) {
4679 __set_current_state(TASK_INTERRUPTIBLE);
4682 set_restore_sigmask();
4683 return -ERESTARTNOHAND;
4687 * sys_rt_sigsuspend - replace the signal mask for a value with the
4688 * @unewset value until a signal is received
4689 * @unewset: new signal mask value
4690 * @sigsetsize: size of sigset_t type
4692 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4696 /* XXX: Don't preclude handling different sized sigset_t's. */
4697 if (sigsetsize != sizeof(sigset_t))
4700 if (copy_from_user(&newset, unewset, sizeof(newset)))
4702 return sigsuspend(&newset);
4705 #ifdef CONFIG_COMPAT
4706 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4710 /* XXX: Don't preclude handling different sized sigset_t's. */
4711 if (sigsetsize != sizeof(sigset_t))
4714 if (get_compat_sigset(&newset, unewset))
4716 return sigsuspend(&newset);
4720 #ifdef CONFIG_OLD_SIGSUSPEND
4721 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4724 siginitset(&blocked, mask);
4725 return sigsuspend(&blocked);
4728 #ifdef CONFIG_OLD_SIGSUSPEND3
4729 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4732 siginitset(&blocked, mask);
4733 return sigsuspend(&blocked);
4737 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4742 static inline void siginfo_buildtime_checks(void)
4744 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4746 /* Verify the offsets in the two siginfos match */
4747 #define CHECK_OFFSET(field) \
4748 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4751 CHECK_OFFSET(si_pid);
4752 CHECK_OFFSET(si_uid);
4755 CHECK_OFFSET(si_tid);
4756 CHECK_OFFSET(si_overrun);
4757 CHECK_OFFSET(si_value);
4760 CHECK_OFFSET(si_pid);
4761 CHECK_OFFSET(si_uid);
4762 CHECK_OFFSET(si_value);
4765 CHECK_OFFSET(si_pid);
4766 CHECK_OFFSET(si_uid);
4767 CHECK_OFFSET(si_status);
4768 CHECK_OFFSET(si_utime);
4769 CHECK_OFFSET(si_stime);
4772 CHECK_OFFSET(si_addr);
4773 CHECK_OFFSET(si_trapno);
4774 CHECK_OFFSET(si_addr_lsb);
4775 CHECK_OFFSET(si_lower);
4776 CHECK_OFFSET(si_upper);
4777 CHECK_OFFSET(si_pkey);
4778 CHECK_OFFSET(si_perf_data);
4779 CHECK_OFFSET(si_perf_type);
4780 CHECK_OFFSET(si_perf_flags);
4783 CHECK_OFFSET(si_band);
4784 CHECK_OFFSET(si_fd);
4787 CHECK_OFFSET(si_call_addr);
4788 CHECK_OFFSET(si_syscall);
4789 CHECK_OFFSET(si_arch);
4793 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4794 offsetof(struct siginfo, si_addr));
4795 if (sizeof(int) == sizeof(void __user *)) {
4796 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4797 sizeof(void __user *));
4799 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4800 sizeof_field(struct siginfo, si_uid)) !=
4801 sizeof(void __user *));
4802 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4803 offsetof(struct siginfo, si_uid));
4805 #ifdef CONFIG_COMPAT
4806 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4807 offsetof(struct compat_siginfo, si_addr));
4808 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4809 sizeof(compat_uptr_t));
4810 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4811 sizeof_field(struct siginfo, si_pid));
4815 #if defined(CONFIG_SYSCTL)
4816 static struct ctl_table signal_debug_table[] = {
4817 #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
4819 .procname = "exception-trace",
4820 .data = &show_unhandled_signals,
4821 .maxlen = sizeof(int),
4823 .proc_handler = proc_dointvec
4829 static int __init init_signal_sysctls(void)
4831 register_sysctl_init("debug", signal_debug_table);
4834 early_initcall(init_signal_sysctls);
4835 #endif /* CONFIG_SYSCTL */
4837 void __init signals_init(void)
4839 siginfo_buildtime_checks();
4841 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4844 #ifdef CONFIG_KGDB_KDB
4845 #include <linux/kdb.h>
4847 * kdb_send_sig - Allows kdb to send signals without exposing
4848 * signal internals. This function checks if the required locks are
4849 * available before calling the main signal code, to avoid kdb
4852 void kdb_send_sig(struct task_struct *t, int sig)
4854 static struct task_struct *kdb_prev_t;
4856 if (!spin_trylock(&t->sighand->siglock)) {
4857 kdb_printf("Can't do kill command now.\n"
4858 "The sigmask lock is held somewhere else in "
4859 "kernel, try again later\n");
4862 new_t = kdb_prev_t != t;
4864 if (!task_is_running(t) && new_t) {
4865 spin_unlock(&t->sighand->siglock);
4866 kdb_printf("Process is not RUNNING, sending a signal from "
4867 "kdb risks deadlock\n"
4868 "on the run queue locks. "
4869 "The signal has _not_ been sent.\n"
4870 "Reissue the kill command if you want to risk "
4874 ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4875 spin_unlock(&t->sighand->siglock);
4877 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4880 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4882 #endif /* CONFIG_KGDB_KDB */