1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/kernel/signal.c
5 * Copyright (C) 1991, 1992 Linus Torvalds
7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
9 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 * Changes to use preallocated sigqueue structures
11 * to allow signals to be sent reliably.
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
26 #include <linux/proc_fs.h>
27 #include <linux/tty.h>
28 #include <linux/binfmts.h>
29 #include <linux/coredump.h>
30 #include <linux/security.h>
31 #include <linux/syscalls.h>
32 #include <linux/ptrace.h>
33 #include <linux/signal.h>
34 #include <linux/signalfd.h>
35 #include <linux/ratelimit.h>
36 #include <linux/task_work.h>
37 #include <linux/capability.h>
38 #include <linux/freezer.h>
39 #include <linux/pid_namespace.h>
40 #include <linux/nsproxy.h>
41 #include <linux/user_namespace.h>
42 #include <linux/uprobes.h>
43 #include <linux/compat.h>
44 #include <linux/cn_proc.h>
45 #include <linux/compiler.h>
46 #include <linux/posix-timers.h>
47 #include <linux/cgroup.h>
48 #include <linux/audit.h>
49 #include <linux/sysctl.h>
51 #define CREATE_TRACE_POINTS
52 #include <trace/events/signal.h>
54 #include <asm/param.h>
55 #include <linux/uaccess.h>
56 #include <asm/unistd.h>
57 #include <asm/siginfo.h>
58 #include <asm/cacheflush.h>
59 #include <asm/syscall.h> /* for syscall_get_* */
62 * SLAB caches for signal bits.
65 static struct kmem_cache *sigqueue_cachep;
67 int print_fatal_signals __read_mostly;
69 static void __user *sig_handler(struct task_struct *t, int sig)
71 return t->sighand->action[sig - 1].sa.sa_handler;
74 static inline bool sig_handler_ignored(void __user *handler, int sig)
76 /* Is it explicitly or implicitly ignored? */
77 return handler == SIG_IGN ||
78 (handler == SIG_DFL && sig_kernel_ignore(sig));
81 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
85 handler = sig_handler(t, sig);
87 /* SIGKILL and SIGSTOP may not be sent to the global init */
88 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
89 return true;
91 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
92 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
93 return true;
95 /* Only allow kernel generated signals to this kthread */
96 if (unlikely((t->flags & PF_KTHREAD) &&
97 (handler == SIG_KTHREAD_KERNEL) && !force))
98 return true;
100 return sig_handler_ignored(handler, sig);
103 static bool sig_ignored(struct task_struct *t, int sig, bool force)
106 * Blocked signals are never ignored, since the
107 * signal handler may change by the time it is
108 * unblocked.
109 */
110 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
111 return false;
114 * Tracers may want to know about even ignored signals, unless the
115 * signal is SIGKILL, which can't be reported anyway but can be
116 * ignored by a SIGNAL_UNKILLABLE task.
117 */
118 if (t->ptrace && sig != SIGKILL)
119 return false;
121 return sig_task_ignored(t, sig, force);
125 * Re-calculate pending state from the set of locally pending
126 * signals, globally pending signals, and blocked signals.
128 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
133 switch (_NSIG_WORDS) {
135 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
136 ready |= signal->sig[i] &~ blocked->sig[i];
139 case 4: ready = signal->sig[3] &~ blocked->sig[3];
140 ready |= signal->sig[2] &~ blocked->sig[2];
141 ready |= signal->sig[1] &~ blocked->sig[1];
142 ready |= signal->sig[0] &~ blocked->sig[0];
145 case 2: ready = signal->sig[1] &~ blocked->sig[1];
146 ready |= signal->sig[0] &~ blocked->sig[0];
149 case 1: ready = signal->sig[0] &~ blocked->sig[0];
154 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
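/*
 * Worked example (illustrative, not kernel code): with _NSIG_WORDS == 1
 * the whole check above collapses to a single bitwise test, e.g. for a
 * pending-but-unblocked SIGTERM (bit 14):
 *
 *	ready = signal->sig[0] & ~blocked->sig[0];
 *	// signal->sig[0] = 0x4000, blocked->sig[0] = 0 -> ready != 0,
 *	// so recalc_sigpending_tsk() below sets TIF_SIGPENDING.
 */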
156 static bool recalc_sigpending_tsk(struct task_struct *t)
158 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
159 PENDING(&t->pending, &t->blocked) ||
160 PENDING(&t->signal->shared_pending, &t->blocked) ||
161 cgroup_task_frozen(t)) {
162 set_tsk_thread_flag(t, TIF_SIGPENDING);
163 return true;
164 }
166 /*
167 * We must never clear the flag in another thread, or in current
168 * when it's possible the current syscall is returning -ERESTART*.
169 * So we don't clear it here; only callers that know they should clear it do so.
170 */
171 return false;
172 }
174 /*
175 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
176 * This is superfluous when called on current; the wakeup is a harmless no-op.
178 void recalc_sigpending_and_wake(struct task_struct *t)
180 if (recalc_sigpending_tsk(t))
181 signal_wake_up(t, 0);
184 void recalc_sigpending(void)
186 if (!recalc_sigpending_tsk(current) && !freezing(current))
187 clear_thread_flag(TIF_SIGPENDING);
190 EXPORT_SYMBOL(recalc_sigpending);
192 void calculate_sigpending(void)
194 /* Have any signals or users of TIF_SIGPENDING been delayed
195 * until after fork?
196 */
197 spin_lock_irq(&current->sighand->siglock);
198 set_tsk_thread_flag(current, TIF_SIGPENDING);
199 recalc_sigpending();
200 spin_unlock_irq(&current->sighand->siglock);
203 /* Given the mask, find the first available signal that should be serviced. */
205 #define SYNCHRONOUS_MASK \
206 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
207 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
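/*
 * Illustrative effect of the mask (assumed values, not from this file):
 * if SIGUSR1 (10) and SIGSEGV (11) are both pending and unblocked,
 * next_signal() below would normally pick the lowest set bit, SIGUSR1.
 * The masking step
 *
 *	if (x & SYNCHRONOUS_MASK)
 *		x &= SYNCHRONOUS_MASK;
 *
 * keeps only SIGSEGV's bit, so the synchronous fault is dequeued first.
 */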
209 int next_signal(struct sigpending *pending, sigset_t *mask)
211 unsigned long i, *s, *m, x;
214 s = pending->signal.sig;
218 * Handle the first word specially: it contains the
219 * synchronous signals that need to be dequeued first.
223 if (x & SYNCHRONOUS_MASK)
224 x &= SYNCHRONOUS_MASK;
229 switch (_NSIG_WORDS) {
231 for (i = 1; i < _NSIG_WORDS; ++i) {
235 sig = ffz(~x) + i*_NSIG_BPW + 1;
244 sig = ffz(~x) + _NSIG_BPW + 1;
255 static inline void print_dropped_signal(int sig)
257 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
259 if (!print_fatal_signals)
262 if (!__ratelimit(&ratelimit_state))
265 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
266 current->comm, current->pid, sig);
270 * task_set_jobctl_pending - set jobctl pending bits
271 * @task: target task
272 * @mask: pending bits to set
273 *
274 * Set @mask on @task->jobctl. @mask must be subset of
275 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
276 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
277 * cleared. If @task is already being killed or exiting, this function
278 * becomes noop.
279 *
280 * CONTEXT:
281 * Must be called with @task->sighand->siglock held.
282 *
283 * RETURNS:
284 * %true if @mask is set, %false if made noop because @task was dying.
286 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
288 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
289 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
290 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
292 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
293 return false;
295 if (mask & JOBCTL_STOP_SIGMASK)
296 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
298 task->jobctl |= mask;
303 * task_clear_jobctl_trapping - clear jobctl trapping bit
306 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
307 * Clear it and wake up the ptracer. Note that we don't need any further
308 * locking. @task->siglock guarantees that @task->parent points to the
309 * ptracer.
310 *
311 * CONTEXT:
312 * Must be called with @task->sighand->siglock held.
314 void task_clear_jobctl_trapping(struct task_struct *task)
316 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
317 task->jobctl &= ~JOBCTL_TRAPPING;
318 smp_mb(); /* advised by wake_up_bit() */
319 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
324 * task_clear_jobctl_pending - clear jobctl pending bits
325 * @task: target task
326 * @mask: pending bits to clear
328 * Clear @mask from @task->jobctl. @mask must be subset of
329 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
330 * STOP bits are cleared together.
332 * If clearing of @mask leaves no stop or trap pending, this function calls
333 * task_clear_jobctl_trapping().
336 * Must be called with @task->sighand->siglock held.
338 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
340 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
342 if (mask & JOBCTL_STOP_PENDING)
343 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
345 task->jobctl &= ~mask;
347 if (!(task->jobctl & JOBCTL_PENDING_MASK))
348 task_clear_jobctl_trapping(task);
352 * task_participate_group_stop - participate in a group stop
353 * @task: task participating in a group stop
355 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
356 * Group stop states are cleared and the group stop count is consumed if
357 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
358 * stop, the appropriate `SIGNAL_*` flags are set.
361 * Must be called with @task->sighand->siglock held.
364 * %true if group stop completion should be notified to the parent, %false
365 * otherwise.
366 */
367 static bool task_participate_group_stop(struct task_struct *task)
369 struct signal_struct *sig = task->signal;
370 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
372 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
374 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
379 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
380 sig->group_stop_count--;
383 * Tell the caller to notify completion iff we are entering into a
384 * fresh group stop. Read comment in do_signal_stop() for details.
386 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
387 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
393 void task_join_group_stop(struct task_struct *task)
395 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
396 struct signal_struct *sig = current->signal;
398 if (sig->group_stop_count) {
399 sig->group_stop_count++;
400 mask |= JOBCTL_STOP_CONSUME;
401 } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
402 return;
404 /* Have the new thread join an on-going signal group stop */
405 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
409 * allocate a new signal queue record
410 * - this may be called without locks if and only if t == current, otherwise an
411 * appropriate lock must be held to stop the target task from exiting
413 static struct sigqueue *
414 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
415 int override_rlimit, const unsigned int sigqueue_flags)
417 struct sigqueue *q = NULL;
418 struct ucounts *ucounts = NULL;
422 * Protect access to @t credentials. This can go away when all
423 * callers hold rcu read lock.
425 * NOTE! A pending signal will hold on to the user refcount,
426 * and we get/put the refcount only when the sigpending count
427 * changes from/to zero.
430 ucounts = task_ucounts(t);
431 sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
436 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
437 q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
439 print_dropped_signal(sig);
442 if (unlikely(q == NULL)) {
443 dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
445 INIT_LIST_HEAD(&q->list);
446 q->flags = sigqueue_flags;
447 q->ucounts = ucounts;
452 static void __sigqueue_free(struct sigqueue *q)
454 if (q->flags & SIGQUEUE_PREALLOC)
455 return;
456 if (q->ucounts) {
457 dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
458 q->ucounts = NULL;
459 }
460 kmem_cache_free(sigqueue_cachep, q);
463 void flush_sigqueue(struct sigpending *queue)
467 sigemptyset(&queue->signal);
468 while (!list_empty(&queue->list)) {
469 q = list_entry(queue->list.next, struct sigqueue, list);
470 list_del_init(&q->list);
476 * Flush all pending signals for this kthread.
478 void flush_signals(struct task_struct *t)
482 spin_lock_irqsave(&t->sighand->siglock, flags);
483 clear_tsk_thread_flag(t, TIF_SIGPENDING);
484 flush_sigqueue(&t->pending);
485 flush_sigqueue(&t->signal->shared_pending);
486 spin_unlock_irqrestore(&t->sighand->siglock, flags);
488 EXPORT_SYMBOL(flush_signals);
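/*
 * Usage sketch (a hypothetical kthread, not from this file): kernel
 * threads that enable signal delivery with allow_signal() typically
 * drain anything they are not going to handle:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current))
 *			flush_signals(current);
 *		...
 *	}
 */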
490 #ifdef CONFIG_POSIX_TIMERS
491 static void __flush_itimer_signals(struct sigpending *pending)
493 sigset_t signal, retain;
494 struct sigqueue *q, *n;
496 signal = pending->signal;
497 sigemptyset(&retain);
499 list_for_each_entry_safe(q, n, &pending->list, list) {
500 int sig = q->info.si_signo;
502 if (likely(q->info.si_code != SI_TIMER)) {
503 sigaddset(&retain, sig);
504 } else {
505 sigdelset(&signal, sig);
506 list_del_init(&q->list);
507 __sigqueue_free(q);
508 }
511 sigorsets(&pending->signal, &signal, &retain);
514 void flush_itimer_signals(void)
516 struct task_struct *tsk = current;
519 spin_lock_irqsave(&tsk->sighand->siglock, flags);
520 __flush_itimer_signals(&tsk->pending);
521 __flush_itimer_signals(&tsk->signal->shared_pending);
522 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
526 void ignore_signals(struct task_struct *t)
530 for (i = 0; i < _NSIG; ++i)
531 t->sighand->action[i].sa.sa_handler = SIG_IGN;
537 * Flush all handlers for a task.
541 flush_signal_handlers(struct task_struct *t, int force_default)
544 struct k_sigaction *ka = &t->sighand->action[0];
545 for (i = _NSIG ; i != 0 ; i--) {
546 if (force_default || ka->sa.sa_handler != SIG_IGN)
547 ka->sa.sa_handler = SIG_DFL;
549 #ifdef __ARCH_HAS_SA_RESTORER
550 ka->sa.sa_restorer = NULL;
552 sigemptyset(&ka->sa.sa_mask);
557 bool unhandled_signal(struct task_struct *tsk, int sig)
559 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
560 if (is_global_init(tsk))
563 if (handler != SIG_IGN && handler != SIG_DFL)
566 /* If dying, we handle all new signals by ignoring them */
567 if (fatal_signal_pending(tsk))
570 /* if ptraced, let the tracer determine */
574 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
577 struct sigqueue *q, *first = NULL;
580 * Collect the siginfo appropriate to this signal. Check if
581 * there is another siginfo for the same signal.
583 list_for_each_entry(q, &list->list, list) {
584 if (q->info.si_signo == sig) {
591 sigdelset(&list->signal, sig);
595 list_del_init(&first->list);
596 copy_siginfo(info, &first->info);
598 *resched_timer =
599 (first->flags & SIGQUEUE_PREALLOC) &&
600 (info->si_code == SI_TIMER) &&
601 (info->si_sys_private);
603 __sigqueue_free(first);
606 * Ok, it wasn't in the queue. This must be
607 * a fast-pathed signal or we must have been
608 * out of queue space. So zero out the info.
611 info->si_signo = sig;
613 info->si_code = SI_USER;
619 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
620 kernel_siginfo_t *info, bool *resched_timer)
622 int sig = next_signal(pending, mask);
625 collect_signal(sig, pending, info, resched_timer);
630 * Dequeue a signal and return the element to the caller, which is
631 * expected to free it.
633 * All callers have to hold the siglock.
635 int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
636 kernel_siginfo_t *info, enum pid_type *type)
638 bool resched_timer = false;
641 /* We only dequeue private signals from ourselves, we don't let
642 * signalfd steal them
645 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
647 *type = PIDTYPE_TGID;
648 signr = __dequeue_signal(&tsk->signal->shared_pending,
649 mask, info, &resched_timer);
650 #ifdef CONFIG_POSIX_TIMERS
654 * itimers are process shared and we restart periodic
655 * itimers in the signal delivery path to prevent DoS
656 * attacks in the high resolution timer case. This is
657 * compliant with the old way of self-restarting
658 * itimers, as the SIGALRM is a legacy signal and only
659 * queued once. Changing the restart behaviour to
660 * restart the timer in the signal dequeue path is
661 * reducing the timer noise on heavily loaded !highres
662 * systems.
663 */
664 if (unlikely(signr == SIGALRM)) {
665 struct hrtimer *tmr = &tsk->signal->real_timer;
667 if (!hrtimer_is_queued(tmr) &&
668 tsk->signal->it_real_incr != 0) {
669 hrtimer_forward(tmr, tmr->base->get_time(),
670 tsk->signal->it_real_incr);
671 hrtimer_restart(tmr);
681 if (unlikely(sig_kernel_stop(signr))) {
683 * Set a marker that we have dequeued a stop signal. Our
684 * caller might release the siglock and then the pending
685 * stop signal it is about to process is no longer in the
686 * pending bitmasks, but must still be cleared by a SIGCONT
687 * (and overruled by a SIGKILL). So those cases clear this
688 * shared flag after we've set it. Note that this flag may
689 * remain set after the signal we return is ignored or
690 * handled. That doesn't matter because its only purpose
691 * is to alert stop-signal processing code when another
692 * processor has come along and cleared the flag.
694 current->jobctl |= JOBCTL_STOP_DEQUEUED;
696 #ifdef CONFIG_POSIX_TIMERS
699 * Release the siglock to ensure proper locking order
700 * of timer locks outside of siglocks. Note, we leave
701 * irqs disabled here, since the posix-timers code is
702 * about to disable them again anyway.
704 spin_unlock(&tsk->sighand->siglock);
705 posixtimer_rearm(info);
706 spin_lock(&tsk->sighand->siglock);
708 /* Don't expose the si_sys_private value to userspace */
709 info->si_sys_private = 0;
714 EXPORT_SYMBOL_GPL(dequeue_signal);
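/*
 * Caller sketch (condensed from users such as signalfd, not verbatim):
 * the siglock of the task being dequeued from must be held across the
 * call, and *type reports which queue the signal came from:
 *
 *	enum pid_type type;
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &mask, &info, &type);
 *	spin_unlock_irq(&current->sighand->siglock);
 */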
716 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
718 struct task_struct *tsk = current;
719 struct sigpending *pending = &tsk->pending;
720 struct sigqueue *q, *sync = NULL;
723 * Might a synchronous signal be in the queue?
725 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
729 * Return the first synchronous signal in the queue.
731 list_for_each_entry(q, &pending->list, list) {
732 /* Synchronous signals have a positive si_code */
733 if ((q->info.si_code > SI_USER) &&
734 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
742 * Check if there is another siginfo for the same signal.
744 list_for_each_entry_continue(q, &pending->list, list) {
745 if (q->info.si_signo == sync->info.si_signo)
749 sigdelset(&pending->signal, sync->info.si_signo);
752 list_del_init(&sync->list);
753 copy_siginfo(info, &sync->info);
754 __sigqueue_free(sync);
755 return info->si_signo;
759 * Tell a process that it has a new active signal.
761 * NOTE! we rely on the previous spin_lock to
762 * lock interrupts for us! We can only be called with
763 * "siglock" held, and the local interrupt must
764 * have been disabled when that got acquired!
766 * No need to set need_resched since signal event passing
767 * goes through ->blocked
769 void signal_wake_up_state(struct task_struct *t, unsigned int state)
771 lockdep_assert_held(&t->sighand->siglock);
773 set_tsk_thread_flag(t, TIF_SIGPENDING);
776 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
777 * case. We don't check t->state here because there is a race with it
778 * executing on another processor and just now entering stopped state.
779 * By using wake_up_state, we ensure the process will wake up and
780 * handle its death signal.
782 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
783 kick_process(t);
787 * Remove signals in mask from the pending set and queue.
788 *
790 * All callers must be holding the siglock.
792 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
794 struct sigqueue *q, *n;
797 sigandsets(&m, mask, &s->signal);
798 if (sigisemptyset(&m))
801 sigandnsets(&s->signal, &s->signal, mask);
802 list_for_each_entry_safe(q, n, &s->list, list) {
803 if (sigismember(mask, q->info.si_signo)) {
804 list_del_init(&q->list);
810 static inline int is_si_special(const struct kernel_siginfo *info)
812 return info <= SEND_SIG_PRIV;
815 static inline bool si_fromuser(const struct kernel_siginfo *info)
817 return info == SEND_SIG_NOINFO ||
818 (!is_si_special(info) && SI_FROMUSER(info));
822 * called with RCU read lock from check_kill_permission()
824 static bool kill_ok_by_cred(struct task_struct *t)
826 const struct cred *cred = current_cred();
827 const struct cred *tcred = __task_cred(t);
829 return uid_eq(cred->euid, tcred->suid) ||
830 uid_eq(cred->euid, tcred->uid) ||
831 uid_eq(cred->uid, tcred->suid) ||
832 uid_eq(cred->uid, tcred->uid) ||
833 ns_capable(tcred->user_ns, CAP_KILL);
837 * Bad permissions for sending the signal
838 * - the caller must hold the RCU read lock
840 static int check_kill_permission(int sig, struct kernel_siginfo *info,
841 struct task_struct *t)
846 if (!valid_signal(sig))
849 if (!si_fromuser(info))
852 error = audit_signal_info(sig, t); /* Let audit system see the signal */
856 if (!same_thread_group(current, t) &&
857 !kill_ok_by_cred(t)) {
860 sid = task_session(t);
862 * We don't return the error if sid == NULL. The
863 * task was unhashed, the caller must notice this.
865 if (!sid || sid == task_session(current))
873 return security_task_kill(t, info, sig, NULL);
877 * ptrace_trap_notify - schedule trap to notify ptracer
878 * @t: tracee wanting to notify tracer
880 * This function schedules sticky ptrace trap which is cleared on the next
881 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
882 * the ptracer.
883 *
884 * If @t is running, STOP trap will be taken. If trapped for STOP and
885 * ptracer is listening for events, tracee is woken up so that it can
886 * re-trap for the new event. If trapped otherwise, STOP trap will be
887 * eventually taken without returning to userland after the existing traps
888 * are finished by PTRACE_CONT.
891 * Must be called with @task->sighand->siglock held.
893 static void ptrace_trap_notify(struct task_struct *t)
895 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
896 lockdep_assert_held(&t->sighand->siglock);
898 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
899 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
903 * Handle magic process-wide effects of stop/continue signals. Unlike
904 * the signal actions, these happen immediately at signal-generation
905 * time regardless of blocking, ignoring, or handling. This does the
906 * actual continuing for SIGCONT, but not the actual stopping for stop
907 * signals. The process stop is done as a signal action for SIG_DFL.
909 * Returns true if the signal should be actually delivered, otherwise
910 * it should be dropped.
912 static bool prepare_signal(int sig, struct task_struct *p, bool force)
914 struct signal_struct *signal = p->signal;
915 struct task_struct *t;
918 if (signal->flags & SIGNAL_GROUP_EXIT) {
919 if (signal->core_state)
920 return sig == SIGKILL;
921 /*
922 * The process is in the middle of dying, drop the signal.
923 */
924 return false;
925 } else if (sig_kernel_stop(sig)) {
927 * This is a stop signal. Remove SIGCONT from all queues.
929 siginitset(&flush, sigmask(SIGCONT));
930 flush_sigqueue_mask(&flush, &signal->shared_pending);
931 for_each_thread(p, t)
932 flush_sigqueue_mask(&flush, &t->pending);
933 } else if (sig == SIGCONT) {
936 * Remove all stop signals from all queues, wake all threads.
938 siginitset(&flush, SIG_KERNEL_STOP_MASK);
939 flush_sigqueue_mask(&flush, &signal->shared_pending);
940 for_each_thread(p, t) {
941 flush_sigqueue_mask(&flush, &t->pending);
942 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
943 if (likely(!(t->ptrace & PT_SEIZED))) {
944 t->jobctl &= ~JOBCTL_STOPPED;
945 wake_up_state(t, __TASK_STOPPED);
947 ptrace_trap_notify(t);
951 * Notify the parent with CLD_CONTINUED if we were stopped.
953 * If we were in the middle of a group stop, we pretend it
954 * was already finished, and then continued. Since SIGCHLD
955 * doesn't queue we report only CLD_STOPPED, as if the next
956 * CLD_CONTINUED was dropped.
959 if (signal->flags & SIGNAL_STOP_STOPPED)
960 why |= SIGNAL_CLD_CONTINUED;
961 else if (signal->group_stop_count)
962 why |= SIGNAL_CLD_STOPPED;
966 * The first thread which returns from do_signal_stop()
967 * will take ->siglock, notice SIGNAL_CLD_MASK, and
968 * notify its parent. See get_signal().
970 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
971 signal->group_stop_count = 0;
972 signal->group_exit_code = 0;
976 return !sig_ignored(p, sig, force);
980 * Test if P wants to take SIG. After we've checked all threads with this,
981 * it's equivalent to finding no threads not blocking SIG. Any threads not
982 * blocking SIG were ruled out because they are not running and already
983 * have pending signals. Such threads will dequeue from the shared queue
984 * as soon as they're available, so putting the signal on the shared queue
985 * will be equivalent to sending it to one such thread.
987 static inline bool wants_signal(int sig, struct task_struct *p)
989 if (sigismember(&p->blocked, sig))
992 if (p->flags & PF_EXITING)
998 if (task_is_stopped_or_traced(p))
1001 return task_curr(p) || !task_sigpending(p);
1004 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
1006 struct signal_struct *signal = p->signal;
1007 struct task_struct *t;
1010 * Now find a thread we can wake up to take the signal off the queue.
1012 * Try the suggested task first (may or may not be the main thread).
1014 if (wants_signal(sig, p))
1015 t = p;
1016 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1018 * There is just one thread and it does not need to be woken.
1019 * It will dequeue unblocked signals before it runs again.
1024 * Otherwise try to find a suitable thread.
1026 t = signal->curr_target;
1027 while (!wants_signal(sig, t)) {
1028 t = next_thread(t);
1029 if (t == signal->curr_target)
1031 * No thread needs to be woken.
1032 * Any eligible threads will see
1033 * the signal in the queue soon.
1037 signal->curr_target = t;
1041 * Found a killable thread. If the signal will be fatal,
1042 * then start taking the whole group down immediately.
1044 if (sig_fatal(p, sig) &&
1045 (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1046 !sigismember(&t->real_blocked, sig) &&
1047 (sig == SIGKILL || !p->ptrace)) {
1049 * This signal will be fatal to the whole group.
1051 if (!sig_kernel_coredump(sig)) {
1053 * Start a group exit and wake everybody up.
1054 * This way we don't have other threads
1055 * running and doing things after a slower
1056 * thread has the fatal signal pending.
1058 signal->flags = SIGNAL_GROUP_EXIT;
1059 signal->group_exit_code = sig;
1060 signal->group_stop_count = 0;
1063 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1064 sigaddset(&t->pending.signal, SIGKILL);
1065 signal_wake_up(t, 1);
1066 } while_each_thread(p, t);
1072 * The signal is already in the shared-pending queue.
1073 * Tell the chosen thread to wake up and dequeue it.
1075 signal_wake_up(t, sig == SIGKILL);
1079 static inline bool legacy_queue(struct sigpending *signals, int sig)
1081 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
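/*
 * Userspace-visible consequence (illustrative): a second pending
 * instance of a legacy signal is coalesced, while real-time signals
 * queue individually:
 *
 *	kill(pid, SIGUSR1);		// queued
 *	kill(pid, SIGUSR1);		// dropped, legacy_queue() was true
 *	sigqueue(pid, SIGRTMIN, val);	// own entry, subject to
 *					// RLIMIT_SIGPENDING
 */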
1084 static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1085 struct task_struct *t, enum pid_type type, bool force)
1087 struct sigpending *pending;
1089 int override_rlimit;
1090 int ret = 0, result;
1092 lockdep_assert_held(&t->sighand->siglock);
1094 result = TRACE_SIGNAL_IGNORED;
1095 if (!prepare_signal(sig, t, force))
1098 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1100 * Short-circuit ignored signals and support queuing
1101 * exactly one non-rt signal, so that we can get more
1102 * detailed information about the cause of the signal.
1104 result = TRACE_SIGNAL_ALREADY_PENDING;
1105 if (legacy_queue(pending, sig))
1108 result = TRACE_SIGNAL_DELIVERED;
1110 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1112 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1116 * Real-time signals must be queued if sent by sigqueue, or
1117 * some other real-time mechanism. It is implementation
1118 * defined whether kill() does so. We attempt to do so, on
1119 * the principle of least surprise, but since kill is not
1120 * allowed to fail with EAGAIN when low on memory we just
1121 * make sure at least one signal gets delivered and don't
1122 * pass on the info struct.
1125 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1127 override_rlimit = 0;
1129 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1132 list_add_tail(&q->list, &pending->list);
1133 switch ((unsigned long) info) {
1134 case (unsigned long) SEND_SIG_NOINFO:
1135 clear_siginfo(&q->info);
1136 q->info.si_signo = sig;
1137 q->info.si_errno = 0;
1138 q->info.si_code = SI_USER;
1139 q->info.si_pid = task_tgid_nr_ns(current,
1140 task_active_pid_ns(t));
1143 from_kuid_munged(task_cred_xxx(t, user_ns),
1147 case (unsigned long) SEND_SIG_PRIV:
1148 clear_siginfo(&q->info);
1149 q->info.si_signo = sig;
1150 q->info.si_errno = 0;
1151 q->info.si_code = SI_KERNEL;
1156 copy_siginfo(&q->info, info);
1159 } else if (!is_si_special(info) &&
1160 sig >= SIGRTMIN && info->si_code != SI_USER) {
1162 * Queue overflow, abort. We may abort if the
1163 * signal was rt and sent by user using something
1164 * other than kill().
1166 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1171 * This is a silent loss of information. We still
1172 * send the signal, but the *info bits are lost.
1174 result = TRACE_SIGNAL_LOSE_INFO;
1178 signalfd_notify(t, sig);
1179 sigaddset(&pending->signal, sig);
1181 /* Let multiprocess signals appear after on-going forks */
1182 if (type > PIDTYPE_TGID) {
1183 struct multiprocess_signals *delayed;
1184 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1185 sigset_t *signal = &delayed->signal;
1186 /* Can't queue both a stop and a continue signal */
1188 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1189 else if (sig_kernel_stop(sig))
1190 sigdelset(signal, SIGCONT);
1191 sigaddset(signal, sig);
1195 complete_signal(sig, t, type);
1197 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1201 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1204 switch (siginfo_layout(info->si_signo, info->si_code)) {
1213 case SIL_FAULT_TRAPNO:
1214 case SIL_FAULT_MCEERR:
1215 case SIL_FAULT_BNDERR:
1216 case SIL_FAULT_PKUERR:
1217 case SIL_FAULT_PERF_EVENT:
1225 int send_signal_locked(int sig, struct kernel_siginfo *info,
1226 struct task_struct *t, enum pid_type type)
1228 /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1231 if (info == SEND_SIG_NOINFO) {
1232 /* Force if sent from an ancestor pid namespace */
1233 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1234 } else if (info == SEND_SIG_PRIV) {
1235 /* Don't ignore kernel generated signals */
1237 } else if (has_si_pid_and_uid(info)) {
1238 /* SIGKILL and SIGSTOP are special or have ids */
1239 struct user_namespace *t_user_ns;
1242 t_user_ns = task_cred_xxx(t, user_ns);
1243 if (current_user_ns() != t_user_ns) {
1244 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1245 info->si_uid = from_kuid_munged(t_user_ns, uid);
1249 /* A kernel generated signal? */
1250 force = (info->si_code == SI_KERNEL);
1252 /* From an ancestor pid namespace? */
1253 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1258 return __send_signal_locked(sig, info, t, type, force);
1261 static void print_fatal_signal(int signr)
1263 struct pt_regs *regs = task_pt_regs(current);
1264 struct file *exe_file;
1266 exe_file = get_task_exe_file(current);
1268 pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
1269 exe_file, current->comm, signr);
1272 pr_info("%s: potentially unexpected fatal signal %d.\n",
1273 current->comm, signr);
1276 #if defined(__i386__) && !defined(__arch_um__)
1277 pr_info("code at %08lx: ", regs->ip);
1280 for (i = 0; i < 16; i++) {
1283 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1285 pr_cont("%02x ", insn);
1295 static int __init setup_print_fatal_signals(char *str)
1297 get_option(&str, &print_fatal_signals);
1302 __setup("print-fatal-signals=", setup_print_fatal_signals);
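/*
 * Usage note: booting with "print-fatal-signals=1", or writing 1 to
 * /proc/sys/kernel/print-fatal-signals at runtime, enables the
 * diagnostics printed by print_dropped_signal() above and
 * print_fatal_signal() below.
 */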
1304 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1307 unsigned long flags;
1310 if (lock_task_sighand(p, &flags)) {
1311 ret = send_signal_locked(sig, info, p, type);
1312 unlock_task_sighand(p, &flags);
1319 HANDLER_CURRENT, /* If reachable use the current handler */
1320 HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1321 HANDLER_EXIT, /* Only visible as the process exit code */
1325 * Force a signal that the process can't ignore: if necessary
1326 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1328 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1329 * since we do not want a signal handler that was blocked to
1330 * be invoked when user space had explicitly blocked it.
1332 * We don't want to have recursive SIGSEGV's etc, for example,
1333 * that is why we also clear SIGNAL_UNKILLABLE.
1336 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1337 enum sig_handler handler)
1339 unsigned long int flags;
1340 int ret, blocked, ignored;
1341 struct k_sigaction *action;
1342 int sig = info->si_signo;
1344 spin_lock_irqsave(&t->sighand->siglock, flags);
1345 action = &t->sighand->action[sig-1];
1346 ignored = action->sa.sa_handler == SIG_IGN;
1347 blocked = sigismember(&t->blocked, sig);
1348 if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1349 action->sa.sa_handler = SIG_DFL;
1350 if (handler == HANDLER_EXIT)
1351 action->sa.sa_flags |= SA_IMMUTABLE;
1353 sigdelset(&t->blocked, sig);
1354 recalc_sigpending_and_wake(t);
1358 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1359 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1361 if (action->sa.sa_handler == SIG_DFL &&
1362 (!t->ptrace || (handler == HANDLER_EXIT)))
1363 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1364 ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1365 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1370 int force_sig_info(struct kernel_siginfo *info)
1372 return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1376 * Nuke all other threads in the group.
1378 int zap_other_threads(struct task_struct *p)
1380 struct task_struct *t = p;
1383 p->signal->group_stop_count = 0;
1385 while_each_thread(p, t) {
1386 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1387 /* Don't require de_thread to wait for the vhost_worker */
1388 if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
1391 /* Don't bother with already dead threads */
1394 sigaddset(&t->pending.signal, SIGKILL);
1395 signal_wake_up(t, 1);
1401 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1402 unsigned long *flags)
1404 struct sighand_struct *sighand;
1408 sighand = rcu_dereference(tsk->sighand);
1409 if (unlikely(sighand == NULL))
1413 * This sighand can be already freed and even reused, but
1414 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1415 * initializes ->siglock: this slab can't go away, it has
1416 * the same object type, ->siglock can't be reinitialized.
1418 * We need to ensure that tsk->sighand is still the same
1419 * after we take the lock, we can race with de_thread() or
1420 * __exit_signal(). In the latter case the next iteration
1421 * must see ->sighand == NULL.
1423 spin_lock_irqsave(&sighand->siglock, *flags);
1424 if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1426 spin_unlock_irqrestore(&sighand->siglock, *flags);
1433 #ifdef CONFIG_LOCKDEP
1434 void lockdep_assert_task_sighand_held(struct task_struct *task)
1436 struct sighand_struct *sighand;
1439 sighand = rcu_dereference(task->sighand);
1441 lockdep_assert_held(&sighand->siglock);
1449 * send signal info to all the members of a group
1451 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1452 struct task_struct *p, enum pid_type type)
1457 ret = check_kill_permission(sig, info, p);
1461 ret = do_send_sig_info(sig, info, p, type);
1467 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1468 * control characters do (^C, ^Z etc)
1469 * - the caller must hold at least a readlock on tasklist_lock
1471 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1473 struct task_struct *p = NULL;
1474 int retval, success;
1478 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1479 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1482 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1483 return success ? 0 : retval;
1486 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1489 struct task_struct *p;
1493 p = pid_task(pid, PIDTYPE_PID);
1495 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1497 if (likely(!p || error != -ESRCH))
1501 * The task was unhashed in between, try again. If it
1502 * is dead, pid_task() will return NULL, if we race with
1503 * de_thread() it will find the new leader.
1508 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1512 error = kill_pid_info(sig, info, find_vpid(pid));
1517 static inline bool kill_as_cred_perm(const struct cred *cred,
1518 struct task_struct *target)
1520 const struct cred *pcred = __task_cred(target);
1522 return uid_eq(cred->euid, pcred->suid) ||
1523 uid_eq(cred->euid, pcred->uid) ||
1524 uid_eq(cred->uid, pcred->suid) ||
1525 uid_eq(cred->uid, pcred->uid);
1529 * The usb asyncio usage of siginfo is wrong. The glibc support
1530 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1531 * AKA after the generic fields:
1532 * kernel_pid_t si_pid;
1533 * kernel_uid32_t si_uid;
1534 * sigval_t si_value;
1536 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1537 * after the generic fields is:
1538 * void __user *si_addr;
1540 * This is a practical problem when there is a 64bit big endian kernel
1541 * and a 32bit userspace. As the 32bit address will be encoded in the low
1542 * 32bits of the pointer, those low 32bits will be stored at a higher
1543 * address than they would appear in a 32bit pointer. So userspace will not
1544 * see the address it was expecting for its completions.
1546 * There is nothing in the encoding that can allow
1547 * copy_siginfo_to_user32 to detect this confusion of formats, so
1548 * handle this by requiring the caller of kill_pid_usb_asyncio to
1549 * notice when this situation takes place and to store the 32bit
1550 * pointer in sival_int, instead of sival_addr of the sigval_t addr
1551 * union.
1553 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1554 struct pid *pid, const struct cred *cred)
1556 struct kernel_siginfo info;
1557 struct task_struct *p;
1558 unsigned long flags;
1561 if (!valid_signal(sig))
1564 clear_siginfo(&info);
1565 info.si_signo = sig;
1566 info.si_errno = errno;
1567 info.si_code = SI_ASYNCIO;
1568 *((sigval_t *)&info.si_pid) = addr;
1571 p = pid_task(pid, PIDTYPE_PID);
1576 if (!kill_as_cred_perm(cred, p)) {
1580 ret = security_task_kill(p, &info, sig, cred);
1585 if (lock_task_sighand(p, &flags)) {
1586 ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1587 unlock_task_sighand(p, &flags);
1595 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1598 * kill_something_info() interprets pid in interesting ways just like kill(2).
1600 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1601 * is probably wrong. Should make it like BSD or SYSV.
1604 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1609 return kill_proc_info(sig, info, pid);
1611 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1612 if (pid == INT_MIN)
1613 return -ESRCH;
1615 read_lock(&tasklist_lock);
1617 ret = __kill_pgrp_info(sig, info,
1618 pid ? find_vpid(-pid) : task_pgrp(current));
1620 int retval = 0, count = 0;
1621 struct task_struct * p;
1623 for_each_process(p) {
1624 if (task_pid_vnr(p) > 1 &&
1625 !same_thread_group(p, current)) {
1626 int err = group_send_sig_info(sig, info, p,
1633 ret = count ? retval : -ESRCH;
1635 read_unlock(&tasklist_lock);
1641 * These are for backward compatibility with the rest of the kernel source.
1644 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1647 * Make sure legacy kernel users don't send in bad values
1648 * (normal paths check this in check_kill_permission).
1650 if (!valid_signal(sig))
1653 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1655 EXPORT_SYMBOL(send_sig_info);
1657 #define __si_special(priv) \
1658 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1661 send_sig(int sig, struct task_struct *p, int priv)
1663 return send_sig_info(sig, __si_special(priv), p);
1665 EXPORT_SYMBOL(send_sig);
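/*
 * Usage sketch (hypothetical driver code): @priv selects the special
 * siginfo via __si_special() above, so a sender that wants the signal
 * treated as kernel-generated rather than user-generated passes 1:
 *
 *	send_sig(SIGTERM, task, 1);	// SEND_SIG_PRIV, si_code SI_KERNEL
 *	send_sig(SIGTERM, task, 0);	// SEND_SIG_NOINFO, si_code SI_USER
 */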
1667 void force_sig(int sig)
1669 struct kernel_siginfo info;
1671 clear_siginfo(&info);
1672 info.si_signo = sig;
1674 info.si_code = SI_KERNEL;
1677 force_sig_info(&info);
1679 EXPORT_SYMBOL(force_sig);
1681 void force_fatal_sig(int sig)
1683 struct kernel_siginfo info;
1685 clear_siginfo(&info);
1686 info.si_signo = sig;
1688 info.si_code = SI_KERNEL;
1691 force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1694 void force_exit_sig(int sig)
1696 struct kernel_siginfo info;
1698 clear_siginfo(&info);
1699 info.si_signo = sig;
1701 info.si_code = SI_KERNEL;
1704 force_sig_info_to_task(&info, current, HANDLER_EXIT);
1708 * When things go south during signal handling, we
1709 * will force a SIGSEGV. And if the signal that caused
1710 * the problem was already a SIGSEGV, we'll want to
1711 * make sure we don't even try to deliver the signal.
1713 void force_sigsegv(int sig)
1716 force_fatal_sig(SIGSEGV);
1721 int force_sig_fault_to_task(int sig, int code, void __user *addr,
1722 struct task_struct *t)
1724 struct kernel_siginfo info;
1726 clear_siginfo(&info);
1727 info.si_signo = sig;
1729 info.si_code = code;
1730 info.si_addr = addr;
1731 return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1734 int force_sig_fault(int sig, int code, void __user *addr)
1736 return force_sig_fault_to_task(sig, code, addr, current);
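/*
 * Call-site sketch (modelled on arch page fault handlers; exact details
 * vary per architecture): after failing to resolve a user address, an
 * arch reports the fault to the offending task with:
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
 */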
1739 int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
1741 struct kernel_siginfo info;
1743 clear_siginfo(&info);
1744 info.si_signo = sig;
1746 info.si_code = code;
1747 info.si_addr = addr;
1748 return send_sig_info(info.si_signo, &info, t);
1751 int force_sig_mceerr(int code, void __user *addr, short lsb)
1753 struct kernel_siginfo info;
1755 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1756 clear_siginfo(&info);
1757 info.si_signo = SIGBUS;
1759 info.si_code = code;
1760 info.si_addr = addr;
1761 info.si_addr_lsb = lsb;
1762 return force_sig_info(&info);
1765 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1767 struct kernel_siginfo info;
1769 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1770 clear_siginfo(&info);
1771 info.si_signo = SIGBUS;
1773 info.si_code = code;
1774 info.si_addr = addr;
1775 info.si_addr_lsb = lsb;
1776 return send_sig_info(info.si_signo, &info, t);
1778 EXPORT_SYMBOL(send_sig_mceerr);
1780 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1782 struct kernel_siginfo info;
1784 clear_siginfo(&info);
1785 info.si_signo = SIGSEGV;
1787 info.si_code = SEGV_BNDERR;
1788 info.si_addr = addr;
1789 info.si_lower = lower;
1790 info.si_upper = upper;
1791 return force_sig_info(&info);
1795 int force_sig_pkuerr(void __user *addr, u32 pkey)
1797 struct kernel_siginfo info;
1799 clear_siginfo(&info);
1800 info.si_signo = SIGSEGV;
1802 info.si_code = SEGV_PKUERR;
1803 info.si_addr = addr;
1804 info.si_pkey = pkey;
1805 return force_sig_info(&info);
1809 int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1811 struct kernel_siginfo info;
1813 clear_siginfo(&info);
1814 info.si_signo = SIGTRAP;
1816 info.si_code = TRAP_PERF;
1817 info.si_addr = addr;
1818 info.si_perf_data = sig_data;
1819 info.si_perf_type = type;
1822 * Signals generated by perf events should not terminate the whole
1823 * process if SIGTRAP is blocked, however, delivering the signal
1824 * asynchronously is better than not delivering at all. But tell user
1825 * space if the signal was asynchronous, so it can clearly be
1826 * distinguished from normal synchronous ones.
1828 info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1829 TRAP_PERF_FLAG_ASYNC :
1832 return send_sig_info(info.si_signo, &info, current);
1836 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1837 * @syscall: syscall number to send to userland
1838 * @reason: filter-supplied reason code to send to userland (via si_errno)
1839 * @force_coredump: true to trigger a coredump
1841 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1843 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1845 struct kernel_siginfo info;
1847 clear_siginfo(&info);
1848 info.si_signo = SIGSYS;
1849 info.si_code = SYS_SECCOMP;
1850 info.si_call_addr = (void __user *)KSTK_EIP(current);
1851 info.si_errno = reason;
1852 info.si_arch = syscall_get_arch(current);
1853 info.si_syscall = syscall;
1854 return force_sig_info_to_task(&info, current,
1855 force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1858 /* For the crazy architectures that include trap information in
1859 * the errno field, instead of an actual errno value.
1861 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1863 struct kernel_siginfo info;
1865 clear_siginfo(&info);
1866 info.si_signo = SIGTRAP;
1867 info.si_errno = errno;
1868 info.si_code = TRAP_HWBKPT;
1869 info.si_addr = addr;
1870 return force_sig_info(&info);
1873 /* For the rare architectures that include trap information using
1874 * si_trapno.
1875 */
1876 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1878 struct kernel_siginfo info;
1880 clear_siginfo(&info);
1881 info.si_signo = sig;
1883 info.si_code = code;
1884 info.si_addr = addr;
1885 info.si_trapno = trapno;
1886 return force_sig_info(&info);
1889 /* For the rare architectures that include trap information using
1890 * si_trapno.
1891 */
1892 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1893 struct task_struct *t)
1895 struct kernel_siginfo info;
1897 clear_siginfo(&info);
1898 info.si_signo = sig;
1900 info.si_code = code;
1901 info.si_addr = addr;
1902 info.si_trapno = trapno;
1903 return send_sig_info(info.si_signo, &info, t);
1906 int kill_pgrp(struct pid *pid, int sig, int priv)
1910 read_lock(&tasklist_lock);
1911 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1912 read_unlock(&tasklist_lock);
1916 EXPORT_SYMBOL(kill_pgrp);
1918 int kill_pid(struct pid *pid, int sig, int priv)
1920 return kill_pid_info(sig, __si_special(priv), pid);
1922 EXPORT_SYMBOL(kill_pid);
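/*
 * Usage sketch (hypothetical caller): a subsystem that holds a struct
 * pid reference can signal the task without touching tasklist_lock:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *	kill_pid(pid, SIGHUP, 1);
 *	put_pid(pid);
 */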
1925 * These functions support sending signals using preallocated sigqueue
1926 * structures. This is needed "because realtime applications cannot
1927 * afford to lose notifications of asynchronous events, like timer
1928 * expirations or I/O completions". In the case of POSIX Timers
1929 * we allocate the sigqueue structure from the timer_create. If this
1930 * allocation fails we are able to report the failure to the application
1931 * with an EAGAIN error.
1933 struct sigqueue *sigqueue_alloc(void)
1935 return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
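/*
 * Lifecycle sketch (condensed from the POSIX timer code, not verbatim):
 * preallocating at creation time means expiry can never fail with
 * EAGAIN:
 *
 *	timer_create():	q = sigqueue_alloc();	// failure reported early
 *	timer expiry:	send_sigqueue(q, pid, type);
 *	timer_delete():	sigqueue_free(q);
 */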
1938 void sigqueue_free(struct sigqueue *q)
1940 unsigned long flags;
1941 spinlock_t *lock = &current->sighand->siglock;
1943 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1945 * We must hold ->siglock while testing q->list
1946 * to serialize with collect_signal() or with
1947 * __exit_signal()->flush_sigqueue().
1949 spin_lock_irqsave(lock, flags);
1950 q->flags &= ~SIGQUEUE_PREALLOC;
1952 * If it is queued it will be freed when dequeued,
1953 * like the "regular" sigqueue.
1955 if (!list_empty(&q->list))
1957 spin_unlock_irqrestore(lock, flags);
1963 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1965 int sig = q->info.si_signo;
1966 struct sigpending *pending;
1967 struct task_struct *t;
1968 unsigned long flags;
1971 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1977 * This function is used by POSIX timers to deliver a timer signal.
1978 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1979 * set), the signal must be delivered to the specific thread (queues
1980 * into t->pending).
1981 *
1982 * Where type is not PIDTYPE_PID, signals must be delivered to the
1983 * process. In this case, prefer to deliver to current if it is in
1984 * the same thread group as the target process, which avoids
1985 * unnecessarily waking up a potentially idle task.
1987 t = pid_task(pid, type);
1990 if (type != PIDTYPE_PID && same_thread_group(t, current))
1991 t = current;
1992 if (!likely(lock_task_sighand(t, &flags)))
1995 ret = 1; /* the signal is ignored */
1996 result = TRACE_SIGNAL_IGNORED;
1997 if (!prepare_signal(sig, t, false))
2001 if (unlikely(!list_empty(&q->list))) {
2003 * If an SI_TIMER entry is already queued, just increment
2004 * the overrun count.
2006 BUG_ON(q->info.si_code != SI_TIMER);
2007 q->info.si_overrun++;
2008 result = TRACE_SIGNAL_ALREADY_PENDING;
2011 q->info.si_overrun = 0;
2013 signalfd_notify(t, sig);
2014 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
2015 list_add_tail(&q->list, &pending->list);
2016 sigaddset(&pending->signal, sig);
2017 complete_signal(sig, t, type);
2018 result = TRACE_SIGNAL_DELIVERED;
2020 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2021 unlock_task_sighand(t, &flags);
2027 static void do_notify_pidfd(struct task_struct *task)
2031 WARN_ON(task->exit_state == 0);
2032 pid = task_pid(task);
2033 wake_up_all(&pid->wait_pidfd);
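/*
 * Userspace-visible effect (sketch): a file descriptor obtained with
 * pidfd_open(pid, 0) becomes readable once the task exits; poll() or
 * epoll on it reports EPOLLIN after the wake_up_all() above.
 */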
2037 * Let a parent know about the death of a child.
2038 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2040 * Returns true if our parent ignored us and so we've switched to
2041 * self-reaping.
2042 */
2043 bool do_notify_parent(struct task_struct *tsk, int sig)
2045 struct kernel_siginfo info;
2046 unsigned long flags;
2047 struct sighand_struct *psig;
2048 bool autoreap = false;
2051 WARN_ON_ONCE(sig == -1);
2053 /* do_notify_parent_cldstop should have been called instead. */
2054 WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2056 WARN_ON_ONCE(!tsk->ptrace &&
2057 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2059 /* Wake up all pidfd waiters */
2060 do_notify_pidfd(tsk);
2062 if (sig != SIGCHLD) {
2064 * This is only possible if parent == real_parent.
2065 * Check if it has changed security domain.
2067 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2071 clear_siginfo(&info);
2072 info.si_signo = sig;
2075 * We are under tasklist_lock here so our parent is tied to
2076 * us and cannot change.
2078 * task_active_pid_ns will always return the same pid namespace
2079 * until a task passes through release_task.
2081 * write_lock() currently calls preempt_disable() which is the
2082 * same as rcu_read_lock(), but according to Oleg, it is not
2083 * correct to rely on this.
2086 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2087 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2091 task_cputime(tsk, &utime, &stime);
2092 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2093 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2095 info.si_status = tsk->exit_code & 0x7f;
2096 if (tsk->exit_code & 0x80)
2097 info.si_code = CLD_DUMPED;
2098 else if (tsk->exit_code & 0x7f)
2099 info.si_code = CLD_KILLED;
2101 info.si_code = CLD_EXITED;
2102 info.si_status = tsk->exit_code >> 8;
2105 psig = tsk->parent->sighand;
2106 spin_lock_irqsave(&psig->siglock, flags);
2107 if (!tsk->ptrace && sig == SIGCHLD &&
2108 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2109 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2111 * We are exiting and our parent doesn't care. POSIX.1
2112 * defines special semantics for setting SIGCHLD to SIG_IGN
2113 * or setting the SA_NOCLDWAIT flag: we should be reaped
2114 * automatically and not left for our parent's wait4 call.
2115 * Rather than having the parent do it as a magic kind of
2116 * signal handler, we just set this to tell do_exit that we
2117 * can be cleaned up without becoming a zombie. Note that
2118 * we still call __wake_up_parent in this case, because a
2119 * blocked sys_wait4 might now return -ECHILD.
2121 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2122 * is implementation-defined: we do (if you don't want
2123 * it, just use SIG_IGN instead).
2126 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2130 * Send with __send_signal as si_pid and si_uid are in the
2131 * parent's namespaces.
2133 if (valid_signal(sig) && sig)
2134 __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2135 __wake_up_parent(tsk, tsk->parent);
2136 spin_unlock_irqrestore(&psig->siglock, flags);
2142 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2143 * @tsk: task reporting the state change
2144 * @for_ptracer: the notification is for ptracer
2145 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2147 * Notify @tsk's parent that the stopped/continued state has changed. If
2148 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2149 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2152 * Must be called with tasklist_lock at least read locked.
2154 static void do_notify_parent_cldstop(struct task_struct *tsk,
2155 bool for_ptracer, int why)
2157 struct kernel_siginfo info;
2158 unsigned long flags;
2159 struct task_struct *parent;
2160 struct sighand_struct *sighand;
2164 parent = tsk->parent;
2166 tsk = tsk->group_leader;
2167 parent = tsk->real_parent;
2170 clear_siginfo(&info);
2171 info.si_signo = SIGCHLD;
2174 * see comment in do_notify_parent() about the following 4 lines
2177 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2178 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2181 task_cputime(tsk, &utime, &stime);
2182 info.si_utime = nsec_to_clock_t(utime);
2183 info.si_stime = nsec_to_clock_t(stime);
2188 info.si_status = SIGCONT;
2191 info.si_status = tsk->signal->group_exit_code & 0x7f;
2194 info.si_status = tsk->exit_code & 0x7f;
2200 sighand = parent->sighand;
2201 spin_lock_irqsave(&sighand->siglock, flags);
2202 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2203 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2204 send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2206 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2208 __wake_up_parent(tsk, parent);
2209 spin_unlock_irqrestore(&sighand->siglock, flags);
2213 * This must be called with current->sighand->siglock held.
2215 * This should be the path for all ptrace stops.
2216 * We always set current->last_siginfo while stopped here.
2217 * That makes it a way to test a stopped process for
2218 * being ptrace-stopped vs being job-control-stopped.
2220 * Returns the signal the ptracer requested the code resume
2221 * with. If the code did not stop because the tracer is gone,
2222 * the stop signal remains unchanged unless clear_code.
2224 static int ptrace_stop(int exit_code, int why, unsigned long message,
2225 kernel_siginfo_t *info)
2226 __releases(&current->sighand->siglock)
2227 __acquires(&current->sighand->siglock)
2229 bool gstop_done = false;
2231 if (arch_ptrace_stop_needed()) {
2233 * The arch code has something special to do before a
2234 * ptrace stop. This is allowed to block, e.g. for faults
2235 * on user stack pages. We can't keep the siglock while
2236 * calling arch_ptrace_stop, so we must release it now.
2237 * To preserve proper semantics, we must do this before
2238 * any signal bookkeeping like checking group_stop_count.
2240 spin_unlock_irq(&current->sighand->siglock);
2241 arch_ptrace_stop();
2242 spin_lock_irq(&current->sighand->siglock);
2246 * After this point ptrace_signal_wake_up or signal_wake_up
2247 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2248 * signal comes in. Handle previous ptrace_unlinks and fatal
2249 * signals here to prevent ptrace_stop sleeping in schedule.
2251 if (!current->ptrace || __fatal_signal_pending(current))
2254 set_special_state(TASK_TRACED);
2255 current->jobctl |= JOBCTL_TRACED;
2258 * We're committing to trapping. TRACED should be visible before
2259 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2260 * Also, transition to TRACED and updates to ->jobctl should be
2261 * atomic with respect to siglock and should be done after the arch
2262 * hook as siglock is released and regrabbed across it.
2263 *
2264 *     TRACER                                TRACEE
2265 *
2266 *     ptrace_attach()
2267 * [L]   wait_on_bit(JOBCTL_TRAPPING)        [S] set_special_state(TRACED)
2268 *     do_wait()
2269 *       set_current_state()                 smp_wmb();
2270 *       ptrace_do_wait()
2271 *         wait_task_stopped()
2272 *           task_stopped_code()
2273 * [L]     task_is_traced()                  [S] task_clear_jobctl_trapping();
2274 */
2277 current->ptrace_message = message;
2278 current->last_siginfo = info;
2279 current->exit_code = exit_code;
2282 * If @why is CLD_STOPPED, we're trapping to participate in a group
2283 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2284 * across siglock relocks since INTERRUPT was scheduled, PENDING
2285 * could be clear now. We act as if SIGCONT is received after
2286 * TASK_TRACED is entered - ignore it.
2288 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2289 gstop_done = task_participate_group_stop(current);
2291 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2292 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2293 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2294 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2296 /* entering a trap, clear TRAPPING */
2297 task_clear_jobctl_trapping(current);
2299 spin_unlock_irq(&current->sighand->siglock);
2300 read_lock(&tasklist_lock);
2302 * Notify parents of the stop.
2304 * While ptraced, there are two parents - the ptracer and
2305 * the real_parent of the group_leader. The ptracer should
2306 * know about every stop while the real parent is only
2307 * interested in the completion of group stop. The states
2308 * for the two don't interact with each other. Notify
2309 * separately unless they're gonna be duplicates.
2311 if (current->ptrace)
2312 do_notify_parent_cldstop(current, true, why);
2313 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2314 do_notify_parent_cldstop(current, false, why);
2317 * The previous do_notify_parent_cldstop() invocation woke ptracer.
2318 * On a PREEMPTION kernel this can result in a preemption requirement
2319 * which will be fulfilled after read_unlock() and the ptracer will be
2320 * put on the CPU.
2321 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2322 * this task to wait in schedule(). If this task gets preempted then it
2323 * remains enqueued on the runqueue. The ptracer will observe this and
2324 * then sleep for a delay of one HZ tick. In the meantime this task
2325 * gets scheduled, enters schedule() and will wait for the ptracer.
2327 * This preemption point is not bad from a correctness point of
2328 * view but extends the runtime by one HZ tick due to the
2329 * ptracer's sleep. The preempt-disable section ensures that there
2330 * will be no preemption between unlock and schedule(), which
2331 * improves performance since the ptracer will observe that
2332 * the tracee is scheduled out once it gets on the CPU.
2334 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2335 * Therefore the task can be preempted after do_notify_parent_cldstop()
2336 * before unlocking tasklist_lock so there is no benefit in doing this.
2338 * In fact disabling preemption is harmful on PREEMPT_RT because
2339 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2340 * with preemption disabled due to the 'sleeping' spinlock
2341 * substitution of RT.
2343 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2345 read_unlock(&tasklist_lock);
2346 cgroup_enter_frozen();
2347 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2348 preempt_enable_no_resched();
2350 cgroup_leave_frozen(true);
2353 * We are back. Now reacquire the siglock before touching
2354 * last_siginfo, so that we are sure to have synchronized with
2355 * any signal-sending on another CPU that wants to examine it.
2357 spin_lock_irq(&current->sighand->siglock);
2358 exit_code = current->exit_code;
2359 current->last_siginfo = NULL;
2360 current->ptrace_message = 0;
2361 current->exit_code = 0;
2363 /* LISTENING can be set only during STOP traps, clear it */
2364 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2367 * Queued signals ignored us while we were stopped for tracing.
2368 * So check for any that we should take before resuming user mode.
2369 * This sets TIF_SIGPENDING, but never clears it.
2371 recalc_sigpending_tsk(current);
2375 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2377 kernel_siginfo_t info;
2379 clear_siginfo(&info);
2380 info.si_signo = signr;
2381 info.si_code = exit_code;
2382 info.si_pid = task_pid_vnr(current);
2383 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2385 /* Let the debugger run. */
2386 return ptrace_stop(exit_code, why, message, &info);
2389 int ptrace_notify(int exit_code, unsigned long message)
2393 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2394 if (unlikely(task_work_pending(current)))
2397 spin_lock_irq(&current->sighand->siglock);
2398 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2399 spin_unlock_irq(&current->sighand->siglock);
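/*
 * Example (illustrative): callers pack a ptrace event into bits 8-15 of
 * @exit_code while keeping SIGTRAP in the low bits, as the BUG_ON above
 * demands, e.g.:
 *
 *	ptrace_notify(SIGTRAP | (PTRACE_EVENT_EXEC << 8), message);
 */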
2404 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2405 * @signr: signr causing group stop if initiating
2407 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2408 * and participate in it. If already set, participate in the existing
2409 * group stop. If participated in a group stop (and thus slept), %true is
2410 * returned with siglock released.
2412 * If ptraced, this function doesn't handle stop itself. Instead,
2413 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2414 * untouched. The caller must ensure that INTERRUPT trap handling takes
2415 * place afterwards.
2418 * Must be called with @current->sighand->siglock held, which is released on %true return.
2422 * %false if group stop is already cancelled or ptrace trap is scheduled.
2423 * %true if participated in group stop.
2425 static bool do_signal_stop(int signr)
2426 __releases(&current->sighand->siglock)
2428 struct signal_struct *sig = current->signal;
2430 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2431 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2432 struct task_struct *t;
2434 /* signr will be recorded in task->jobctl for retries */
2435 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2437 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2438 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2439 unlikely(sig->group_exec_task))
2442 * There is no group stop already in progress. We must
2445 * While ptraced, a task may be resumed while group stop is
2446 * still in effect and then receive a stop signal and
2447 * initiate another group stop. This deviates from the
2448 * usual behavior as two consecutive stop signals can't
2449 * cause two group stops when !ptraced. That is why we
2450 * also check !task_is_stopped(t) below.
2452 * The condition can be distinguished by testing whether
2453 * SIGNAL_STOP_STOPPED is already set. Don't generate
2454 * group_exit_code in such case.
2456 * This is not necessary for SIGNAL_STOP_CONTINUED because
2457 * an intervening stop signal is required to cause two
2458 * continued events regardless of ptrace.
2460 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2461 sig->group_exit_code = signr;
2463 sig->group_stop_count = 0;
2465 if (task_set_jobctl_pending(current, signr | gstop))
2466 sig->group_stop_count++;
2469 while_each_thread(current, t) {
2471 * Setting state to TASK_STOPPED for a group
2472 * stop is always done with the siglock held,
2473 * so this check has no races.
2475 if (!task_is_stopped(t) &&
2476 task_set_jobctl_pending(t, signr | gstop)) {
2477 sig->group_stop_count++;
2478 if (likely(!(t->ptrace & PT_SEIZED)))
2479 signal_wake_up(t, 0);
2481 ptrace_trap_notify(t);
2486 if (likely(!current->ptrace)) {
2490 * If there are no other threads in the group, or if there
2491 * is a group stop in progress and we are the last to stop,
2492 * report to the parent.
2494 if (task_participate_group_stop(current))
2495 notify = CLD_STOPPED;
2497 current->jobctl |= JOBCTL_STOPPED;
2498 set_special_state(TASK_STOPPED);
2499 spin_unlock_irq(&current->sighand->siglock);
2502 * Notify the parent of the group stop completion. Because
2503 * we're not holding either the siglock or tasklist_lock
2504 * here, ptracer may attach in between; however, this is for
2505 * group stop and should always be delivered to the real
2506 * parent of the group leader. The new ptracer will get
2507 * its notification when this task transitions into TASK_TRACED.
2511 read_lock(&tasklist_lock);
2512 do_notify_parent_cldstop(current, false, notify);
2513 read_unlock(&tasklist_lock);
2516 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2517 cgroup_enter_frozen();
2522 * While ptraced, group stop is handled by STOP trap.
2523 * Schedule it and let the caller deal with it.
2525 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2531 * do_jobctl_trap - take care of ptrace jobctl traps
2533 * When PT_SEIZED, it's used for both group stop and explicit
2534 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2535 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2536 * the stop signal; otherwise, %SIGTRAP.
2538 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2539 * number as exit_code and no siginfo.
2542 * Must be called with @current->sighand->siglock held, which may be
2543 * released and re-acquired before returning with intervening sleep.
2545 static void do_jobctl_trap(void)
2547 struct signal_struct *signal = current->signal;
2548 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2550 if (current->ptrace & PT_SEIZED) {
2551 if (!signal->group_stop_count &&
2552 !(signal->flags & SIGNAL_STOP_STOPPED))
2554 WARN_ON_ONCE(!signr);
2555 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2558 WARN_ON_ONCE(!signr);
2559 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
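/*
 * Example (illustrative, userspace): a PTRACE_SEIZE ptracer can tell the
 * resulting group-stop trap apart from an ordinary signal-delivery stop
 * by the event in the wait status (a sketch; see ptrace(2)):
 *
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) && (status >> 16) == PTRACE_EVENT_STOP)
 *		;	// group-stop: WSTOPSIG(status) is the stop signal
 */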
2564 * do_freezer_trap - handle the freezer jobctl trap
2566 * Puts the task into the frozen state, unless the task is about to quit,
2567 * in which case it drops JOBCTL_TRAP_FREEZE.
2570 * Must be called with @current->sighand->siglock held,
2571 * which is always released before returning.
2573 static void do_freezer_trap(void)
2574 __releases(&current->sighand->siglock)
2577 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2578 * let's make another loop to give it a chance to be handled.
2579 * In any case, we'll return.
2581 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2582 JOBCTL_TRAP_FREEZE) {
2583 spin_unlock_irq(&current->sighand->siglock);
2588 * Now we're sure that there is no pending fatal signal and no
2589 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2590 * immediately (if there is a non-fatal signal pending), and
2591 * put the task to sleep.
2593 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2594 clear_thread_flag(TIF_SIGPENDING);
2595 spin_unlock_irq(&current->sighand->siglock);
2596 cgroup_enter_frozen();
2600 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2603 * We do not check sig_kernel_stop(signr) but set this marker
2604 * unconditionally because we do not know whether debugger will
2605 * change signr. This flag has no meaning unless we are going
2606 * to stop after return from ptrace_stop(). In this case it will
2607 * be checked in do_signal_stop(), we should only stop if it was
2608 * not cleared by SIGCONT while we were sleeping. See also the
2609 * comment in dequeue_signal().
2611 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2612 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2614 /* We're back. Did the debugger cancel the sig? */
2619 * Update the siginfo structure if the signal has
2620 * changed. If the debugger wanted something
2621 * specific in the siginfo structure then it should
2622 * have updated *info via PTRACE_SETSIGINFO.
2624 if (signr != info->si_signo) {
2625 clear_siginfo(info);
2626 info->si_signo = signr;
2628 info->si_code = SI_USER;
2630 info->si_pid = task_pid_vnr(current->parent);
2631 info->si_uid = from_kuid_munged(current_user_ns(),
2632 task_uid(current->parent));
2636 /* If the (new) signal is now blocked, requeue it. */
2637 if (sigismember(&current->blocked, signr) ||
2638 fatal_signal_pending(current)) {
2639 send_signal_locked(signr, info, current, type);
2646 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2648 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2650 case SIL_FAULT_TRAPNO:
2651 case SIL_FAULT_MCEERR:
2652 case SIL_FAULT_BNDERR:
2653 case SIL_FAULT_PKUERR:
2654 case SIL_FAULT_PERF_EVENT:
2655 ksig->info.si_addr = arch_untagged_si_addr(
2656 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2668 bool get_signal(struct ksignal *ksig)
2670 struct sighand_struct *sighand = current->sighand;
2671 struct signal_struct *signal = current->signal;
2674 clear_notify_signal();
2675 if (unlikely(task_work_pending(current)))
2678 if (!task_sigpending(current))
2681 if (unlikely(uprobe_deny_signal()))
2685 * Do this once, we can't return to user-mode if freezing() == T.
2686 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2687 * thus do not need another check after return.
2692 spin_lock_irq(&sighand->siglock);
2695 * Every stopped thread goes here after wakeup. Check to see if
2696 * we should notify the parent, prepare_signal(SIGCONT) encodes
2697 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2699 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2702 if (signal->flags & SIGNAL_CLD_CONTINUED)
2703 why = CLD_CONTINUED;
2707 signal->flags &= ~SIGNAL_CLD_MASK;
2709 spin_unlock_irq(&sighand->siglock);
2712 * Notify the parent that we're continuing. This event is
2713 * always per-process and doesn't make a whole lot of sense
2714 * for ptracers, who shouldn't consume the state via
2715 * wait(2) either, but, for backward compatibility, notify
2716 * the ptracer of the group leader too unless it's gonna be a duplicate.
2719 read_lock(&tasklist_lock);
2720 do_notify_parent_cldstop(current, false, why);
2722 if (ptrace_reparented(current->group_leader))
2723 do_notify_parent_cldstop(current->group_leader,
2725 read_unlock(&tasklist_lock);
2731 struct k_sigaction *ka;
2734 /* Has this task already been marked for death? */
2735 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2736 signal->group_exec_task) {
2737 clear_siginfo(&ksig->info);
2738 ksig->info.si_signo = signr = SIGKILL;
2739 sigdelset(&current->pending.signal, SIGKILL);
2740 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2741 &sighand->action[SIGKILL - 1]);
2742 recalc_sigpending();
2746 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2750 if (unlikely(current->jobctl &
2751 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2752 if (current->jobctl & JOBCTL_TRAP_MASK) {
2754 spin_unlock_irq(&sighand->siglock);
2755 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2762 * If the task is leaving the frozen state, let's update
2763 * cgroup counters and reset the frozen bit.
2765 if (unlikely(cgroup_task_frozen(current))) {
2766 spin_unlock_irq(&sighand->siglock);
2767 cgroup_leave_frozen(false);
2772 * Signals generated by the execution of an instruction
2773 * need to be delivered before any other pending signals
2774 * so that the instruction pointer in the signal stack
2775 * frame points to the faulting instruction.
2778 signr = dequeue_synchronous_signal(&ksig->info);
2780 signr = dequeue_signal(current, &current->blocked,
2781 &ksig->info, &type);
2784 break; /* will return 0 */
2786 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2787 !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2788 signr = ptrace_signal(signr, &ksig->info, type);
2793 ka = &sighand->action[signr-1];
2795 /* Trace actually delivered signals. */
2796 trace_signal_deliver(signr, &ksig->info, ka);
2798 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2800 if (ka->sa.sa_handler != SIG_DFL) {
2801 /* Run the handler. */
2804 if (ka->sa.sa_flags & SA_ONESHOT)
2805 ka->sa.sa_handler = SIG_DFL;
2807 break; /* will return non-zero "signr" value */
2811 * Now we are doing the default action for this signal.
2813 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2817 * Global init gets no signals it doesn't want.
2818 * Container-init gets no signals it doesn't want from same container.
2821 * Note that if global/container-init sees a sig_kernel_only()
2822 * signal here, the signal must have been generated internally
2823 * or must have come from an ancestor namespace. In either
2824 * case, the signal cannot be dropped.
2826 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2827 !sig_kernel_only(signr))
2830 if (sig_kernel_stop(signr)) {
2832 * The default action is to stop all threads in
2833 * the thread group. The job control signals
2834 * do nothing in an orphaned pgrp, but SIGSTOP
2835 * always works. Note that siglock needs to be
2836 * dropped during the call to is_orphaned_pgrp()
2837 * because of lock ordering with tasklist_lock.
2838 * This allows an intervening SIGCONT to be posted.
2839 * We need to check for that and bail out if necessary.
2841 if (signr != SIGSTOP) {
2842 spin_unlock_irq(&sighand->siglock);
2844 /* signals can be posted during this window */
2846 if (is_current_pgrp_orphaned())
2849 spin_lock_irq(&sighand->siglock);
2852 if (likely(do_signal_stop(ksig->info.si_signo))) {
2853 /* It released the siglock. */
2858 * We didn't actually stop, due to a race
2859 * with SIGCONT or something like that.
2865 spin_unlock_irq(&sighand->siglock);
2866 if (unlikely(cgroup_task_frozen(current)))
2867 cgroup_leave_frozen(true);
2870 * Anything else is fatal, maybe with a core dump.
2872 current->flags |= PF_SIGNALED;
2874 if (sig_kernel_coredump(signr)) {
2875 if (print_fatal_signals)
2876 print_fatal_signal(ksig->info.si_signo);
2877 proc_coredump_connector(current);
2879 * If it was able to dump core, this kills all
2880 * other threads in the group and synchronizes with
2881 * their demise. If we lost the race with another
2882 * thread getting here, it set group_exit_code
2883 * first and our do_group_exit call below will use
2884 * that value and ignore the one we pass it.
2886 do_coredump(&ksig->info);
2890 * PF_USER_WORKER threads will catch and exit on fatal signals
2891 * themselves. They have cleanup that must be performed, so
2892 * we cannot call do_exit() on their behalf.
2894 if (current->flags & PF_USER_WORKER)
2898 * Death signals, no core dump.
2900 do_group_exit(ksig->info.si_signo);
2903 spin_unlock_irq(&sighand->siglock);
2907 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2908 hide_si_addr_tag_bits(ksig);
2910 return ksig->sig > 0;
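/*
 * Example (illustrative): the shape of the loop in which arch code
 * typically drives get_signal() on the way back to user mode (a sketch;
 * exact hooks vary by architecture):
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig))
 *		handle_signal(&ksig, regs);	// set up the signal frame
 *	else
 *		restore_saved_sigmask();	// nothing to deliver
 */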
2914 * signal_delivered - called after signal delivery to update blocked signals
2915 * @ksig: kernel signal struct
2916 * @stepping: nonzero if debugger single-step or block-step in use
2918 * This function should be called when a signal has successfully been
2919 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2920 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2921 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2923 static void signal_delivered(struct ksignal *ksig, int stepping)
2927 /* A signal was successfully delivered, and the
2928 saved sigmask was stored on the signal frame,
2929 and will be restored by sigreturn. So we can
2930 simply clear the restore sigmask flag. */
2931 clear_restore_sigmask();
2933 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2934 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2935 sigaddset(&blocked, ksig->sig);
2936 set_current_blocked(&blocked);
2937 if (current->sas_ss_flags & SS_AUTODISARM)
2938 sas_ss_reset(current);
2940 ptrace_notify(SIGTRAP, 0);
2943 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2946 force_sigsegv(ksig->sig);
2948 signal_delivered(ksig, stepping);
2952 * It could be that complete_signal() picked us to notify about the
2953 * group-wide signal. Other threads should be notified now to take
2954 * the shared signals in @which since we will not.
2956 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2959 struct task_struct *t;
2961 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2962 if (sigisemptyset(&retarget))
2966 while_each_thread(tsk, t) {
2967 if (t->flags & PF_EXITING)
2970 if (!has_pending_signals(&retarget, &t->blocked))
2972 /* Remove the signals this thread can handle. */
2973 sigandsets(&retarget, &retarget, &t->blocked);
2975 if (!task_sigpending(t))
2976 signal_wake_up(t, 0);
2978 if (sigisemptyset(&retarget))
2983 void exit_signals(struct task_struct *tsk)
2989 * @tsk is about to have PF_EXITING set - lock out users which
2990 * expect stable threadgroup.
2992 cgroup_threadgroup_change_begin(tsk);
2994 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2995 sched_mm_cid_exit_signals(tsk);
2996 tsk->flags |= PF_EXITING;
2997 cgroup_threadgroup_change_end(tsk);
3001 spin_lock_irq(&tsk->sighand->siglock);
3003 * From now this task is not visible for group-wide signals,
3004 * see wants_signal(), do_signal_stop().
3006 sched_mm_cid_exit_signals(tsk);
3007 tsk->flags |= PF_EXITING;
3009 cgroup_threadgroup_change_end(tsk);
3011 if (!task_sigpending(tsk))
3014 unblocked = tsk->blocked;
3015 signotset(&unblocked);
3016 retarget_shared_pending(tsk, &unblocked);
3018 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3019 task_participate_group_stop(tsk))
3020 group_stop = CLD_STOPPED;
3022 spin_unlock_irq(&tsk->sighand->siglock);
3025 * If group stop has completed, deliver the notification. This
3026 * should always go to the real parent of the group leader.
3028 if (unlikely(group_stop)) {
3029 read_lock(&tasklist_lock);
3030 do_notify_parent_cldstop(tsk, false, group_stop);
3031 read_unlock(&tasklist_lock);
3036 * System call entry points.
3040 * sys_restart_syscall - restart a system call
3042 SYSCALL_DEFINE0(restart_syscall)
3044 struct restart_block *restart = &current->restart_block;
3045 return restart->fn(restart);
3048 long do_no_restart_syscall(struct restart_block *param)
3053 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3055 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3056 sigset_t newblocked;
3057 /* A set of now blocked but previously unblocked signals. */
3058 sigandnsets(&newblocked, newset, &current->blocked);
3059 retarget_shared_pending(tsk, &newblocked);
3061 tsk->blocked = *newset;
3062 recalc_sigpending();
3066 * set_current_blocked - change current->blocked mask
3069 * It is wrong to change ->blocked directly; this helper should be used
3070 * to ensure the process can't miss a shared signal we are going to block.
3072 void set_current_blocked(sigset_t *newset)
3074 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3075 __set_current_blocked(newset);
3078 void __set_current_blocked(const sigset_t *newset)
3080 struct task_struct *tsk = current;
3083 * In case the signal mask hasn't changed, there is nothing we need
3084 * to do. The current->blocked shouldn't be modified by another task.
3086 if (sigequalsets(&tsk->blocked, newset))
3089 spin_lock_irq(&tsk->sighand->siglock);
3090 __set_task_blocked(tsk, newset);
3091 spin_unlock_irq(&tsk->sighand->siglock);
3095 * This is also useful for kernel threads that want to temporarily
3096 * (or permanently) block certain signals.
3098 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3099 * interface happily blocks "unblockable" signals like SIGKILL and friends.
3102 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3104 struct task_struct *tsk = current;
3107 /* Lockless, only current can change ->blocked, never from irq */
3109 *oldset = tsk->blocked;
3113 sigorsets(&newset, &tsk->blocked, set);
3116 sigandnsets(&newset, &tsk->blocked, set);
3125 __set_current_blocked(&newset);
3128 EXPORT_SYMBOL(sigprocmask);
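/*
 * Example (illustrative): a kernel thread blocking SIGHUP around a
 * critical region with this helper (a sketch):
 *
 *	sigset_t set, old;
 *
 *	siginitset(&set, sigmask(SIGHUP));
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */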
3131 * This API helps set app-provided sigmasks.
3133 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3134 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3136 * Note that it does set_restore_sigmask() in advance, so it must always be
3137 * paired with restore_saved_sigmask_unless() before return from syscall.
3139 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3145 if (sigsetsize != sizeof(sigset_t))
3147 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3150 set_restore_sigmask();
3151 current->saved_sigmask = current->blocked;
3152 set_current_blocked(&kmask);
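/*
 * Example (illustrative): the pairing described above, roughly as a
 * syscall like ppoll uses it (a sketch):
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_sys_poll(ufds, nfds, to);
 *	restore_saved_sigmask_unless(ret == -EINTR);
 */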
3157 #ifdef CONFIG_COMPAT
3158 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3165 if (sigsetsize != sizeof(compat_sigset_t))
3167 if (get_compat_sigset(&kmask, umask))
3170 set_restore_sigmask();
3171 current->saved_sigmask = current->blocked;
3172 set_current_blocked(&kmask);
3179 * sys_rt_sigprocmask - change the list of currently blocked signals
3180 * @how: whether to add, remove, or set signals
3181 * @nset: new signal mask (if non-null)
3182 * @oset: previous value of signal mask if non-null
3183 * @sigsetsize: size of sigset_t type
3185 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3186 sigset_t __user *, oset, size_t, sigsetsize)
3188 sigset_t old_set, new_set;
3191 /* XXX: Don't preclude handling different sized sigset_t's. */
3192 if (sigsetsize != sizeof(sigset_t))
3195 old_set = current->blocked;
3198 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3200 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3202 error = sigprocmask(how, &new_set, NULL);
3208 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3215 #ifdef CONFIG_COMPAT
3216 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3217 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3219 sigset_t old_set = current->blocked;
3221 /* XXX: Don't preclude handling different sized sigset_t's. */
3222 if (sigsetsize != sizeof(sigset_t))
3228 if (get_compat_sigset(&new_set, nset))
3230 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3232 error = sigprocmask(how, &new_set, NULL);
3236 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3240 static void do_sigpending(sigset_t *set)
3242 spin_lock_irq(&current->sighand->siglock);
3243 sigorsets(set, &current->pending.signal,
3244 &current->signal->shared_pending.signal);
3245 spin_unlock_irq(&current->sighand->siglock);
3247 /* Outside the lock because only this thread touches it. */
3248 sigandsets(set, &current->blocked, set);
3252 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
3254 * @uset: stores pending signals
3255 * @sigsetsize: size of sigset_t type or larger
3257 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3261 if (sigsetsize > sizeof(*uset))
3264 do_sigpending(&set);
3266 if (copy_to_user(uset, &set, sigsetsize))
3272 #ifdef CONFIG_COMPAT
3273 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3274 compat_size_t, sigsetsize)
3278 if (sigsetsize > sizeof(*uset))
3281 do_sigpending(&set);
3283 return put_compat_sigset(uset, &set, sigsetsize);
3287 static const struct {
3288 unsigned char limit, layout;
3290 [SIGILL] = { NSIGILL, SIL_FAULT },
3291 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3292 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3293 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3294 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3296 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3298 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3299 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3300 [SIGSYS] = { NSIGSYS, SIL_SYS },
3303 static bool known_siginfo_layout(unsigned sig, int si_code)
3305 if (si_code == SI_KERNEL)
3307 else if (si_code > SI_USER) {
3308 if (sig_specific_sicodes(sig)) {
3309 if (si_code <= sig_sicodes[sig].limit)
3312 else if (si_code <= NSIGPOLL)
3315 else if (si_code >= SI_DETHREAD)
3317 else if (si_code == SI_ASYNCNL)
3322 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3324 enum siginfo_layout layout = SIL_KILL;
3325 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3326 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3327 (si_code <= sig_sicodes[sig].limit)) {
3328 layout = sig_sicodes[sig].layout;
3329 /* Handle the exceptions */
3330 if ((sig == SIGBUS) &&
3331 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3332 layout = SIL_FAULT_MCEERR;
3333 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3334 layout = SIL_FAULT_BNDERR;
3336 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3337 layout = SIL_FAULT_PKUERR;
3339 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3340 layout = SIL_FAULT_PERF_EVENT;
3341 else if (IS_ENABLED(CONFIG_SPARC) &&
3342 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3343 layout = SIL_FAULT_TRAPNO;
3344 else if (IS_ENABLED(CONFIG_ALPHA) &&
3346 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3347 layout = SIL_FAULT_TRAPNO;
3349 else if (si_code <= NSIGPOLL)
3352 if (si_code == SI_TIMER)
3354 else if (si_code == SI_SIGIO)
3356 else if (si_code < 0)
3362 static inline char __user *si_expansion(const siginfo_t __user *info)
3364 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3367 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3369 char __user *expansion = si_expansion(to);
3370 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3372 if (clear_user(expansion, SI_EXPANSION_SIZE))
3377 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3378 const siginfo_t __user *from)
3380 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3381 char __user *expansion = si_expansion(from);
3382 char buf[SI_EXPANSION_SIZE];
3385 * An unknown si_code might need more than
3386 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3387 * extra bytes are 0. This guarantees copy_siginfo_to_user
3388 * will return this data to userspace exactly.
3390 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3392 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3400 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3401 const siginfo_t __user *from)
3403 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3405 to->si_signo = signo;
3406 return post_copy_siginfo_from_user(to, from);
3409 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3411 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3413 return post_copy_siginfo_from_user(to, from);
3416 #ifdef CONFIG_COMPAT
3418 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3419 * @to: compat siginfo destination
3420 * @from: kernel siginfo source
3422 * Note: This function does not work properly for the SIGCHLD on x32, but
3423 * fortunately it doesn't have to. The only valid callers for this function are
3424 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3425 * The latter does not care because SIGCHLD will never cause a coredump.
3427 void copy_siginfo_to_external32(struct compat_siginfo *to,
3428 const struct kernel_siginfo *from)
3430 memset(to, 0, sizeof(*to));
3432 to->si_signo = from->si_signo;
3433 to->si_errno = from->si_errno;
3434 to->si_code = from->si_code;
3435 switch (siginfo_layout(from->si_signo, from->si_code)) {
3437 to->si_pid = from->si_pid;
3438 to->si_uid = from->si_uid;
3441 to->si_tid = from->si_tid;
3442 to->si_overrun = from->si_overrun;
3443 to->si_int = from->si_int;
3446 to->si_band = from->si_band;
3447 to->si_fd = from->si_fd;
3450 to->si_addr = ptr_to_compat(from->si_addr);
3452 case SIL_FAULT_TRAPNO:
3453 to->si_addr = ptr_to_compat(from->si_addr);
3454 to->si_trapno = from->si_trapno;
3456 case SIL_FAULT_MCEERR:
3457 to->si_addr = ptr_to_compat(from->si_addr);
3458 to->si_addr_lsb = from->si_addr_lsb;
3460 case SIL_FAULT_BNDERR:
3461 to->si_addr = ptr_to_compat(from->si_addr);
3462 to->si_lower = ptr_to_compat(from->si_lower);
3463 to->si_upper = ptr_to_compat(from->si_upper);
3465 case SIL_FAULT_PKUERR:
3466 to->si_addr = ptr_to_compat(from->si_addr);
3467 to->si_pkey = from->si_pkey;
3469 case SIL_FAULT_PERF_EVENT:
3470 to->si_addr = ptr_to_compat(from->si_addr);
3471 to->si_perf_data = from->si_perf_data;
3472 to->si_perf_type = from->si_perf_type;
3473 to->si_perf_flags = from->si_perf_flags;
3476 to->si_pid = from->si_pid;
3477 to->si_uid = from->si_uid;
3478 to->si_status = from->si_status;
3479 to->si_utime = from->si_utime;
3480 to->si_stime = from->si_stime;
3483 to->si_pid = from->si_pid;
3484 to->si_uid = from->si_uid;
3485 to->si_int = from->si_int;
3488 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3489 to->si_syscall = from->si_syscall;
3490 to->si_arch = from->si_arch;
3495 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3496 const struct kernel_siginfo *from)
3498 struct compat_siginfo new;
3500 copy_siginfo_to_external32(&new, from);
3501 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3506 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3507 const struct compat_siginfo *from)
3510 to->si_signo = from->si_signo;
3511 to->si_errno = from->si_errno;
3512 to->si_code = from->si_code;
3513 switch (siginfo_layout(from->si_signo, from->si_code)) {
3515 to->si_pid = from->si_pid;
3516 to->si_uid = from->si_uid;
3519 to->si_tid = from->si_tid;
3520 to->si_overrun = from->si_overrun;
3521 to->si_int = from->si_int;
3524 to->si_band = from->si_band;
3525 to->si_fd = from->si_fd;
3528 to->si_addr = compat_ptr(from->si_addr);
3530 case SIL_FAULT_TRAPNO:
3531 to->si_addr = compat_ptr(from->si_addr);
3532 to->si_trapno = from->si_trapno;
3534 case SIL_FAULT_MCEERR:
3535 to->si_addr = compat_ptr(from->si_addr);
3536 to->si_addr_lsb = from->si_addr_lsb;
3538 case SIL_FAULT_BNDERR:
3539 to->si_addr = compat_ptr(from->si_addr);
3540 to->si_lower = compat_ptr(from->si_lower);
3541 to->si_upper = compat_ptr(from->si_upper);
3543 case SIL_FAULT_PKUERR:
3544 to->si_addr = compat_ptr(from->si_addr);
3545 to->si_pkey = from->si_pkey;
3547 case SIL_FAULT_PERF_EVENT:
3548 to->si_addr = compat_ptr(from->si_addr);
3549 to->si_perf_data = from->si_perf_data;
3550 to->si_perf_type = from->si_perf_type;
3551 to->si_perf_flags = from->si_perf_flags;
3554 to->si_pid = from->si_pid;
3555 to->si_uid = from->si_uid;
3556 to->si_status = from->si_status;
3557 #ifdef CONFIG_X86_X32_ABI
3558 if (in_x32_syscall()) {
3559 to->si_utime = from->_sifields._sigchld_x32._utime;
3560 to->si_stime = from->_sifields._sigchld_x32._stime;
3564 to->si_utime = from->si_utime;
3565 to->si_stime = from->si_stime;
3569 to->si_pid = from->si_pid;
3570 to->si_uid = from->si_uid;
3571 to->si_int = from->si_int;
3574 to->si_call_addr = compat_ptr(from->si_call_addr);
3575 to->si_syscall = from->si_syscall;
3576 to->si_arch = from->si_arch;
3582 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3583 const struct compat_siginfo __user *ufrom)
3585 struct compat_siginfo from;
3587 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3590 from.si_signo = signo;
3591 return post_copy_siginfo_from_user32(to, &from);
3594 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3595 const struct compat_siginfo __user *ufrom)
3597 struct compat_siginfo from;
3599 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3602 return post_copy_siginfo_from_user32(to, &from);
3604 #endif /* CONFIG_COMPAT */
3607 * do_sigtimedwait - wait for queued signals specified in @which
3608 * @which: queued signals to wait for
3609 * @info: if non-null, the signal's siginfo is returned here
3610 * @ts: upper bound on process time suspension
3612 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3613 const struct timespec64 *ts)
3615 ktime_t *to = NULL, timeout = KTIME_MAX;
3616 struct task_struct *tsk = current;
3617 sigset_t mask = *which;
3622 if (!timespec64_valid(ts))
3624 timeout = timespec64_to_ktime(*ts);
3629 * Invert the set of allowed signals to get those we want to block.
3631 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3634 spin_lock_irq(&tsk->sighand->siglock);
3635 sig = dequeue_signal(tsk, &mask, info, &type);
3636 if (!sig && timeout) {
3638 * None ready, temporarily unblock those we're interested in
3639 * while we are sleeping so that we'll be awakened when
3640 * they arrive. Unblocking is always fine; we can avoid
3641 * set_current_blocked().
3643 tsk->real_blocked = tsk->blocked;
3644 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3645 recalc_sigpending();
3646 spin_unlock_irq(&tsk->sighand->siglock);
3648 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3649 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3651 spin_lock_irq(&tsk->sighand->siglock);
3652 __set_task_blocked(tsk, &tsk->real_blocked);
3653 sigemptyset(&tsk->real_blocked);
3654 sig = dequeue_signal(tsk, &mask, info, &type);
3656 spin_unlock_irq(&tsk->sighand->siglock);
3660 return ret ? -EINTR : -EAGAIN;
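/*
 * Example (illustrative, userspace): waiting synchronously for SIGUSR1
 * with a five-second timeout via the glibc wrapper of this syscall
 * (a sketch; error handling omitted):
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// block it first
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		;	// si.si_pid identifies the sender
 */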
3664 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3666 * @uthese: queued signals to wait for
3667 * @uinfo: if non-null, the signal's siginfo is returned here
3668 * @uts: upper bound on process time suspension
3669 * @sigsetsize: size of sigset_t type
3671 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3672 siginfo_t __user *, uinfo,
3673 const struct __kernel_timespec __user *, uts,
3677 struct timespec64 ts;
3678 kernel_siginfo_t info;
3681 /* XXX: Don't preclude handling different sized sigset_t's. */
3682 if (sigsetsize != sizeof(sigset_t))
3685 if (copy_from_user(&these, uthese, sizeof(these)))
3689 if (get_timespec64(&ts, uts))
3693 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3695 if (ret > 0 && uinfo) {
3696 if (copy_siginfo_to_user(uinfo, &info))
3703 #ifdef CONFIG_COMPAT_32BIT_TIME
3704 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3705 siginfo_t __user *, uinfo,
3706 const struct old_timespec32 __user *, uts,
3710 struct timespec64 ts;
3711 kernel_siginfo_t info;
3714 if (sigsetsize != sizeof(sigset_t))
3717 if (copy_from_user(&these, uthese, sizeof(these)))
3721 if (get_old_timespec32(&ts, uts))
3725 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3727 if (ret > 0 && uinfo) {
3728 if (copy_siginfo_to_user(uinfo, &info))
3736 #ifdef CONFIG_COMPAT
3737 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3738 struct compat_siginfo __user *, uinfo,
3739 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3742 struct timespec64 t;
3743 kernel_siginfo_t info;
3746 if (sigsetsize != sizeof(sigset_t))
3749 if (get_compat_sigset(&s, uthese))
3753 if (get_timespec64(&t, uts))
3757 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3759 if (ret > 0 && uinfo) {
3760 if (copy_siginfo_to_user32(uinfo, &info))
3767 #ifdef CONFIG_COMPAT_32BIT_TIME
3768 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3769 struct compat_siginfo __user *, uinfo,
3770 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3773 struct timespec64 t;
3774 kernel_siginfo_t info;
3777 if (sigsetsize != sizeof(sigset_t))
3780 if (get_compat_sigset(&s, uthese))
3784 if (get_old_timespec32(&t, uts))
3788 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3790 if (ret > 0 && uinfo) {
3791 if (copy_siginfo_to_user32(uinfo, &info))
3800 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3802 clear_siginfo(info);
3803 info->si_signo = sig;
3805 info->si_code = SI_USER;
3806 info->si_pid = task_tgid_vnr(current);
3807 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3811 * sys_kill - send a signal to a process
3812 * @pid: the PID of the process
3813 * @sig: signal to be sent
3815 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3817 struct kernel_siginfo info;
3819 prepare_kill_siginfo(sig, &info);
3821 return kill_something_info(sig, &info, pid);
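/*
 * Example (illustrative, userspace): the pid encodings accepted here
 * (see kill(2)):
 *
 *	kill(1234, SIGTERM);	// one process
 *	kill(-5678, SIGHUP);	// every process in process group 5678
 *	kill(pid, 0);		// existence/permission probe, sends nothing
 */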
3825 * Verify that the signaler and signalee either are in the same pid namespace
3826 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3829 static bool access_pidfd_pidns(struct pid *pid)
3831 struct pid_namespace *active = task_active_pid_ns(current);
3832 struct pid_namespace *p = ns_of_pid(pid);
3845 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3846 siginfo_t __user *info)
3848 #ifdef CONFIG_COMPAT
3850 * Avoid hooking up compat syscalls and instead handle necessary
3851 * conversions here. Note, this is a stop-gap measure and should not be
3852 * considered a generic solution.
3854 if (in_compat_syscall())
3855 return copy_siginfo_from_user32(
3856 kinfo, (struct compat_siginfo __user *)info);
3858 return copy_siginfo_from_user(kinfo, info);
3861 static struct pid *pidfd_to_pid(const struct file *file)
3865 pid = pidfd_pid(file);
3869 return tgid_pidfd_to_pid(file);
3873 * sys_pidfd_send_signal - Signal a process through a pidfd
3874 * @pidfd: file descriptor of the process
3875 * @sig: signal to send
3876 * @info: signal info
3877 * @flags: future flags
3879 * The syscall currently only signals via PIDTYPE_PID which covers
3880 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3882 * In order to extend the syscall to threads and process groups the @flags
3883 * argument should be used. In essence, the @flags argument will determine
3884 * what is signaled and not the file descriptor itself. Put in other words,
3885 * grouping is a property of the flags argument, not a property of the file descriptor.
3888 * Return: 0 on success, negative errno on failure
3890 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3891 siginfo_t __user *, info, unsigned int, flags)
3896 kernel_siginfo_t kinfo;
3898 /* Enforce that flags is 0 until we add an extension. */
3906 /* Is this a pidfd? */
3907 pid = pidfd_to_pid(f.file);
3914 if (!access_pidfd_pidns(pid))
3918 ret = copy_siginfo_from_user_any(&kinfo, info);
3923 if (unlikely(sig != kinfo.si_signo))
3926 /* Only allow sending arbitrary signals to yourself. */
3928 if ((task_pid(current) != pid) &&
3929 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3932 prepare_kill_siginfo(sig, &kinfo);
3935 ret = kill_pid_info(sig, &kinfo, pid);
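/*
 * Example (illustrative, userspace): signalling a process race-free
 * through a pidfd (a sketch; error handling omitted):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 */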
3943 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3945 struct task_struct *p;
3949 p = find_task_by_vpid(pid);
3950 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3951 error = check_kill_permission(sig, info, p);
3953 * The null signal is a permissions and process existence
3954 * probe. No signal is actually delivered.
3956 if (!error && sig) {
3957 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3959 * If lock_task_sighand() failed we pretend the task
3960 * dies after receiving the signal. The window is tiny,
3961 * and the signal is private anyway.
3963 if (unlikely(error == -ESRCH))
3972 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3974 struct kernel_siginfo info;
3976 clear_siginfo(&info);
3977 info.si_signo = sig;
3979 info.si_code = SI_TKILL;
3980 info.si_pid = task_tgid_vnr(current);
3981 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3983 return do_send_specific(tgid, pid, sig, &info);
3987 * sys_tgkill - send signal to one specific thread
3988 * @tgid: the thread group ID of the thread
3989 * @pid: the PID of the thread
3990 * @sig: signal to be sent
3992 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3993 * exists but no longer belongs to the target process. This
3994 * method solves the problem of threads exiting and PIDs getting reused.
3996 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3998 /* This is only valid for single tasks */
3999 if (pid <= 0 || tgid <= 0)
4002 return do_tkill(tgid, pid, sig);
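/*
 * Example (illustrative, userspace): directing a signal at one thread
 * of the calling process (a sketch):
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */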
4006 * sys_tkill - send signal to one specific task
4007 * @pid: the PID of the task
4008 * @sig: signal to be sent
4010 * Send a signal to only one task, even if it's a CLONE_THREAD task.
4012 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4014 /* This is only valid for single tasks */
4018 return do_tkill(0, pid, sig);
4021 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4023 /* Not even root can pretend to send signals from the kernel.
4024 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4026 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4027 (task_pid_vnr(current) != pid))
4030 /* POSIX.1b doesn't mention process groups. */
4031 return kill_proc_info(sig, info, pid);
4035 * sys_rt_sigqueueinfo - send signal information to a process
4036 * @pid: the PID of the thread
4037 * @sig: signal to be sent
4038 * @uinfo: signal info to be sent
4040 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4041 siginfo_t __user *, uinfo)
4043 kernel_siginfo_t info;
4044 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4047 return do_rt_sigqueueinfo(pid, sig, &info);
4050 #ifdef CONFIG_COMPAT
4051 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4054 struct compat_siginfo __user *, uinfo)
4056 kernel_siginfo_t info;
4057 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4060 return do_rt_sigqueueinfo(pid, sig, &info);
4064 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4066 /* This is only valid for single tasks */
4067 if (pid <= 0 || tgid <= 0)
4070 /* Not even root can pretend to send signals from the kernel.
4071 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4073 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4074 (task_pid_vnr(current) != pid))
4077 return do_send_specific(tgid, pid, sig, info);
4080 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4081 siginfo_t __user *, uinfo)
4083 kernel_siginfo_t info;
4084 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4087 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4090 #ifdef CONFIG_COMPAT
4091 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4095 struct compat_siginfo __user *, uinfo)
4097 kernel_siginfo_t info;
4098 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4101 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4106 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4108 void kernel_sigaction(int sig, __sighandler_t action)
4110 spin_lock_irq(&current->sighand->siglock);
4111 current->sighand->action[sig - 1].sa.sa_handler = action;
4112 if (action == SIG_IGN) {
4116 sigaddset(&mask, sig);
4118 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4119 flush_sigqueue_mask(&mask, &current->pending);
4120 recalc_sigpending();
4122 spin_unlock_irq(&current->sighand->siglock);
4124 EXPORT_SYMBOL(kernel_sigaction);
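/*
 * Example (illustrative): allow_signal() and disallow_signal() are thin
 * wrappers around this helper; a kthread might use them as in this
 * sketch:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			break;
 *	}
 */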
4126 void __weak sigaction_compat_abi(struct k_sigaction *act,
4127 struct k_sigaction *oact)
4131 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4133 struct task_struct *p = current, *t;
4134 struct k_sigaction *k;
4137 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4140 k = &p->sighand->action[sig-1];
4142 spin_lock_irq(&p->sighand->siglock);
4143 if (k->sa.sa_flags & SA_IMMUTABLE) {
4144 spin_unlock_irq(&p->sighand->siglock);
4151 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4152 * e.g. by having an architecture use the bit in their uapi.
4154 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4157 * Clear unknown flag bits in order to allow userspace to detect missing
4158 * support for flag bits and to allow the kernel to use non-uapi bits internally.
4162 act->sa.sa_flags &= UAPI_SA_FLAGS;
4164 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4166 sigaction_compat_abi(act, oact);
4169 sigdelsetmask(&act->sa.sa_mask,
4170 sigmask(SIGKILL) | sigmask(SIGSTOP));
4174 * "Setting a signal action to SIG_IGN for a signal that is
4175 * pending shall cause the pending signal to be discarded,
4176 * whether or not it is blocked."
4178 * "Setting a signal action to SIG_DFL for a signal that is
4179 * pending and whose default action is to ignore the signal
4180 * (for example, SIGCHLD), shall cause the pending signal to
4181 * be discarded, whether or not it is blocked"
4183 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4185 sigaddset(&mask, sig);
4186 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4187 for_each_thread(p, t)
4188 flush_sigqueue_mask(&mask, &t->pending);
4192 spin_unlock_irq(&p->sighand->siglock);
4196 #ifdef CONFIG_DYNAMIC_SIGFRAME
4197 static inline void sigaltstack_lock(void)
4198 __acquires(&current->sighand->siglock)
4200 spin_lock_irq(&current->sighand->siglock);
4203 static inline void sigaltstack_unlock(void)
4204 __releases(&current->sighand->siglock)
4206 spin_unlock_irq(&current->sighand->siglock);
4209 static inline void sigaltstack_lock(void) { }
4210 static inline void sigaltstack_unlock(void) { }
4214 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4217 struct task_struct *t = current;
4221 memset(oss, 0, sizeof(stack_t));
4222 oss->ss_sp = (void __user *) t->sas_ss_sp;
4223 oss->ss_size = t->sas_ss_size;
4224 oss->ss_flags = sas_ss_flags(sp) |
4225 (current->sas_ss_flags & SS_FLAG_BITS);
4229 void __user *ss_sp = ss->ss_sp;
4230 size_t ss_size = ss->ss_size;
4231 unsigned ss_flags = ss->ss_flags;
4234 if (unlikely(on_sig_stack(sp)))
4237 ss_mode = ss_flags & ~SS_FLAG_BITS;
4238 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4243 * Return before taking any locks if no actual
4244 * sigaltstack changes were requested.
4246 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4247 t->sas_ss_size == ss_size &&
4248 t->sas_ss_flags == ss_flags)
4252 if (ss_mode == SS_DISABLE) {
4256 if (unlikely(ss_size < min_ss_size))
4258 if (!sigaltstack_size_valid(ss_size))
4262 t->sas_ss_sp = (unsigned long) ss_sp;
4263 t->sas_ss_size = ss_size;
4264 t->sas_ss_flags = ss_flags;
4266 sigaltstack_unlock();
4271 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4275 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4277 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4278 current_user_stack_pointer(),
4280 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
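/*
 * Example (illustrative, userspace): installing an alternate stack so a
 * SIGSEGV handler can still run after overflowing the normal stack
 * (a sketch; error handling omitted):
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *	// then install the handler with SA_ONSTACK set in sa_flags
 */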
4285 int restore_altstack(const stack_t __user *uss)
4288 if (copy_from_user(&new, uss, sizeof(stack_t)))
4290 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4292 /* squash all but -EFAULT for now */
4296 int __save_altstack(stack_t __user *uss, unsigned long sp)
4298 struct task_struct *t = current;
4299 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4300 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4301 __put_user(t->sas_ss_size, &uss->ss_size);
4305 #ifdef CONFIG_COMPAT
4306 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4307 compat_stack_t __user *uoss_ptr)
4313 compat_stack_t uss32;
4314 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4316 uss.ss_sp = compat_ptr(uss32.ss_sp);
4317 uss.ss_flags = uss32.ss_flags;
4318 uss.ss_size = uss32.ss_size;
4320 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4321 compat_user_stack_pointer(),
4322 COMPAT_MINSIGSTKSZ);
4323 if (ret >= 0 && uoss_ptr) {
4325 memset(&old, 0, sizeof(old));
4326 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4327 old.ss_flags = uoss.ss_flags;
4328 old.ss_size = uoss.ss_size;
4329 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4335 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4336 const compat_stack_t __user *, uss_ptr,
4337 compat_stack_t __user *, uoss_ptr)
4339 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4342 int compat_restore_altstack(const compat_stack_t __user *uss)
4344 int err = do_compat_sigaltstack(uss, NULL);
4345 /* squash all but -EFAULT for now */
4346 return err == -EFAULT ? err : 0;
4349 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4352 struct task_struct *t = current;
4353 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4355 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4356 __put_user(t->sas_ss_size, &uss->ss_size);
4361 #ifdef __ARCH_WANT_SYS_SIGPENDING
4364 * sys_sigpending - examine pending signals
4365 * @uset: where the mask of pending signals is returned
4367 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4371 if (sizeof(old_sigset_t) > sizeof(*uset))
4374 do_sigpending(&set);
4376 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4382 #ifdef CONFIG_COMPAT
4383 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4387 do_sigpending(&set);
4389 return put_user(set.sig[0], set32);
4395 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4397 * sys_sigprocmask - examine and change blocked signals
4398 * @how: whether to add, remove, or set signals
4399 * @nset: signals to add or remove (if non-null)
4400 * @oset: previous value of signal mask if non-null
4402 * Some platforms have their own version with special arguments;
4403 * others support only sys_rt_sigprocmask.
4406 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4407 old_sigset_t __user *, oset)
4409 old_sigset_t old_set, new_set;
4410 sigset_t new_blocked;
4412 old_set = current->blocked.sig[0];
4415 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4418 new_blocked = current->blocked;
4422 sigaddsetmask(&new_blocked, new_set);
4425 sigdelsetmask(&new_blocked, new_set);
4428 new_blocked.sig[0] = new_set;
4434 set_current_blocked(&new_blocked);
4438 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4444 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4446 #ifndef CONFIG_ODD_RT_SIGACTION
4448 * sys_rt_sigaction - alter an action taken by a process
4449 * @sig: signal to be sent
4450 * @act: new sigaction
4451 * @oact: used to save the previous sigaction
4452 * @sigsetsize: size of sigset_t type
4454 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4455 const struct sigaction __user *, act,
4456 struct sigaction __user *, oact,
4459 struct k_sigaction new_sa, old_sa;
4462 /* XXX: Don't preclude handling different sized sigset_t's. */
4463 if (sigsetsize != sizeof(sigset_t))
4466 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4469 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4473 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
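/*
 * Example (illustrative, userspace): installing a handler through the
 * glibc sigaction() wrapper, which ends up in this syscall (a sketch;
 * on_sigint is a hypothetical handler function):
 *
 *	struct sigaction sa = {
 *		.sa_handler = on_sigint,
 *		.sa_flags = SA_RESTART,
 *	};
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGINT, &sa, NULL);
 */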
4478 #ifdef CONFIG_COMPAT
4479 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4480 const struct compat_sigaction __user *, act,
4481 struct compat_sigaction __user *, oact,
4482 compat_size_t, sigsetsize)
4484 struct k_sigaction new_ka, old_ka;
4485 #ifdef __ARCH_HAS_SA_RESTORER
4486 compat_uptr_t restorer;
4490 /* XXX: Don't preclude handling different sized sigset_t's. */
4491 if (sigsetsize != sizeof(compat_sigset_t))
4495 compat_uptr_t handler;
4496 ret = get_user(handler, &act->sa_handler);
4497 new_ka.sa.sa_handler = compat_ptr(handler);
4498 #ifdef __ARCH_HAS_SA_RESTORER
4499 ret |= get_user(restorer, &act->sa_restorer);
4500 new_ka.sa.sa_restorer = compat_ptr(restorer);
4502 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4503 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4508 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4510 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4512 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4513 sizeof(oact->sa_mask));
4514 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4515 #ifdef __ARCH_HAS_SA_RESTORER
4516 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4517 &oact->sa_restorer);
4523 #endif /* !CONFIG_ODD_RT_SIGACTION */
4525 #ifdef CONFIG_OLD_SIGACTION
4526 SYSCALL_DEFINE3(sigaction, int, sig,
4527 const struct old_sigaction __user *, act,
4528 struct old_sigaction __user *, oact)
4530 struct k_sigaction new_ka, old_ka;
4535 if (!access_ok(act, sizeof(*act)) ||
4536 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4537 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4538 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4539 __get_user(mask, &act->sa_mask))
4541 #ifdef __ARCH_HAS_KA_RESTORER
4542 new_ka.ka_restorer = NULL;
4544 siginitset(&new_ka.sa.sa_mask, mask);
4547 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4550 if (!access_ok(oact, sizeof(*oact)) ||
4551 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4552 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4553 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4554 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4561 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4562 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4563 const struct compat_old_sigaction __user *, act,
4564 struct compat_old_sigaction __user *, oact)
4566 struct k_sigaction new_ka, old_ka;
4568 compat_old_sigset_t mask;
4569 compat_uptr_t handler, restorer;
4572 if (!access_ok(act, sizeof(*act)) ||
4573 __get_user(handler, &act->sa_handler) ||
4574 __get_user(restorer, &act->sa_restorer) ||
4575 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4576 __get_user(mask, &act->sa_mask))
4579 #ifdef __ARCH_HAS_KA_RESTORER
4580 new_ka.ka_restorer = NULL;
4582 new_ka.sa.sa_handler = compat_ptr(handler);
4583 new_ka.sa.sa_restorer = compat_ptr(restorer);
4584 siginitset(&new_ka.sa.sa_mask, mask);
4587 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4590 if (!access_ok(oact, sizeof(*oact)) ||
4591 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4592 &oact->sa_handler) ||
4593 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4594 &oact->sa_restorer) ||
4595 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4596 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4603 #ifdef CONFIG_SGETMASK_SYSCALL
4606 * For backwards compatibility. Functionality superseded by sigprocmask.
4608 SYSCALL_DEFINE0(sgetmask)
4611 return current->blocked.sig[0];
4614 SYSCALL_DEFINE1(ssetmask, int, newmask)
4616 int old = current->blocked.sig[0];
4619 siginitset(&newset, newmask);
4620 set_current_blocked(&newset);
4624 #endif /* CONFIG_SGETMASK_SYSCALL */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
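
/*
 * Illustrative sketch, not kernel code: SA_ONESHOT and SA_NOMASK are the
 * historical spellings of SA_RESETHAND and SA_NODEFER, so sys_signal()
 * gives System V semantics - the disposition resets to SIG_DFL on the
 * first delivery and the signal is not blocked while its handler runs.
 * The explicit modern equivalent from userspace:
 *
 *	#include <signal.h>
 *
 *	static void once(int sig)
 *	{
 *		// a second delivery takes the default action instead
 *	}
 *
 *	int install_oneshot(void)
 *	{
 *		struct sigaction sa = { .sa_handler = once };
 *
 *		sa.sa_flags = SA_RESETHAND | SA_NODEFER;
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGUSR1, &sa, NULL);
 *	}
 */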
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
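
/*
 * Illustrative sketch, not kernel code: saving ->blocked before swapping in
 * the new mask, and only then testing signal_pending(), is what makes
 * userspace sigsuspend() atomic. The classic race-free wait it enables:
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *	static void on_usr1(int sig) { got_usr1 = 1; }
 *
 *	void wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *		struct sigaction sa = { .sa_handler = on_usr1 };
 *
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGUSR1, &sa, NULL);
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);	// close the race window
 *		while (!got_usr1)
 *			sigsuspend(&old);	// atomically unblock and sleep
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */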
/**
 *  sys_rt_sigsuspend - replace the signal mask with @unewset until a
 *	signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);
	CHECK_OFFSET(si_perf_flags);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}
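
/*
 * Illustrative sketch, not kernel code: CHECK_OFFSET() above is the
 * kernel's BUILD_BUG_ON spelling of a layout pin. Any C11 codebase can do
 * the same with _Static_assert; the structs and macro below are
 * hypothetical, for illustration only:
 *
 *	#include <stddef.h>
 *
 *	struct wire_hdr { unsigned int id; unsigned int len; };
 *	struct host_hdr { unsigned int id; unsigned int len; };
 *
 *	#define CHECK_HDR_OFFSET(f) \
 *		_Static_assert(offsetof(struct wire_hdr, f) == \
 *			       offsetof(struct host_hdr, f), \
 *			       "wire/host layout mismatch: " #f)
 *
 *	CHECK_HDR_OFFSET(id);
 *	CHECK_HDR_OFFSET(len);
 */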
#if defined(CONFIG_SYSCTL)
static struct ctl_table signal_debug_table[] = {
#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
	{
		.procname	= "exception-trace",
		.data		= &show_unhandled_signals,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
#endif
	{ }
};

static int __init init_signal_sysctls(void)
{
	register_sysctl_init("debug", signal_debug_table);
	return 0;
}
early_initcall(init_signal_sysctls);
#endif /* CONFIG_SYSCTL */
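
/*
 * Illustrative sketch, not kernel code: with CONFIG_SYSCTL_EXCEPTION_TRACE
 * enabled, the knob registered above surfaces as
 * /proc/sys/debug/exception-trace and drives show_unhandled_signals. A
 * userspace sketch to read and (as root) enable it:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int val;
 *		FILE *f = fopen("/proc/sys/debug/exception-trace", "r+");
 *
 *		if (!f)
 *			return 1;
 *		if (fscanf(f, "%d", &val) == 1)
 *			printf("exception-trace = %d\n", val);
 *		rewind(f);
 *		fprintf(f, "1\n");
 *		fclose(f);
 *		return 0;
 *	}
 */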
void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
}
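
/*
 * Note: KMEM_CACHE() derives the cache name, object size and alignment
 * from the struct itself, so the call in signals_init() above expands to
 * roughly (sketch, not a verbatim expansion):
 *
 *	sigqueue_cachep = kmem_cache_create("sigqueue",
 *					    sizeof(struct sigqueue),
 *					    __alignof__(struct sigqueue),
 *					    SLAB_PANIC | SLAB_ACCOUNT, NULL);
 */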
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (!task_is_running(t) && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */
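
/*
 * Usage note (an assumption based on kdb's shell, not taken from this
 * file): kdb_send_sig() is reached from the kdb "kill" command, e.g. at
 * the debugger prompt:
 *
 *	kdb> kill -9 1234
 *
 * which looks up pid 1234 and lands here with sig == 9, subject to the
 * trylock and run-state checks above.
 */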