// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * may do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
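
/*
 * For illustration (not from the original source): if SIGHUP (1) and
 * SIGSEGV (11) are both pending and unblocked, next_signal() below first
 * narrows the low word to SYNCHRONOUS_MASK, so the synchronous SIGSEGV is
 * reported before the lower-numbered, asynchronous SIGHUP.
 */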

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) ||
		     (task->flags & (PF_EXITING | PF_IO_WORKER))))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
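
/*
 * Have a new thread join an ongoing group stop of its thread group, if
 * any. (Presumably called on the fork path for a newly created thread,
 * with @task->sighand->siglock held.)
 */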
void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an ongoing signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
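
/*
 * Dequeue a queued synchronous signal, if any, in preference to
 * asynchronous ones (see SYNCHRONOUS_MASK above), so that e.g. a fault
 * is reported before unrelated pending signals.
 */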
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
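
/*
 * Note that SEND_SIG_NOINFO and SEND_SIG_PRIV are tiny sentinel values
 * rather than pointers to real siginfo, which is why is_si_special()
 * above can get away with a plain pointer comparison against
 * SEND_SIG_PRIV.
 */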

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
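
/*
 * For illustration: legacy (non-realtime) signals do not queue up. If
 * SIGUSR1 is already pending for the target, a second SIGUSR1 is dropped
 * by the legacy_queue() check in __send_signal() below, while realtime
 * signals (>= SIGRTMIN) are queued individually.
 */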
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & (PF_KTHREAD | PF_IO_WORKER)))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;

		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example;
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace: the 32bit address is encoded in the low
 * 32bits of the pointer, and those low 32bits are stored at a higher
 * address than they would appear in a 32bit pointer.  So userspace
 * will not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr of the sigval_t addr
 * which is the format of SI_ASYNCIO.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
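
/*
 * A minimal usage sketch (hypothetical caller, not from this file),
 * following the layout rules described above kill_pid_usb_asyncio():
 *
 *	sigval_t v;
 *
 *	if (in_compat_syscall())
 *		v.sival_int = lower_32_bits(user_addr);	// 32bit userspace
 *	else
 *		v.sival_ptr = (void __user *)user_addr;
 *	kill_pid_usb_asyncio(signr, err, v, pid, cred);
 */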

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

/*
 * For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from timer_create(), so if this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
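
/*
 * Lifecycle sketch (assumed typical POSIX-timer usage, per the comment
 * above sigqueue_alloc()):
 *
 *	q = sigqueue_alloc();		   timer_create(): may fail -> EAGAIN
 *	send_sigqueue(q, pid, type);	   every timer expiry, reusing q
 *	sigqueue_free(q);		   timer_delete()
 */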

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
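
/*
 * Wake up anyone polling a pidfd for this (exiting) task's demise; see
 * the wait_pidfd waitqueue in struct pid.
 */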
static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * would deadlock and would be pointless anyway because our
	 * tracer is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()		    smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		cgroup_enter_frozen();
		preempt_enable_no_resched();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

2259 static void ptrace_do_notify(int signr, int exit_code, int why)
2261 kernel_siginfo_t info;
2263 clear_siginfo(&info);
2264 info.si_signo = signr;
2265 info.si_code = exit_code;
2266 info.si_pid = task_pid_vnr(current);
2267 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2269 /* Let the debugger run. */
2270 ptrace_stop(exit_code, why, 1, &info);
2273 void ptrace_notify(int exit_code)
2275 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2276 if (unlikely(current->task_works))
2277 task_work_run();
2279 spin_lock_irq(&current->sighand->siglock);
2280 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2281 spin_unlock_irq(&current->sighand->siglock);
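/*
 * Example (illustrative sketch, not a definitive caller list): report
 * sites encode a PTRACE_EVENT_* code in the upper bits and SIGTRAP in
 * the low seven bits, which is exactly the layout the BUG_ON() above
 * enforces. The ptrace_event() helper in <linux/ptrace.h> follows this
 * pattern (simplified; the real helper also records a ptrace message):
 *
 *	if (unlikely(ptrace_event_enabled(current, PTRACE_EVENT_EXEC)))
 *		ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 */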
2285 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2286 * @signr: signr causing group stop if initiating
2288 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2289 * and participate in it. If already set, participate in the existing
2290 * group stop. If participated in a group stop (and thus slept), %true is
2291 * returned with siglock released.
2293 * If ptraced, this function doesn't handle stop itself. Instead,
2294 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2295 * untouched. The caller must ensure that INTERRUPT trap handling takes
2296 * place afterwards.
2299 * Must be called with @current->sighand->siglock held, which is released
2300 * on %true return.
2303 * %false if group stop is already cancelled or ptrace trap is scheduled.
2304 * %true if participated in group stop.
2306 static bool do_signal_stop(int signr)
2307 __releases(&current->sighand->siglock)
2309 struct signal_struct *sig = current->signal;
2311 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2312 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2313 struct task_struct *t;
2315 /* signr will be recorded in task->jobctl for retries */
2316 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2318 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2319 unlikely(signal_group_exit(sig)))
2320 return false;
2322 * There is no group stop already in progress. We must
2323 * initiate one now.
2325 * While ptraced, a task may be resumed while group stop is
2326 * still in effect and then receive a stop signal and
2327 * initiate another group stop. This deviates from the
2328 * usual behavior as two consecutive stop signals can't
2329 * cause two group stops when !ptraced. That is why we
2330 * also check !task_is_stopped(t) below.
2332 * The condition can be distinguished by testing whether
2333 * SIGNAL_STOP_STOPPED is already set. Don't generate
2334 * group_exit_code in such case.
2336 * This is not necessary for SIGNAL_STOP_CONTINUED because
2337 * an intervening stop signal is required to cause two
2338 * continued events regardless of ptrace.
2340 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2341 sig->group_exit_code = signr;
2343 sig->group_stop_count = 0;
2345 if (task_set_jobctl_pending(current, signr | gstop))
2346 sig->group_stop_count++;
2348 t = current;
2349 while_each_thread(current, t) {
2351 * Setting state to TASK_STOPPED for a group
2352 * stop is always done with the siglock held,
2353 * so this check has no races.
2355 if (!task_is_stopped(t) &&
2356 task_set_jobctl_pending(t, signr | gstop)) {
2357 sig->group_stop_count++;
2358 if (likely(!(t->ptrace & PT_SEIZED)))
2359 signal_wake_up(t, 0);
2360 else
2361 ptrace_trap_notify(t);
2366 if (likely(!current->ptrace)) {
2370 * If there are no other threads in the group, or if there
2371 * is a group stop in progress and we are the last to stop,
2372 * report to the parent.
2374 if (task_participate_group_stop(current))
2375 notify = CLD_STOPPED;
2377 set_special_state(TASK_STOPPED);
2378 spin_unlock_irq(&current->sighand->siglock);
2381 * Notify the parent of the group stop completion. Because
2382 * we're not holding either the siglock or tasklist_lock
2383 * here, ptracer may attach in between; however, this is for
2384 * group stop and should always be delivered to the real
2385 * parent of the group leader. The new ptracer will get
2386 * its notification when this task transitions into
2387 * TASK_TRACED.
2389 if (notify) {
2390 read_lock(&tasklist_lock);
2391 do_notify_parent_cldstop(current, false, notify);
2392 read_unlock(&tasklist_lock);
2395 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2396 cgroup_enter_frozen();
2397 freezable_schedule();
2398 return true;
2401 * While ptraced, group stop is handled by STOP trap.
2402 * Schedule it and let the caller deal with it.
2404 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2405 return false;
2410 * do_jobctl_trap - take care of ptrace jobctl traps
2412 * When PT_SEIZED, it's used for both group stop and explicit
2413 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2414 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2415 * the stop signal; otherwise, %SIGTRAP.
2417 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2418 * number as exit_code and no siginfo.
2421 * Must be called with @current->sighand->siglock held, which may be
2422 * released and re-acquired before returning with intervening sleep.
2424 static void do_jobctl_trap(void)
2426 struct signal_struct *signal = current->signal;
2427 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2429 if (current->ptrace & PT_SEIZED) {
2430 if (!signal->group_stop_count &&
2431 !(signal->flags & SIGNAL_STOP_STOPPED))
2432 signr = SIGTRAP;
2433 WARN_ON_ONCE(!signr);
2434 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2435 CLD_STOPPED);
2436 } else {
2437 WARN_ON_ONCE(!signr);
2438 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2439 current->exit_code = 0;
2444 * do_freezer_trap - handle the freezer jobctl trap
2446 * Puts the task into the frozen state, unless the task is about to
2447 * quit, in which case JOBCTL_TRAP_FREEZE is dropped instead.
2450 * Must be called with @current->sighand->siglock held,
2451 * which is always released before returning.
2453 static void do_freezer_trap(void)
2454 __releases(&current->sighand->siglock)
2457 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2458 * let's make another loop to give it a chance to be handled.
2459 * In any case, we'll return back.
2461 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2462 JOBCTL_TRAP_FREEZE) {
2463 spin_unlock_irq(&current->sighand->siglock);
2464 return;
2468 * Now we're sure that there is no pending fatal signal and no
2469 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2470 * immediately (if there is a non-fatal signal pending), and
2471 * put the task into sleep.
2473 __set_current_state(TASK_INTERRUPTIBLE);
2474 clear_thread_flag(TIF_SIGPENDING);
2475 spin_unlock_irq(&current->sighand->siglock);
2476 cgroup_enter_frozen();
2477 freezable_schedule();
2480 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2483 * We do not check sig_kernel_stop(signr) but set this marker
2484 * unconditionally because we do not know whether debugger will
2485 * change signr. This flag has no meaning unless we are going
2486 * to stop after return from ptrace_stop(). In this case it will
2487 * be checked in do_signal_stop(), we should only stop if it was
2488 * not cleared by SIGCONT while we were sleeping. See also the
2489 * comment in dequeue_signal().
2491 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2492 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2494 /* We're back. Did the debugger cancel the sig? */
2495 signr = current->exit_code;
2496 if (signr == 0)
2497 return signr;
2499 current->exit_code = 0;
2502 * Update the siginfo structure if the signal has
2503 * changed. If the debugger wanted something
2504 * specific in the siginfo structure then it should
2505 * have updated *info via PTRACE_SETSIGINFO.
2507 if (signr != info->si_signo) {
2508 clear_siginfo(info);
2509 info->si_signo = signr;
2511 info->si_code = SI_USER;
2513 info->si_pid = task_pid_vnr(current->parent);
2514 info->si_uid = from_kuid_munged(current_user_ns(),
2515 task_uid(current->parent));
2519 /* If the (new) signal is now blocked, requeue it. */
2520 if (sigismember(&current->blocked, signr)) {
2521 send_signal(signr, info, current, PIDTYPE_PID);
2522 signr = 0;
2523 }
2525 return signr;
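/*
 * Tracer-side view (hedged userspace sketch, not kernel code): the
 * fourth ptrace() argument at resume time is what shows up in
 * current->exit_code above; 0 cancels the signal, any other value
 * replaces it:
 *
 *	int status;
 *
 *	waitpid(child, &status, 0);               // signal-delivery stop
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGUSR1)
 *		ptrace(PTRACE_CONT, child, 0, 0); // suppress SIGUSR1
 *	else
 *		ptrace(PTRACE_CONT, child, 0, WSTOPSIG(status));
 */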
2528 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2530 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2531 case SIL_FAULT:
2532 case SIL_FAULT_MCEERR:
2533 case SIL_FAULT_BNDERR:
2534 case SIL_FAULT_PKUERR:
2535 ksig->info.si_addr = arch_untagged_si_addr(
2536 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2548 bool get_signal(struct ksignal *ksig)
2550 struct sighand_struct *sighand = current->sighand;
2551 struct signal_struct *signal = current->signal;
2554 if (unlikely(current->task_works))
2555 task_work_run();
2558 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2559 * that the arch handlers don't all have to do it. If we get here
2560 * without TIF_SIGPENDING, just exit after running signal work.
2562 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2563 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2564 tracehook_notify_signal();
2565 if (!task_sigpending(current))
2566 return false;
2569 if (unlikely(uprobe_deny_signal()))
2570 return false;
2573 * Do this once, we can't return to user-mode if freezing() == T.
2574 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2575 * thus do not need another check after return.
2577 try_to_freeze();
2578 relock:
2580 spin_lock_irq(&sighand->siglock);
2583 * Every stopped thread goes here after wakeup. Check to see if
2584 * we should notify the parent, prepare_signal(SIGCONT) encodes
2585 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2587 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2590 if (signal->flags & SIGNAL_CLD_CONTINUED)
2591 why = CLD_CONTINUED;
2592 else
2593 why = CLD_STOPPED;
2595 signal->flags &= ~SIGNAL_CLD_MASK;
2597 spin_unlock_irq(&sighand->siglock);
2600 * Notify the parent that we're continuing. This event is
2601 * always per-process and doesn't make a whole lot of sense
2602 * for ptracers, who shouldn't consume the state via
2603 * wait(2) either, but, for backward compatibility, notify
2604 * the ptracer of the group leader too unless it's gonna be
2605 * a duplicate.
2607 read_lock(&tasklist_lock);
2608 do_notify_parent_cldstop(current, false, why);
2610 if (ptrace_reparented(current->group_leader))
2611 do_notify_parent_cldstop(current->group_leader,
2612 true, why);
2613 read_unlock(&tasklist_lock);
2618 /* Has this task already been marked for death? */
2619 if (signal_group_exit(signal)) {
2620 ksig->info.si_signo = signr = SIGKILL;
2621 sigdelset(&current->pending.signal, SIGKILL);
2622 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2623 &sighand->action[SIGKILL - 1]);
2624 recalc_sigpending();
2625 goto fatal;
2628 for (;;) {
2629 struct k_sigaction *ka;
2631 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2632 do_signal_stop(0))
2633 goto relock;
2635 if (unlikely(current->jobctl &
2636 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2637 if (current->jobctl & JOBCTL_TRAP_MASK) {
2638 do_jobctl_trap();
2639 spin_unlock_irq(&sighand->siglock);
2640 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2641 do_freezer_trap();
2643 goto relock;
2647 * If the task is leaving the frozen state, let's update
2648 * cgroup counters and reset the frozen bit.
2650 if (unlikely(cgroup_task_frozen(current))) {
2651 spin_unlock_irq(&sighand->siglock);
2652 cgroup_leave_frozen(false);
2657 * Signals generated by the execution of an instruction
2658 * need to be delivered before any other pending signals
2659 * so that the instruction pointer in the signal stack
2660 * frame points to the faulting instruction.
2662 signr = dequeue_synchronous_signal(&ksig->info);
2663 if (!signr)
2664 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2666 if (!signr)
2667 break; /* will return 0 */
2669 if (unlikely(current->ptrace) && signr != SIGKILL) {
2670 signr = ptrace_signal(signr, &ksig->info);
2675 ka = &sighand->action[signr-1];
2677 /* Trace actually delivered signals. */
2678 trace_signal_deliver(signr, &ksig->info, ka);
2680 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2681 continue;
2682 if (ka->sa.sa_handler != SIG_DFL) {
2683 /* Run the handler. */
2686 if (ka->sa.sa_flags & SA_ONESHOT)
2687 ka->sa.sa_handler = SIG_DFL;
2689 break; /* will return non-zero "signr" value */
2693 * Now we are doing the default action for this signal.
2695 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2696 continue;
2699 * Global init gets no signals it doesn't want.
2700 * Container-init gets no signals it doesn't want from same
2701 * container.
2703 * Note that if global/container-init sees a sig_kernel_only()
2704 * signal here, the signal must have been generated internally
2705 * or must have come from an ancestor namespace. In either
2706 * case, the signal cannot be dropped.
2708 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2709 !sig_kernel_only(signr))
2710 continue;
2712 if (sig_kernel_stop(signr)) {
2714 * The default action is to stop all threads in
2715 * the thread group. The job control signals
2716 * do nothing in an orphaned pgrp, but SIGSTOP
2717 * always works. Note that siglock needs to be
2718 * dropped during the call to is_orphaned_pgrp()
2719 * because of lock ordering with tasklist_lock.
2720 * This allows an intervening SIGCONT to be posted.
2721 * We need to check for that and bail out if necessary.
2723 if (signr != SIGSTOP) {
2724 spin_unlock_irq(&sighand->siglock);
2726 /* signals can be posted during this window */
2728 if (is_current_pgrp_orphaned())
2729 goto relock;
2731 spin_lock_irq(&sighand->siglock);
2734 if (likely(do_signal_stop(ksig->info.si_signo))) {
2735 /* It released the siglock. */
2736 goto relock;
2740 * We didn't actually stop, due to a race
2741 * with SIGCONT or something like that.
2743 continue;
2746 fatal:
2747 spin_unlock_irq(&sighand->siglock);
2748 if (unlikely(cgroup_task_frozen(current)))
2749 cgroup_leave_frozen(true);
2752 * Anything else is fatal, maybe with a core dump.
2754 current->flags |= PF_SIGNALED;
2756 if (sig_kernel_coredump(signr)) {
2757 if (print_fatal_signals)
2758 print_fatal_signal(ksig->info.si_signo);
2759 proc_coredump_connector(current);
2761 * If it was able to dump core, this kills all
2762 * other threads in the group and synchronizes with
2763 * their demise. If we lost the race with another
2764 * thread getting here, it set group_exit_code
2765 * first and our do_group_exit call below will use
2766 * that value and ignore the one we pass it.
2768 do_coredump(&ksig->info);
2772 * PF_IO_WORKER threads will catch and exit on fatal signals
2773 * themselves. They have cleanup that must be performed, so
2774 * we cannot call do_exit() on their behalf.
2776 if (current->flags & PF_IO_WORKER)
2777 goto out;
2780 * Death signals, no core dump.
2782 do_group_exit(ksig->info.si_signo);
2785 spin_unlock_irq(&sighand->siglock);
2786 out:
2787 ksig->sig = signr;
2789 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2790 hide_si_addr_tag_bits(ksig);
2792 return ksig->sig > 0;
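/*
 * A simplified sketch of the arch-side consumer (loosely modeled on the
 * per-arch signal-exit paths; handle_signal() here stands in for the
 * arch-specific frame-setup helper and is not defined in this file):
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig)) {
 *		// handler found: build the signal frame and run it
 *		handle_signal(&ksig, regs);
 *	} else {
 *		// no handler to run: restore the saved sigmask, if any
 *		restore_saved_sigmask();
 *	}
 */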
2796 * signal_delivered - record that a signal was successfully delivered
2797 * @ksig: kernel signal struct
2798 * @stepping: nonzero if debugger single-step or block-step in use
2800 * This function should be called when a signal has successfully been
2801 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2802 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2803 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2805 static void signal_delivered(struct ksignal *ksig, int stepping)
2809 /* A signal was successfully delivered, and the
2810 saved sigmask was stored on the signal frame,
2811 and will be restored by sigreturn. So we can
2812 simply clear the restore sigmask flag. */
2813 clear_restore_sigmask();
2815 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2816 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2817 sigaddset(&blocked, ksig->sig);
2818 set_current_blocked(&blocked);
2819 tracehook_signal_handler(stepping);
2822 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2824 if (failed)
2825 force_sigsegv(ksig->sig);
2826 else
2827 signal_delivered(ksig, stepping);
2831 * It could be that complete_signal() picked us to notify about the
2832 * group-wide signal. Other threads should be notified now to take
2833 * the shared signals in @which since we will not.
2835 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2838 struct task_struct *t;
2840 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2841 if (sigisemptyset(&retarget))
2842 return;
2844 t = tsk;
2845 while_each_thread(tsk, t) {
2846 if (t->flags & PF_EXITING)
2847 continue;
2849 if (!has_pending_signals(&retarget, &t->blocked))
2850 continue;
2851 /* Remove the signals this thread can handle. */
2852 sigandsets(&retarget, &retarget, &t->blocked);
2854 if (!task_sigpending(t))
2855 signal_wake_up(t, 0);
2857 if (sigisemptyset(&retarget))
2858 break;
2862 void exit_signals(struct task_struct *tsk)
2868 * @tsk is about to have PF_EXITING set - lock out users which
2869 * expect stable threadgroup.
2871 cgroup_threadgroup_change_begin(tsk);
2873 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2874 tsk->flags |= PF_EXITING;
2875 cgroup_threadgroup_change_end(tsk);
2876 return;
2879 spin_lock_irq(&tsk->sighand->siglock);
2881 * From now this task is not visible for group-wide signals,
2882 * see wants_signal(), do_signal_stop().
2884 tsk->flags |= PF_EXITING;
2886 cgroup_threadgroup_change_end(tsk);
2888 if (!task_sigpending(tsk))
2889 goto out;
2891 unblocked = tsk->blocked;
2892 signotset(&unblocked);
2893 retarget_shared_pending(tsk, &unblocked);
2895 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2896 task_participate_group_stop(tsk))
2897 group_stop = CLD_STOPPED;
2898 out:
2899 spin_unlock_irq(&tsk->sighand->siglock);
2902 * If group stop has completed, deliver the notification. This
2903 * should always go to the real parent of the group leader.
2905 if (unlikely(group_stop)) {
2906 read_lock(&tasklist_lock);
2907 do_notify_parent_cldstop(tsk, false, group_stop);
2908 read_unlock(&tasklist_lock);
2913 * System call entry points.
2917 * sys_restart_syscall - restart a system call
2919 SYSCALL_DEFINE0(restart_syscall)
2921 struct restart_block *restart = &current->restart_block;
2922 return restart->fn(restart);
2925 long do_no_restart_syscall(struct restart_block *param)
2927 return -EINTR;
2930 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2932 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2933 sigset_t newblocked;
2934 /* A set of now blocked but previously unblocked signals. */
2935 sigandnsets(&newblocked, newset, &current->blocked);
2936 retarget_shared_pending(tsk, &newblocked);
2938 tsk->blocked = *newset;
2939 recalc_sigpending();
2943 * set_current_blocked - change current->blocked mask
2946 * It is wrong to change ->blocked directly, this helper should be used
2947 * to ensure the process can't miss a shared signal we are going to block.
2949 void set_current_blocked(sigset_t *newset)
2951 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2952 __set_current_blocked(newset);
2955 void __set_current_blocked(const sigset_t *newset)
2957 struct task_struct *tsk = current;
2960 * In case the signal mask hasn't changed, there is nothing we need
2961 * to do. The current->blocked shouldn't be modified by other task.
2963 if (sigequalsets(&tsk->blocked, newset))
2964 return;
2966 spin_lock_irq(&tsk->sighand->siglock);
2967 __set_task_blocked(tsk, newset);
2968 spin_unlock_irq(&tsk->sighand->siglock);
2972 * This is also useful for kernel threads that want to temporarily
2973 * (or permanently) block certain signals.
2975 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2976 * interface happily blocks "unblockable" signals like SIGKILL
2979 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2981 struct task_struct *tsk = current;
2984 /* Lockless, only current can change ->blocked, never from irq */
2985 if (oldset)
2986 *oldset = tsk->blocked;
2990 sigorsets(&newset, &tsk->blocked, set);
2993 sigandnsets(&newset, &tsk->blocked, set);
3002 __set_current_blocked(&newset);
3005 EXPORT_SYMBOL(sigprocmask);
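/*
 * In-kernel usage sketch (illustrative only):
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGHUP);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *
 * Per the note above, this interface will happily block SIGKILL too,
 * so in-kernel callers must build the set with care.
 */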
3008 * The api helps set app-provided sigmasks.
3010 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3011 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3013 * Note that it does set_restore_sigmask() in advance, so it must be always
3014 * paired with restore_saved_sigmask_unless() before return from syscall.
3016 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3022 if (sigsetsize != sizeof(sigset_t))
3023 return -EINVAL;
3024 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3025 return -EFAULT;
3027 set_restore_sigmask();
3028 current->saved_sigmask = current->blocked;
3029 set_current_blocked(&kmask);
3030 return 0;
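/*
 * Caller pattern (hedged sketch of a ppoll-style syscall; error paths
 * abbreviated and do_poll_work() is a hypothetical stand-in):
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_poll_work(...);	// may return -EINTR
 *
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */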
3034 #ifdef CONFIG_COMPAT
3035 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3042 if (sigsetsize != sizeof(compat_sigset_t))
3043 return -EINVAL;
3044 if (get_compat_sigset(&kmask, umask))
3045 return -EFAULT;
3047 set_restore_sigmask();
3048 current->saved_sigmask = current->blocked;
3049 set_current_blocked(&kmask);
3050 return 0;
3056 * sys_rt_sigprocmask - change the list of currently blocked signals
3057 * @how: whether to add, remove, or set signals
3058 * @nset: new set of blocked signals, if non-null
3059 * @oset: previous value of signal mask if non-null
3060 * @sigsetsize: size of sigset_t type
3062 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3063 sigset_t __user *, oset, size_t, sigsetsize)
3065 sigset_t old_set, new_set;
3068 /* XXX: Don't preclude handling different sized sigset_t's. */
3069 if (sigsetsize != sizeof(sigset_t))
3072 old_set = current->blocked;
3075 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3077 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3079 error = sigprocmask(how, &new_set, NULL);
3085 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3092 #ifdef CONFIG_COMPAT
3093 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3094 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3096 sigset_t old_set = current->blocked;
3098 /* XXX: Don't preclude handling different sized sigset_t's. */
3099 if (sigsetsize != sizeof(sigset_t))
3105 if (get_compat_sigset(&new_set, nset))
3107 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3109 error = sigprocmask(how, &new_set, NULL);
3113 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3117 static void do_sigpending(sigset_t *set)
3119 spin_lock_irq(&current->sighand->siglock);
3120 sigorsets(set, &current->pending.signal,
3121 &current->signal->shared_pending.signal);
3122 spin_unlock_irq(&current->sighand->siglock);
3124 /* Outside the lock because only this thread touches it. */
3125 sigandsets(set, &current->blocked, set);
3129 * sys_rt_sigpending - examine a pending signal that has been raised
3130 * while blocked
3131 * @uset: stores pending signals
3132 * @sigsetsize: size of sigset_t type or larger
3134 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3138 if (sigsetsize > sizeof(*uset))
3141 do_sigpending(&set);
3143 if (copy_to_user(uset, &set, sigsetsize))
3149 #ifdef CONFIG_COMPAT
3150 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3151 compat_size_t, sigsetsize)
3155 if (sigsetsize > sizeof(*uset))
3158 do_sigpending(&set);
3160 return put_compat_sigset(uset, &set, sigsetsize);
3164 static const struct {
3165 unsigned char limit, layout;
3166 } sig_sicodes[] = {
3167 [SIGILL] = { NSIGILL, SIL_FAULT },
3168 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3169 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3170 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3171 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3173 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3175 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3176 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3177 [SIGSYS] = { NSIGSYS, SIL_SYS },
3180 static bool known_siginfo_layout(unsigned sig, int si_code)
3182 if (si_code == SI_KERNEL)
3183 return true;
3184 else if ((si_code > SI_USER)) {
3185 if (sig_specific_sicodes(sig)) {
3186 if (si_code <= sig_sicodes[sig].limit)
3187 return true;
3188 }
3189 else if (si_code <= NSIGPOLL)
3190 return true;
3191 }
3192 else if (si_code >= SI_DETHREAD)
3193 return true;
3194 else if (si_code == SI_ASYNCNL)
3195 return true;
3196 return false;
3199 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3201 enum siginfo_layout layout = SIL_KILL;
3202 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3203 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3204 (si_code <= sig_sicodes[sig].limit)) {
3205 layout = sig_sicodes[sig].layout;
3206 /* Handle the exceptions */
3207 if ((sig == SIGBUS) &&
3208 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3209 layout = SIL_FAULT_MCEERR;
3210 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3211 layout = SIL_FAULT_BNDERR;
3213 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3214 layout = SIL_FAULT_PKUERR;
3217 else if (si_code <= NSIGPOLL)
3218 layout = SIL_POLL;
3219 } else {
3220 if (si_code == SI_TIMER)
3221 layout = SIL_TIMER;
3222 else if (si_code == SI_SIGIO)
3223 layout = SIL_POLL;
3224 else if (si_code < 0)
3225 layout = SIL_RT;
3226 }
3227 return layout;
3230 static inline char __user *si_expansion(const siginfo_t __user *info)
3232 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3235 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3237 char __user *expansion = si_expansion(to);
3238 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3240 if (clear_user(expansion, SI_EXPANSION_SIZE))
3245 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3246 const siginfo_t __user *from)
3248 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3249 char __user *expansion = si_expansion(from);
3250 char buf[SI_EXPANSION_SIZE];
3253 * An unknown si_code might need more than
3254 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3255 * extra bytes are 0. This guarantees copy_siginfo_to_user
3256 * will return this data to userspace exactly.
3258 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3260 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3268 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3269 const siginfo_t __user *from)
3271 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3273 to->si_signo = signo;
3274 return post_copy_siginfo_from_user(to, from);
3277 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3279 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3281 return post_copy_siginfo_from_user(to, from);
3284 #ifdef CONFIG_COMPAT
3286 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3287 * @to: compat siginfo destination
3288 * @from: kernel siginfo source
3290 * Note: This function does not work properly for the SIGCHLD on x32, but
3291 * fortunately it doesn't have to. The only valid callers for this function are
3292 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3293 * The latter does not care because SIGCHLD will never cause a coredump.
3295 void copy_siginfo_to_external32(struct compat_siginfo *to,
3296 const struct kernel_siginfo *from)
3298 memset(to, 0, sizeof(*to));
3300 to->si_signo = from->si_signo;
3301 to->si_errno = from->si_errno;
3302 to->si_code = from->si_code;
3303 switch(siginfo_layout(from->si_signo, from->si_code)) {
3305 to->si_pid = from->si_pid;
3306 to->si_uid = from->si_uid;
3309 to->si_tid = from->si_tid;
3310 to->si_overrun = from->si_overrun;
3311 to->si_int = from->si_int;
3314 to->si_band = from->si_band;
3315 to->si_fd = from->si_fd;
3318 to->si_addr = ptr_to_compat(from->si_addr);
3319 #ifdef __ARCH_SI_TRAPNO
3320 to->si_trapno = from->si_trapno;
3323 case SIL_FAULT_MCEERR:
3324 to->si_addr = ptr_to_compat(from->si_addr);
3325 #ifdef __ARCH_SI_TRAPNO
3326 to->si_trapno = from->si_trapno;
3328 to->si_addr_lsb = from->si_addr_lsb;
3330 case SIL_FAULT_BNDERR:
3331 to->si_addr = ptr_to_compat(from->si_addr);
3332 #ifdef __ARCH_SI_TRAPNO
3333 to->si_trapno = from->si_trapno;
3335 to->si_lower = ptr_to_compat(from->si_lower);
3336 to->si_upper = ptr_to_compat(from->si_upper);
3338 case SIL_FAULT_PKUERR:
3339 to->si_addr = ptr_to_compat(from->si_addr);
3340 #ifdef __ARCH_SI_TRAPNO
3341 to->si_trapno = from->si_trapno;
3343 to->si_pkey = from->si_pkey;
3346 to->si_pid = from->si_pid;
3347 to->si_uid = from->si_uid;
3348 to->si_status = from->si_status;
3349 to->si_utime = from->si_utime;
3350 to->si_stime = from->si_stime;
3353 to->si_pid = from->si_pid;
3354 to->si_uid = from->si_uid;
3355 to->si_int = from->si_int;
3358 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3359 to->si_syscall = from->si_syscall;
3360 to->si_arch = from->si_arch;
3365 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3366 const struct kernel_siginfo *from)
3368 struct compat_siginfo new;
3370 copy_siginfo_to_external32(&new, from);
3371 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3376 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3377 const struct compat_siginfo *from)
3380 to->si_signo = from->si_signo;
3381 to->si_errno = from->si_errno;
3382 to->si_code = from->si_code;
3383 switch(siginfo_layout(from->si_signo, from->si_code)) {
3385 to->si_pid = from->si_pid;
3386 to->si_uid = from->si_uid;
3389 to->si_tid = from->si_tid;
3390 to->si_overrun = from->si_overrun;
3391 to->si_int = from->si_int;
3394 to->si_band = from->si_band;
3395 to->si_fd = from->si_fd;
3398 to->si_addr = compat_ptr(from->si_addr);
3399 #ifdef __ARCH_SI_TRAPNO
3400 to->si_trapno = from->si_trapno;
3403 case SIL_FAULT_MCEERR:
3404 to->si_addr = compat_ptr(from->si_addr);
3405 #ifdef __ARCH_SI_TRAPNO
3406 to->si_trapno = from->si_trapno;
3408 to->si_addr_lsb = from->si_addr_lsb;
3410 case SIL_FAULT_BNDERR:
3411 to->si_addr = compat_ptr(from->si_addr);
3412 #ifdef __ARCH_SI_TRAPNO
3413 to->si_trapno = from->si_trapno;
3415 to->si_lower = compat_ptr(from->si_lower);
3416 to->si_upper = compat_ptr(from->si_upper);
3418 case SIL_FAULT_PKUERR:
3419 to->si_addr = compat_ptr(from->si_addr);
3420 #ifdef __ARCH_SI_TRAPNO
3421 to->si_trapno = from->si_trapno;
3423 to->si_pkey = from->si_pkey;
3426 to->si_pid = from->si_pid;
3427 to->si_uid = from->si_uid;
3428 to->si_status = from->si_status;
3429 #ifdef CONFIG_X86_X32_ABI
3430 if (in_x32_syscall()) {
3431 to->si_utime = from->_sifields._sigchld_x32._utime;
3432 to->si_stime = from->_sifields._sigchld_x32._stime;
3433 } else
3434 #endif
3435 {
3436 to->si_utime = from->si_utime;
3437 to->si_stime = from->si_stime;
3441 to->si_pid = from->si_pid;
3442 to->si_uid = from->si_uid;
3443 to->si_int = from->si_int;
3446 to->si_call_addr = compat_ptr(from->si_call_addr);
3447 to->si_syscall = from->si_syscall;
3448 to->si_arch = from->si_arch;
3454 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3455 const struct compat_siginfo __user *ufrom)
3457 struct compat_siginfo from;
3459 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3462 from.si_signo = signo;
3463 return post_copy_siginfo_from_user32(to, &from);
3466 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3467 const struct compat_siginfo __user *ufrom)
3469 struct compat_siginfo from;
3471 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3474 return post_copy_siginfo_from_user32(to, &from);
3476 #endif /* CONFIG_COMPAT */
3479 * do_sigtimedwait - wait for queued signals specified in @which
3480 * @which: queued signals to wait for
3481 * @info: if non-null, the signal's siginfo is returned here
3482 * @ts: upper bound on process time suspension
3484 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3485 const struct timespec64 *ts)
3487 ktime_t *to = NULL, timeout = KTIME_MAX;
3488 struct task_struct *tsk = current;
3489 sigset_t mask = *which;
3492 if (ts) {
3493 if (!timespec64_valid(ts))
3494 return -EINVAL;
3495 timeout = timespec64_to_ktime(*ts);
3496 to = &timeout;
3497 }
3500 * Invert the set of allowed signals to get those we want to block.
3502 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3503 signotset(&mask);
3505 spin_lock_irq(&tsk->sighand->siglock);
3506 sig = dequeue_signal(tsk, &mask, info);
3507 if (!sig && timeout) {
3509 * None ready; temporarily unblock the signals we're interested in
3510 * while we sleep, so that we'll be awakened when they arrive.
3511 * Unblocking is always fine, we can avoid
3512 * set_current_blocked().
3514 tsk->real_blocked = tsk->blocked;
3515 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3516 recalc_sigpending();
3517 spin_unlock_irq(&tsk->sighand->siglock);
3519 __set_current_state(TASK_INTERRUPTIBLE);
3520 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3521 HRTIMER_MODE_REL);
3522 spin_lock_irq(&tsk->sighand->siglock);
3523 __set_task_blocked(tsk, &tsk->real_blocked);
3524 sigemptyset(&tsk->real_blocked);
3525 sig = dequeue_signal(tsk, &mask, info);
3527 spin_unlock_irq(&tsk->sighand->siglock);
3529 if (sig)
3530 return sig;
3531 return ret ? -EINTR : -EAGAIN;
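/*
 * Userspace view (illustrative sketch): the caller blocks the signal
 * first so it stays queued, then collects it synchronously, mirroring
 * the dequeue against &mask above:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", si.si_pid);
 */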
3535 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3536 * in @uthese
3537 * @uthese: queued signals to wait for
3538 * @uinfo: if non-null, the signal's siginfo is returned here
3539 * @uts: upper bound on process time suspension
3540 * @sigsetsize: size of sigset_t type
3542 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3543 siginfo_t __user *, uinfo,
3544 const struct __kernel_timespec __user *, uts,
3548 struct timespec64 ts;
3549 kernel_siginfo_t info;
3552 /* XXX: Don't preclude handling different sized sigset_t's. */
3553 if (sigsetsize != sizeof(sigset_t))
3556 if (copy_from_user(&these, uthese, sizeof(these)))
3560 if (get_timespec64(&ts, uts))
3564 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3566 if (ret > 0 && uinfo) {
3567 if (copy_siginfo_to_user(uinfo, &info))
3574 #ifdef CONFIG_COMPAT_32BIT_TIME
3575 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3576 siginfo_t __user *, uinfo,
3577 const struct old_timespec32 __user *, uts,
3581 struct timespec64 ts;
3582 kernel_siginfo_t info;
3585 if (sigsetsize != sizeof(sigset_t))
3588 if (copy_from_user(&these, uthese, sizeof(these)))
3592 if (get_old_timespec32(&ts, uts))
3596 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3598 if (ret > 0 && uinfo) {
3599 if (copy_siginfo_to_user(uinfo, &info))
3607 #ifdef CONFIG_COMPAT
3608 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3609 struct compat_siginfo __user *, uinfo,
3610 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3613 struct timespec64 t;
3614 kernel_siginfo_t info;
3617 if (sigsetsize != sizeof(sigset_t))
3620 if (get_compat_sigset(&s, uthese))
3624 if (get_timespec64(&t, uts))
3628 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3630 if (ret > 0 && uinfo) {
3631 if (copy_siginfo_to_user32(uinfo, &info))
3638 #ifdef CONFIG_COMPAT_32BIT_TIME
3639 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3640 struct compat_siginfo __user *, uinfo,
3641 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3644 struct timespec64 t;
3645 kernel_siginfo_t info;
3648 if (sigsetsize != sizeof(sigset_t))
3651 if (get_compat_sigset(&s, uthese))
3655 if (get_old_timespec32(&t, uts))
3659 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3661 if (ret > 0 && uinfo) {
3662 if (copy_siginfo_to_user32(uinfo, &info))
3671 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3673 clear_siginfo(info);
3674 info->si_signo = sig;
3676 info->si_code = SI_USER;
3677 info->si_pid = task_tgid_vnr(current);
3678 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3682 * sys_kill - send a signal to a process
3683 * @pid: the PID of the process
3684 * @sig: signal to be sent
3686 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3688 struct kernel_siginfo info;
3690 prepare_kill_siginfo(sig, &info);
3692 return kill_something_info(sig, &info, pid);
3696 * Verify that the signaler and signalee either are in the same pid namespace
3697 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3698 * namespace.
3700 static bool access_pidfd_pidns(struct pid *pid)
3702 struct pid_namespace *active = task_active_pid_ns(current);
3703 struct pid_namespace *p = ns_of_pid(pid);
3716 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3717 siginfo_t __user *info)
3719 #ifdef CONFIG_COMPAT
3721 * Avoid hooking up compat syscalls and instead handle necessary
3722 * conversions here. Note, this is a stop-gap measure and should not be
3723 * considered a generic solution.
3725 if (in_compat_syscall())
3726 return copy_siginfo_from_user32(
3727 kinfo, (struct compat_siginfo __user *)info);
3729 return copy_siginfo_from_user(kinfo, info);
3732 static struct pid *pidfd_to_pid(const struct file *file)
3736 pid = pidfd_pid(file);
3737 if (!IS_ERR(pid))
3738 return pid;
3740 return tgid_pidfd_to_pid(file);
3744 * sys_pidfd_send_signal - Signal a process through a pidfd
3745 * @pidfd: file descriptor of the process
3746 * @sig: signal to send
3747 * @info: signal info
3748 * @flags: future flags
3750 * The syscall currently only signals via PIDTYPE_PID which covers
3751 * kill(<positive-pid>, <signal>). It does not signal threads or process
3752 * groups.
3753 * In order to extend the syscall to threads and process groups the @flags
3754 * argument should be used. In essence, the @flags argument will determine
3755 * what is signaled and not the file descriptor itself. Put in other words,
3756 * grouping is a property of the flags argument not a property of the file
3757 * descriptor.
3759 * Return: 0 on success, negative errno on failure
3761 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3762 siginfo_t __user *, info, unsigned int, flags)
3767 kernel_siginfo_t kinfo;
3769 /* Enforce flags be set to 0 until we add an extension. */
3777 /* Is this a pidfd? */
3778 pid = pidfd_to_pid(f.file);
3785 if (!access_pidfd_pidns(pid))
3789 ret = copy_siginfo_from_user_any(&kinfo, info);
3794 if (unlikely(sig != kinfo.si_signo))
3797 /* Only allow sending arbitrary signals to yourself. */
3799 if ((task_pid(current) != pid) &&
3800 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3803 prepare_kill_siginfo(sig, &kinfo);
3806 ret = kill_pid_info(sig, &kinfo, pid);
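/*
 * Userspace sketch (illustrative; where the C library provides no
 * wrapper, the raw syscalls are used):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *	if (pidfd >= 0) {
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 *
 * Because the pidfd pins the struct pid, this cannot race with PID
 * reuse the way a plain kill(pid, sig) can.
 */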
3814 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3816 struct task_struct *p;
3820 p = find_task_by_vpid(pid);
3821 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3822 error = check_kill_permission(sig, info, p);
3824 * The null signal is a permissions and process existence
3825 * probe. No signal is actually delivered.
3827 if (!error && sig) {
3828 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3830 * If lock_task_sighand() failed we pretend the task
3831 * dies after receiving the signal. The window is tiny,
3832 * and the signal is private anyway.
3834 if (unlikely(error == -ESRCH))
3835 error = 0;
3843 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3845 struct kernel_siginfo info;
3847 clear_siginfo(&info);
3848 info.si_signo = sig;
3850 info.si_code = SI_TKILL;
3851 info.si_pid = task_tgid_vnr(current);
3852 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3854 return do_send_specific(tgid, pid, sig, &info);
3858 * sys_tgkill - send signal to one specific thread
3859 * @tgid: the thread group ID of the thread
3860 * @pid: the PID of the thread
3861 * @sig: signal to be sent
3863 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3864 * exists but no longer belongs to the target process. This
3865 * method solves the problem of threads exiting and PIDs getting reused.
3867 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3869 /* This is only valid for single tasks */
3870 if (pid <= 0 || tgid <= 0)
3873 return do_tkill(tgid, pid, sig);
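/*
 * Userspace sketch (illustrative; commonly invoked via syscall(2)
 * where the C library provides no wrapper), signalling one specific
 * thread of the current process:
 *
 *	pid_t tid = syscall(SYS_gettid);
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */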
3877 * sys_tkill - send signal to one specific task
3878 * @pid: the PID of the task
3879 * @sig: signal to be sent
3881 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3883 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3885 /* This is only valid for single tasks */
3886 if (pid <= 0)
3887 return -EINVAL;
3889 return do_tkill(0, pid, sig);
3892 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3894 /* Not even root can pretend to send signals from the kernel.
3895 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3897 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3898 (task_pid_vnr(current) != pid))
3899 return -EPERM;
3901 /* POSIX.1b doesn't mention process groups. */
3902 return kill_proc_info(sig, info, pid);
3906 * sys_rt_sigqueueinfo - send signal information to a signal
3907 * @pid: the PID of the thread
3908 * @sig: signal to be sent
3909 * @uinfo: signal info to be sent
3911 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3912 siginfo_t __user *, uinfo)
3914 kernel_siginfo_t info;
3915 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3918 return do_rt_sigqueueinfo(pid, sig, &info);
3921 #ifdef CONFIG_COMPAT
3922 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3925 struct compat_siginfo __user *, uinfo)
3927 kernel_siginfo_t info;
3928 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3931 return do_rt_sigqueueinfo(pid, sig, &info);
3935 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3937 /* This is only valid for single tasks */
3938 if (pid <= 0 || tgid <= 0)
3941 /* Not even root can pretend to send signals from the kernel.
3942 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3944 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3945 (task_pid_vnr(current) != pid))
3946 return -EPERM;
3948 return do_send_specific(tgid, pid, sig, info);
3951 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3952 siginfo_t __user *, uinfo)
3954 kernel_siginfo_t info;
3955 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3958 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3961 #ifdef CONFIG_COMPAT
3962 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3966 struct compat_siginfo __user *, uinfo)
3968 kernel_siginfo_t info;
3969 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3972 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3977 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3979 void kernel_sigaction(int sig, __sighandler_t action)
3981 spin_lock_irq(&current->sighand->siglock);
3982 current->sighand->action[sig - 1].sa.sa_handler = action;
3983 if (action == SIG_IGN) {
3986 sigemptyset(&mask);
3987 sigaddset(&mask, sig);
3989 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3990 flush_sigqueue_mask(&mask, &current->pending);
3991 recalc_sigpending();
3993 spin_unlock_irq(&current->sighand->siglock);
3995 EXPORT_SYMBOL(kernel_sigaction);
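/*
 * Typical kthread usage (hedged sketch; allow_signal() and
 * disallow_signal() in <linux/signal.h> are thin wrappers around
 * kernel_sigaction()):
 *
 *	allow_signal(SIGTERM);
 *
 *	while (!kthread_should_stop()) {
 *		do_work();		// hypothetical work item
 *		if (signal_pending(current))
 *			break;
 *	}
 */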
3997 void __weak sigaction_compat_abi(struct k_sigaction *act,
3998 struct k_sigaction *oact)
4002 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4004 struct task_struct *p = current, *t;
4005 struct k_sigaction *k;
4008 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4009 return -EINVAL;
4011 k = &p->sighand->action[sig-1];
4013 spin_lock_irq(&p->sighand->siglock);
4018 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4019 * e.g. by having an architecture use the bit in their uapi.
4021 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4024 * Clear unknown flag bits in order to allow userspace to detect missing
4025 * support for flag bits and to allow the kernel to use non-uapi bits
4026 * internally.
4028 if (act)
4029 act->sa.sa_flags &= UAPI_SA_FLAGS;
4030 if (oact)
4031 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4033 sigaction_compat_abi(act, oact);
4035 if (act) {
4036 sigdelsetmask(&act->sa.sa_mask,
4037 sigmask(SIGKILL) | sigmask(SIGSTOP));
4041 * "Setting a signal action to SIG_IGN for a signal that is
4042 * pending shall cause the pending signal to be discarded,
4043 * whether or not it is blocked."
4045 * "Setting a signal action to SIG_DFL for a signal that is
4046 * pending and whose default action is to ignore the signal
4047 * (for example, SIGCHLD), shall cause the pending signal to
4048 * be discarded, whether or not it is blocked"
4050 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4051 sigemptyset(&mask);
4052 sigaddset(&mask, sig);
4053 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4054 for_each_thread(p, t)
4055 flush_sigqueue_mask(&mask, &t->pending);
4059 spin_unlock_irq(&p->sighand->siglock);
4063 static int
4064 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4065 size_t min_ss_size)
4067 struct task_struct *t = current;
4069 if (oss) {
4070 memset(oss, 0, sizeof(stack_t));
4071 oss->ss_sp = (void __user *) t->sas_ss_sp;
4072 oss->ss_size = t->sas_ss_size;
4073 oss->ss_flags = sas_ss_flags(sp) |
4074 (current->sas_ss_flags & SS_FLAG_BITS);
4077 if (ss) {
4078 void __user *ss_sp = ss->ss_sp;
4079 size_t ss_size = ss->ss_size;
4080 unsigned ss_flags = ss->ss_flags;
4083 if (unlikely(on_sig_stack(sp)))
4084 return -EPERM;
4086 ss_mode = ss_flags & ~SS_FLAG_BITS;
4087 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4088 ss_mode != 0))
4089 return -EINVAL;
4091 if (ss_mode == SS_DISABLE) {
4095 if (unlikely(ss_size < min_ss_size))
4096 return -ENOMEM;
4099 t->sas_ss_sp = (unsigned long) ss_sp;
4100 t->sas_ss_size = ss_size;
4101 t->sas_ss_flags = ss_flags;
4106 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4110 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4112 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4113 current_user_stack_pointer(),
4115 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4116 err = -EFAULT;
4117 return err;
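/*
 * Userspace sketch (illustrative) of the sigaltstack()/SA_ONSTACK
 * pairing this syscall services, e.g. so a handler can still run after
 * the main stack has overflowed:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_sigaction = segv_handler,	// hypothetical handler
 *		.sa_flags = SA_ONSTACK | SA_SIGINFO,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */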
4120 int restore_altstack(const stack_t __user *uss)
4123 if (copy_from_user(&new, uss, sizeof(stack_t)))
4125 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4127 /* squash all but EFAULT for now */
4128 return err == -EFAULT ? err : 0;
4131 int __save_altstack(stack_t __user *uss, unsigned long sp)
4133 struct task_struct *t = current;
4134 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4135 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4136 __put_user(t->sas_ss_size, &uss->ss_size);
4137 if (err)
4138 return err;
4139 if (t->sas_ss_flags & SS_AUTODISARM)
4140 sas_ss_reset(t);
4144 #ifdef CONFIG_COMPAT
4145 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4146 compat_stack_t __user *uoss_ptr)
4152 compat_stack_t uss32;
4153 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4155 uss.ss_sp = compat_ptr(uss32.ss_sp);
4156 uss.ss_flags = uss32.ss_flags;
4157 uss.ss_size = uss32.ss_size;
4159 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4160 compat_user_stack_pointer(),
4161 COMPAT_MINSIGSTKSZ);
4162 if (ret >= 0 && uoss_ptr) {
4164 memset(&old, 0, sizeof(old));
4165 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4166 old.ss_flags = uoss.ss_flags;
4167 old.ss_size = uoss.ss_size;
4168 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4174 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4175 const compat_stack_t __user *, uss_ptr,
4176 compat_stack_t __user *, uoss_ptr)
4178 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4181 int compat_restore_altstack(const compat_stack_t __user *uss)
4183 int err = do_compat_sigaltstack(uss, NULL);
4184 /* squash all but -EFAULT for now */
4185 return err == -EFAULT ? err : 0;
4188 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4191 struct task_struct *t = current;
4192 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4194 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4195 __put_user(t->sas_ss_size, &uss->ss_size);
4196 if (err)
4197 return err;
4198 if (t->sas_ss_flags & SS_AUTODISARM)
4199 sas_ss_reset(t);
4204 #ifdef __ARCH_WANT_SYS_SIGPENDING
4207 * sys_sigpending - examine pending signals
4208 * @uset: where mask of pending signal is returned
4210 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4214 if (sizeof(old_sigset_t) > sizeof(*uset))
4217 do_sigpending(&set);
4219 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4225 #ifdef CONFIG_COMPAT
4226 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4230 do_sigpending(&set);
4232 return put_user(set.sig[0], set32);
4238 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4240 * sys_sigprocmask - examine and change blocked signals
4241 * @how: whether to add, remove, or set signals
4242 * @nset: signals to add or remove (if non-null)
4243 * @oset: previous value of signal mask if non-null
4245 * Some platforms have their own version with special arguments;
4246 * others support only sys_rt_sigprocmask.
4249 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4250 old_sigset_t __user *, oset)
4252 old_sigset_t old_set, new_set;
4253 sigset_t new_blocked;
4255 old_set = current->blocked.sig[0];
4258 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4261 new_blocked = current->blocked;
4265 sigaddsetmask(&new_blocked, new_set);
4268 sigdelsetmask(&new_blocked, new_set);
4271 new_blocked.sig[0] = new_set;
4277 set_current_blocked(&new_blocked);
4281 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4287 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4289 #ifndef CONFIG_ODD_RT_SIGACTION
4291 * sys_rt_sigaction - alter an action taken by a process
4292 * @sig: signal to be sent
4293 * @act: new sigaction
4294 * @oact: used to save the previous sigaction
4295 * @sigsetsize: size of sigset_t type
4297 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4298 const struct sigaction __user *, act,
4299 struct sigaction __user *, oact,
4302 struct k_sigaction new_sa, old_sa;
4305 /* XXX: Don't preclude handling different sized sigset_t's. */
4306 if (sigsetsize != sizeof(sigset_t))
4309 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4312 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4316 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4321 #ifdef CONFIG_COMPAT
4322 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4323 const struct compat_sigaction __user *, act,
4324 struct compat_sigaction __user *, oact,
4325 compat_size_t, sigsetsize)
4327 struct k_sigaction new_ka, old_ka;
4328 #ifdef __ARCH_HAS_SA_RESTORER
4329 compat_uptr_t restorer;
4333 /* XXX: Don't preclude handling different sized sigset_t's. */
4334 if (sigsetsize != sizeof(compat_sigset_t))
4338 compat_uptr_t handler;
4339 ret = get_user(handler, &act->sa_handler);
4340 new_ka.sa.sa_handler = compat_ptr(handler);
4341 #ifdef __ARCH_HAS_SA_RESTORER
4342 ret |= get_user(restorer, &act->sa_restorer);
4343 new_ka.sa.sa_restorer = compat_ptr(restorer);
4345 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4346 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4351 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4352 if (!ret && oact) {
4353 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4354 &oact->sa_handler);
4355 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4356 sizeof(oact->sa_mask));
4357 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4358 #ifdef __ARCH_HAS_SA_RESTORER
4359 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4360 &oact->sa_restorer);
4366 #endif /* !CONFIG_ODD_RT_SIGACTION */
4368 #ifdef CONFIG_OLD_SIGACTION
4369 SYSCALL_DEFINE3(sigaction, int, sig,
4370 const struct old_sigaction __user *, act,
4371 struct old_sigaction __user *, oact)
4373 struct k_sigaction new_ka, old_ka;
4378 if (!access_ok(act, sizeof(*act)) ||
4379 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4380 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4381 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4382 __get_user(mask, &act->sa_mask))
4384 #ifdef __ARCH_HAS_KA_RESTORER
4385 new_ka.ka_restorer = NULL;
4387 siginitset(&new_ka.sa.sa_mask, mask);
4390 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4393 if (!access_ok(oact, sizeof(*oact)) ||
4394 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4395 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4396 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4397 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4404 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4405 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4406 const struct compat_old_sigaction __user *, act,
4407 struct compat_old_sigaction __user *, oact)
4409 struct k_sigaction new_ka, old_ka;
4411 compat_old_sigset_t mask;
4412 compat_uptr_t handler, restorer;
4415 if (!access_ok(act, sizeof(*act)) ||
4416 __get_user(handler, &act->sa_handler) ||
4417 __get_user(restorer, &act->sa_restorer) ||
4418 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4419 __get_user(mask, &act->sa_mask))
4422 #ifdef __ARCH_HAS_KA_RESTORER
4423 new_ka.ka_restorer = NULL;
4425 new_ka.sa.sa_handler = compat_ptr(handler);
4426 new_ka.sa.sa_restorer = compat_ptr(restorer);
4427 siginitset(&new_ka.sa.sa_mask, mask);
4430 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4433 if (!access_ok(oact, sizeof(*oact)) ||
4434 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4435 &oact->sa_handler) ||
4436 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4437 &oact->sa_restorer) ||
4438 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4439 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4446 #ifdef CONFIG_SGETMASK_SYSCALL
4449 * For backwards compatibility. Functionality superseded by sigprocmask.
4451 SYSCALL_DEFINE0(sgetmask)
4454 return current->blocked.sig[0];
4457 SYSCALL_DEFINE1(ssetmask, int, newmask)
4459 int old = current->blocked.sig[0];
4462 siginitset(&newset, newmask);
4463 set_current_blocked(&newset);
4465 return old;
4467 #endif /* CONFIG_SGETMASK_SYSCALL */
4469 #ifdef __ARCH_WANT_SYS_SIGNAL
4471 * For backwards compatibility. Functionality superseded by sigaction.
4473 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4475 struct k_sigaction new_sa, old_sa;
4478 new_sa.sa.sa_handler = handler;
4479 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4480 sigemptyset(&new_sa.sa.sa_mask);
4482 ret = do_sigaction(sig, &new_sa, &old_sa);
4484 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4486 #endif /* __ARCH_WANT_SYS_SIGNAL */
4488 #ifdef __ARCH_WANT_SYS_PAUSE
4490 SYSCALL_DEFINE0(pause)
4492 while (!signal_pending(current)) {
4493 __set_current_state(TASK_INTERRUPTIBLE);
4494 schedule();
4496 return -ERESTARTNOHAND;
4501 static int sigsuspend(sigset_t *set)
4503 current->saved_sigmask = current->blocked;
4504 set_current_blocked(set);
4506 while (!signal_pending(current)) {
4507 __set_current_state(TASK_INTERRUPTIBLE);
4508 schedule();
4510 set_restore_sigmask();
4511 return -ERESTARTNOHAND;
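/*
 * Userspace sketch (illustrative): sigsuspend() exists to close the
 * race between testing a condition and sleeping. The classic pattern
 * blocks the signal, tests, then atomically unblocks and waits:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *
 *	while (!child_exited)		// flag set by a hypothetical handler
 *		sigsuspend(&old);	// atomically unblock and wait
 *
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */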
4515 * sys_rt_sigsuspend - replace the signal mask with the @unewset
4516 * value until a signal is received
4517 * @unewset: new signal mask value
4518 * @sigsetsize: size of sigset_t type
4520 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4524 /* XXX: Don't preclude handling different sized sigset_t's. */
4525 if (sigsetsize != sizeof(sigset_t))
4528 if (copy_from_user(&newset, unewset, sizeof(newset)))
4530 return sigsuspend(&newset);
4533 #ifdef CONFIG_COMPAT
4534 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4538 /* XXX: Don't preclude handling different sized sigset_t's. */
4539 if (sigsetsize != sizeof(sigset_t))
4542 if (get_compat_sigset(&newset, unewset))
4544 return sigsuspend(&newset);
4548 #ifdef CONFIG_OLD_SIGSUSPEND
4549 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4552 siginitset(&blocked, mask);
4553 return sigsuspend(&blocked);
4556 #ifdef CONFIG_OLD_SIGSUSPEND3
4557 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4560 siginitset(&blocked, mask);
4561 return sigsuspend(&blocked);
4565 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4567 return NULL;
4570 static inline void siginfo_buildtime_checks(void)
4572 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4574 /* Verify the offsets in the two siginfos match */
4575 #define CHECK_OFFSET(field) \
4576 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4579 CHECK_OFFSET(si_pid);
4580 CHECK_OFFSET(si_uid);
4583 CHECK_OFFSET(si_tid);
4584 CHECK_OFFSET(si_overrun);
4585 CHECK_OFFSET(si_value);
4588 CHECK_OFFSET(si_pid);
4589 CHECK_OFFSET(si_uid);
4590 CHECK_OFFSET(si_value);
4593 CHECK_OFFSET(si_pid);
4594 CHECK_OFFSET(si_uid);
4595 CHECK_OFFSET(si_status);
4596 CHECK_OFFSET(si_utime);
4597 CHECK_OFFSET(si_stime);
4600 CHECK_OFFSET(si_addr);
4601 CHECK_OFFSET(si_addr_lsb);
4602 CHECK_OFFSET(si_lower);
4603 CHECK_OFFSET(si_upper);
4604 CHECK_OFFSET(si_pkey);
4607 CHECK_OFFSET(si_band);
4608 CHECK_OFFSET(si_fd);
4611 CHECK_OFFSET(si_call_addr);
4612 CHECK_OFFSET(si_syscall);
4613 CHECK_OFFSET(si_arch);
4617 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4618 offsetof(struct siginfo, si_addr));
4619 if (sizeof(int) == sizeof(void __user *)) {
4620 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4621 sizeof(void __user *));
4622 } else {
4623 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4624 sizeof_field(struct siginfo, si_uid)) !=
4625 sizeof(void __user *));
4626 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4627 offsetof(struct siginfo, si_uid));
4629 #ifdef CONFIG_COMPAT
4630 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4631 offsetof(struct compat_siginfo, si_addr));
4632 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4633 sizeof(compat_uptr_t));
4634 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4635 sizeof_field(struct siginfo, si_pid));
4639 void __init signals_init(void)
4641 siginfo_buildtime_checks();
4643 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4646 #ifdef CONFIG_KGDB_KDB
4647 #include <linux/kdb.h>
4649 * kdb_send_sig - Allows kdb to send signals without exposing
4650 * signal internals. This function checks if the required locks are
4651 * available before calling the main signal code, to avoid kdb
4652 * deadlocks.
4654 void kdb_send_sig(struct task_struct *t, int sig)
4656 static struct task_struct *kdb_prev_t;
4658 if (!spin_trylock(&t->sighand->siglock)) {
4659 kdb_printf("Can't do kill command now.\n"
4660 "The sigmask lock is held somewhere else in "
4661 "kernel, try again later\n");
4664 new_t = kdb_prev_t != t;
4665 kdb_prev_t = t;
4666 if (t->state != TASK_RUNNING && new_t) {
4667 spin_unlock(&t->sighand->siglock);
4668 kdb_printf("Process is not RUNNING, sending a signal from "
4669 "kdb risks deadlock\n"
4670 "on the run queue locks. "
4671 "The signal has _not_ been sent.\n"
4672 "Reissue the kill command if you want to risk "
4676 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4677 spin_unlock(&t->sighand->siglock);
4679 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4682 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4684 #endif /* CONFIG_KGDB_KDB */