// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even an ignored signal, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
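
/*
 * Example (illustrative sketch, not part of the original file): for the
 * common one-word case, a signal is "ready" iff its bit is pending and
 * not blocked:
 *
 *	sigset_t pending = { .sig[0] = sigmask(SIGTERM) | sigmask(SIGINT) };
 *	sigset_t blocked = { .sig[0] = sigmask(SIGINT) };
 *
 *	has_pending_signals(&pending, &blocked);  -- true: SIGTERM is ready
 */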
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know they may safely
	 * do so clear it.
	 */
	return false;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		return ffz(~x) + 1;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (x)
			sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
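
/*
 * Example (illustrative, not part of the original file): if both SIGUSR1
 * and SIGSEGV are pending and unblocked in the first word, the
 * SYNCHRONOUS_MASK clamp above makes next_signal() report SIGSEGV first,
 * even though SIGUSR1 has the lower signal number:
 *
 *	x = sigmask(SIGUSR1) | sigmask(SIGSEGV);
 *	x &= SYNCHRONOUS_MASK;		-- leaves only sigmask(SIGSEGV)
 *	sig = ffz(~x) + 1;		-- SIGSEGV
 */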
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		/*
		 * Preallocation does not hold sighand::siglock so it can't
		 * use the cache. The lockless caching requires that only
		 * one consumer and only one producer run at a time.
		 */
		q = READ_ONCE(t->sigqueue_cache);
		if (!q || sigqueue_flags)
			q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
		else
			WRITE_ONCE(t->sigqueue_cache, NULL);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->user = user;
	}

	return q;
}
void exit_task_sigqueue_cache(struct task_struct *tsk)
{
	/* Race free because @tsk is mopped up */
	struct sigqueue *q = tsk->sigqueue_cache;

	if (q) {
		tsk->sigqueue_cache = NULL;
		/*
		 * Hand it back to the cache as the task might
		 * be self reaping which would leak the object.
		 */
		kmem_cache_free(sigqueue_cachep, q);
	}
}
static void sigqueue_cache_or_free(struct sigqueue *q)
{
	/*
	 * Cache one sigqueue per task. This pairs with the consumer side
	 * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
	 * compiler from store tearing and to tell KCSAN that the data race
	 * is intentional when run without holding current->sighand->siglock,
	 * which is fine as current obviously cannot run __sigqueue_free()
	 * concurrently.
	 */
	if (!READ_ONCE(current->sigqueue_cache))
		WRITE_ONCE(current->sigqueue_cache, q);
	else
		kmem_cache_free(sigqueue_cachep, q);
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	sigqueue_cache_or_free(q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
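
/*
 * Example (illustrative sketch, not part of the original file): a kthread
 * that opted in to a signal via allow_signal() typically acts on the
 * pending state and then discards it with flush_signals():
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			flush_signals(current);
 *	}
 */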
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and the
 * ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, the STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
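
/*
 * Example (illustrative, not part of the original file): legacy (non-RT)
 * signals coalesce. If SIGCHLD is already pending, a second SIGCHLD is
 * dropped by the legacy_queue() check in __send_signal() below, whereas
 * an unblocked SIGRTMIN+1 is queued again on every send.
 */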
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;

		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
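
/*
 * Example (usage note, not part of the original file): fatal-signal
 * reporting can be enabled at boot with the kernel parameter
 *
 *	print-fatal-signals=1
 *
 * or at run time via the sysctl kernel.print-fatal-signals.
 */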
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  The 32bit address will be encoded in the low
 * 32bits of the pointer.  Those low 32bits will be stored at a higher
 * address than they appear in a 32 bit pointer.  So userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * member.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
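
/*
 * Example (illustrative sketch, not part of the original file): an
 * in-kernel caller holding a task_struct reference can deliver a
 * kernel-generated SIGTERM; priv = 1 selects SEND_SIG_PRIV, so the
 * signal is not treated as if it came from user space:
 *
 *	static void notify_task(struct task_struct *tsk)
 *	{
 *		if (send_sig(SIGTERM, tsk, 1))
 *			pr_warn("signal delivery failed\n");
 *	}
 */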
void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}
int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}
#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif
int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
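
/*
 * Example (illustrative sketch, not part of the original file): drivers
 * usually hold a struct pid reference rather than a task pointer, so the
 * pid-based helpers are the usual entry points:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *	...
 *	kill_pid(pid, SIGHUP, 1);
 *	put_pid(pid);
 */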
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}
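
/*
 * Example (illustrative sketch, not part of the original file): the POSIX
 * timer code pairs these helpers roughly as follows - preallocate at
 * timer_create() time so that expiry can never fail with -EAGAIN:
 *
 *	q = sigqueue_alloc();			-- at timer_create()
 *	if (!q)
 *		return -EAGAIN;
 *	...
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	-- at each expiry
 *	...
 *	sigqueue_free(q);			-- at timer_delete()
 */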
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
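
/*
 * Example (usage note, not part of the original file): this wakeup is what
 * lets user space poll a pidfd for process exit, e.g.:
 *
 *	int pidfd = pidfd_open(pid, 0);
 *	struct pollfd fd = { .fd = pidfd, .events = POLLIN };
 *	poll(&fd, 1, -1);	-- returns once the process has exited
 */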
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}
/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
2163 * This must be called with current->sighand->siglock held.
2165 * This should be the path for all ptrace stops.
2166 * We always set current->last_siginfo while stopped here.
2167 * That makes it a way to test a stopped process for
2168 * being ptrace-stopped vs being job-control-stopped.
2170 * If we actually decide not to stop at all because the tracer
2171 * is gone, we keep current->exit_code unless clear_code.
2173 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2174 __releases(¤t->sighand->siglock)
2175 __acquires(¤t->sighand->siglock)
2177 bool gstop_done = false;
2179 if (arch_ptrace_stop_needed(exit_code, info)) {
2181 * The arch code has something special to do before a
2182 * ptrace stop. This is allowed to block, e.g. for faults
2183 * on user stack pages. We can't keep the siglock while
2184 * calling arch_ptrace_stop, so we must release it now.
2185 * To preserve proper semantics, we must do this before
2186 * any signal bookkeeping like checking group_stop_count.
2187 * Meanwhile, a SIGKILL could come in before we retake the
2188 * siglock. That must prevent us from sleeping in TASK_TRACED.
2189 * So after regaining the lock, we must check for SIGKILL.
2191 spin_unlock_irq(¤t->sighand->siglock);
2192 arch_ptrace_stop(exit_code, info);
2193 spin_lock_irq(¤t->sighand->siglock);
2194 if (sigkill_pending(current))
2198 set_special_state(TASK_TRACED);
2201 * We're committing to trapping. TRACED should be visible before
2202 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2203 * Also, transition to TRACED and updates to ->jobctl should be
2204 * atomic with respect to siglock and should be done after the arch
2205 * hook as siglock is released and regrabbed across it.
2210 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2212 * set_current_state() smp_wmb();
2214 * wait_task_stopped()
2215 * task_stopped_code()
2216 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2220 current->last_siginfo = info;
2221 current->exit_code = exit_code;
2224 * If @why is CLD_STOPPED, we're trapping to participate in a group
2225 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
2226 * across siglock relocks since INTERRUPT was scheduled, PENDING
2227 * could be clear now. We act as if SIGCONT is received after
2228 * TASK_TRACED is entered - ignore it.
2230 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2231 gstop_done = task_participate_group_stop(current);
2233 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2234 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2235 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2236 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2238 /* entering a trap, clear TRAPPING */
2239 task_clear_jobctl_trapping(current);
2241 spin_unlock_irq(¤t->sighand->siglock);
2242 read_lock(&tasklist_lock);
2243 if (may_ptrace_stop()) {
2245 * Notify parents of the stop.
2247 * While ptraced, there are two parents - the ptracer and
2248 * the real_parent of the group_leader. The ptracer should
2249 * know about every stop while the real parent is only
2250 * interested in the completion of group stop. The states
2251 * for the two don't interact with each other. Notify
2252 * separately unless they're gonna be duplicates.
2254 do_notify_parent_cldstop(current, true, why);
2255 if (gstop_done && ptrace_reparented(current))
2256 do_notify_parent_cldstop(current, false, why);
2259 * Don't want to allow preemption here, because
2260 * sys_ptrace() needs this task to be inactive.
2262 * XXX: implement read_unlock_no_resched().
2265 read_unlock(&tasklist_lock);
2266 cgroup_enter_frozen();
2267 preempt_enable_no_resched();
2268 freezable_schedule();
2269 cgroup_leave_frozen(true);
2272 * By the time we got the lock, our tracer went away.
2273 * Don't drop the lock yet, another tracer may come.
2275 * If @gstop_done, the ptracer went away between group stop
2276 * completion and here. During detach, it would have set
2277 * JOBCTL_STOP_PENDING on us and we'll re-enter
2278 * TASK_STOPPED in do_signal_stop() on return, so notifying
2279 * the real parent of the group stop completion is enough.
2282 do_notify_parent_cldstop(current, false, why);
2284 /* tasklist protects us from ptrace_freeze_traced() */
2285 __set_current_state(TASK_RUNNING);
2287 current->exit_code = 0;
2288 read_unlock(&tasklist_lock);
2292 * We are back. Now reacquire the siglock before touching
2293 * last_siginfo, so that we are sure to have synchronized with
2294 * any signal-sending on another CPU that wants to examine it.
2296 spin_lock_irq(&current->sighand->siglock);
2297 current->last_siginfo = NULL;
2299 /* LISTENING can be set only during STOP traps, clear it */
2300 current->jobctl &= ~JOBCTL_LISTENING;
2303 * Queued signals ignored us while we were stopped for tracing.
2304 * So check for any that we should take before resuming user mode.
2305 * This sets TIF_SIGPENDING, but never clears it.
2307 recalc_sigpending_tsk(current);
2310 static void ptrace_do_notify(int signr, int exit_code, int why)
2312 kernel_siginfo_t info;
2314 clear_siginfo(&info);
2315 info.si_signo = signr;
2316 info.si_code = exit_code;
2317 info.si_pid = task_pid_vnr(current);
2318 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2320 /* Let the debugger run. */
2321 ptrace_stop(exit_code, why, 1, &info);
2324 void ptrace_notify(int exit_code)
2326 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2327 if (unlikely(current->task_works))
2330 spin_lock_irq(&current->sighand->siglock);
2331 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2332 spin_unlock_irq(&current->sighand->siglock);
2336 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2337 * @signr: signr causing group stop if initiating
2339 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2340 * and participate in it. If already set, participate in the existing
2341 * group stop. If participated in a group stop (and thus slept), %true is
2342 * returned with siglock released.
2344 * If ptraced, this function doesn't handle stop itself. Instead,
2345 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2346 * untouched. The caller must ensure that INTERRUPT trap handling takes
2347 * place afterwards.
2350 * Must be called with @current->sighand->siglock held, which is released
2351 * on %true return.
2354 * %false if group stop is already cancelled or ptrace trap is scheduled.
2355 * %true if participated in group stop.
2357 static bool do_signal_stop(int signr)
2358 __releases(&current->sighand->siglock)
2360 struct signal_struct *sig = current->signal;
2362 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2363 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2364 struct task_struct *t;
2366 /* signr will be recorded in task->jobctl for retries */
2367 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2369 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2370 unlikely(signal_group_exit(sig)))
2373 * There is no group stop already in progress. We must
2374 * initiate one now.
2376 * While ptraced, a task may be resumed while group stop is
2377 * still in effect and then receive a stop signal and
2378 * initiate another group stop. This deviates from the
2379 * usual behavior as two consecutive stop signals can't
2380 * cause two group stops when !ptraced. That is why we
2381 * also check !task_is_stopped(t) below.
2383 * The condition can be distinguished by testing whether
2384 * SIGNAL_STOP_STOPPED is already set. Don't generate
2385 * group_exit_code in such case.
2387 * This is not necessary for SIGNAL_STOP_CONTINUED because
2388 * an intervening stop signal is required to cause two
2389 * continued events regardless of ptrace.
2391 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2392 sig->group_exit_code = signr;
2394 sig->group_stop_count = 0;
2396 if (task_set_jobctl_pending(current, signr | gstop))
2397 sig->group_stop_count++;
2400 while_each_thread(current, t) {
2402 * Setting state to TASK_STOPPED for a group
2403 * stop is always done with the siglock held,
2404 * so this check has no races.
2406 if (!task_is_stopped(t) &&
2407 task_set_jobctl_pending(t, signr | gstop)) {
2408 sig->group_stop_count++;
2409 if (likely(!(t->ptrace & PT_SEIZED)))
2410 signal_wake_up(t, 0);
2412 ptrace_trap_notify(t);
2417 if (likely(!current->ptrace)) {
2421 * If there are no other threads in the group, or if there
2422 * is a group stop in progress and we are the last to stop,
2423 * report to the parent.
2425 if (task_participate_group_stop(current))
2426 notify = CLD_STOPPED;
2428 set_special_state(TASK_STOPPED);
2429 spin_unlock_irq(&current->sighand->siglock);
2432 * Notify the parent of the group stop completion. Because
2433 * we're not holding either the siglock or tasklist_lock
2434 * here, a ptracer may attach in between; however, this is for
2435 * group stop and should always be delivered to the real
2436 * parent of the group leader. The new ptracer will get
2437 * its notification when this task transitions into
2441 read_lock(&tasklist_lock);
2442 do_notify_parent_cldstop(current, false, notify);
2443 read_unlock(&tasklist_lock);
2446 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2447 cgroup_enter_frozen();
2448 freezable_schedule();
2452 * While ptraced, group stop is handled by STOP trap.
2453 * Schedule it and let the caller deal with it.
2455 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2461 * do_jobctl_trap - take care of ptrace jobctl traps
2463 * When PT_SEIZED, it's used for both group stop and explicit
2464 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2465 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2466 * the stop signal; otherwise, %SIGTRAP.
2468 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2469 * number as exit_code and no siginfo.
2472 * Must be called with @current->sighand->siglock held, which may be
2473 * released and re-acquired before returning with intervening sleep.
2475 static void do_jobctl_trap(void)
2477 struct signal_struct *signal = current->signal;
2478 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2480 if (current->ptrace & PT_SEIZED) {
2481 if (!signal->group_stop_count &&
2482 !(signal->flags & SIGNAL_STOP_STOPPED))
2484 WARN_ON_ONCE(!signr);
2485 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2488 WARN_ON_ONCE(!signr);
2489 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2490 current->exit_code = 0;
2495 * do_freezer_trap - handle the freezer jobctl trap
2497 * Puts the task into the frozen state, unless the task is about to quit.
2498 * In that case it drops JOBCTL_TRAP_FREEZE instead.
2501 * Must be called with @current->sighand->siglock held,
2502 * which is always released before returning.
2504 static void do_freezer_trap(void)
2505 __releases(&current->sighand->siglock)
2508 * If there are trap bits pending other than JOBCTL_TRAP_FREEZE,
2509 * loop once more to give them a chance to be handled.
2510 * In any case, we'll come back here.
2512 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2513 JOBCTL_TRAP_FREEZE) {
2514 spin_unlock_irq(&current->sighand->siglock);
2519 * Now we're sure that there is no pending fatal signal and no
2520 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2521 * immediately (if there is a non-fatal signal pending), and
2522 * put the task to sleep.
2524 __set_current_state(TASK_INTERRUPTIBLE);
2525 clear_thread_flag(TIF_SIGPENDING);
2526 spin_unlock_irq(&current->sighand->siglock);
2527 cgroup_enter_frozen();
2528 freezable_schedule();
2531 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2534 * We do not check sig_kernel_stop(signr) but set this marker
2535 * unconditionally because we do not know whether debugger will
2536 * change signr. This flag has no meaning unless we are going
2537 * to stop after return from ptrace_stop(). In this case it will
2538 * be checked in do_signal_stop(), we should only stop if it was
2539 * not cleared by SIGCONT while we were sleeping. See also the
2540 * comment in dequeue_signal().
2542 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2543 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2545 /* We're back. Did the debugger cancel the sig? */
2546 signr = current->exit_code;
2550 current->exit_code = 0;
2553 * Update the siginfo structure if the signal has
2554 * changed. If the debugger wanted something
2555 * specific in the siginfo structure then it should
2556 * have updated *info via PTRACE_SETSIGINFO.
2558 if (signr != info->si_signo) {
2559 clear_siginfo(info);
2560 info->si_signo = signr;
2562 info->si_code = SI_USER;
2564 info->si_pid = task_pid_vnr(current->parent);
2565 info->si_uid = from_kuid_munged(current_user_ns(),
2566 task_uid(current->parent));
2570 /* If the (new) signal is now blocked, requeue it. */
2571 if (sigismember(&current->blocked, signr)) {
2572 send_signal(signr, info, current, PIDTYPE_PID);
2579 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2581 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2582 case SIL_FAULT:
2583 case SIL_FAULT_TRAPNO:
2584 case SIL_FAULT_MCEERR:
2585 case SIL_FAULT_BNDERR:
2586 case SIL_FAULT_PKUERR:
2587 case SIL_PERF_EVENT:
2588 ksig->info.si_addr = arch_untagged_si_addr(
2589 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2601 bool get_signal(struct ksignal *ksig)
2603 struct sighand_struct *sighand = current->sighand;
2604 struct signal_struct *signal = current->signal;
2607 if (unlikely(current->task_works))
2611 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2612 * that the arch handlers don't all have to do it. If we get here
2613 * without TIF_SIGPENDING, just exit after running signal work.
2615 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2616 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2617 tracehook_notify_signal();
2618 if (!task_sigpending(current))
2622 if (unlikely(uprobe_deny_signal()))
2626 * Do this once, we can't return to user-mode if freezing() == T.
2627 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2628 * thus do not need another check after return.
2633 spin_lock_irq(&sighand->siglock);
2636 * Every stopped thread goes here after wakeup. Check to see if
2637 * we should notify the parent, prepare_signal(SIGCONT) encodes
2638 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2640 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2643 if (signal->flags & SIGNAL_CLD_CONTINUED)
2644 why = CLD_CONTINUED;
2648 signal->flags &= ~SIGNAL_CLD_MASK;
2650 spin_unlock_irq(&sighand->siglock);
2653 * Notify the parent that we're continuing. This event is
2654 * always per-process and doesn't make a whole lot of sense
2655 * for ptracers, who shouldn't consume the state via
2656 * wait(2) either, but, for backward compatibility, notify
2657 * the ptracer of the group leader too unless it's gonna be
2658 * a duplicate.
2660 read_lock(&tasklist_lock);
2661 do_notify_parent_cldstop(current, false, why);
2663 if (ptrace_reparented(current->group_leader))
2664 do_notify_parent_cldstop(current->group_leader,
2666 read_unlock(&tasklist_lock);
2671 /* Has this task already been marked for death? */
2672 if (signal_group_exit(signal)) {
2673 ksig->info.si_signo = signr = SIGKILL;
2674 sigdelset(&current->pending.signal, SIGKILL);
2675 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2676 &sighand->action[SIGKILL - 1]);
2677 recalc_sigpending();
2682 struct k_sigaction *ka;
2684 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2688 if (unlikely(current->jobctl &
2689 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2690 if (current->jobctl & JOBCTL_TRAP_MASK) {
2692 spin_unlock_irq(&sighand->siglock);
2693 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2700 * If the task is leaving the frozen state, let's update
2701 * cgroup counters and reset the frozen bit.
2703 if (unlikely(cgroup_task_frozen(current))) {
2704 spin_unlock_irq(&sighand->siglock);
2705 cgroup_leave_frozen(false);
2710 * Signals generated by the execution of an instruction
2711 * need to be delivered before any other pending signals
2712 * so that the instruction pointer in the signal stack
2713 * frame points to the faulting instruction.
2715 signr = dequeue_synchronous_signal(&ksig->info);
2717 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2720 break; /* will return 0 */
2722 if (unlikely(current->ptrace) && signr != SIGKILL) {
2723 signr = ptrace_signal(signr, &ksig->info);
2728 ka = &sighand->action[signr-1];
2730 /* Trace actually delivered signals. */
2731 trace_signal_deliver(signr, &ksig->info, ka);
2733 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2735 if (ka->sa.sa_handler != SIG_DFL) {
2736 /* Run the handler. */
2739 if (ka->sa.sa_flags & SA_ONESHOT)
2740 ka->sa.sa_handler = SIG_DFL;
2742 break; /* will return non-zero "signr" value */
2746 * Now we are doing the default action for this signal.
2748 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2752 * Global init gets no signals it doesn't want.
2753 * Container-init gets no signals it doesn't want from the same
2754 * container.
2756 * Note that if global/container-init sees a sig_kernel_only()
2757 * signal here, the signal must have been generated internally
2758 * or must have come from an ancestor namespace. In either
2759 * case, the signal cannot be dropped.
2761 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2762 !sig_kernel_only(signr))
2765 if (sig_kernel_stop(signr)) {
2767 * The default action is to stop all threads in
2768 * the thread group. The job control signals
2769 * do nothing in an orphaned pgrp, but SIGSTOP
2770 * always works. Note that siglock needs to be
2771 * dropped during the call to is_orphaned_pgrp()
2772 * because of lock ordering with tasklist_lock.
2773 * This allows an intervening SIGCONT to be posted.
2774 * We need to check for that and bail out if necessary.
2776 if (signr != SIGSTOP) {
2777 spin_unlock_irq(&sighand->siglock);
2779 /* signals can be posted during this window */
2781 if (is_current_pgrp_orphaned())
2784 spin_lock_irq(&sighand->siglock);
2787 if (likely(do_signal_stop(ksig->info.si_signo))) {
2788 /* It released the siglock. */
2793 * We didn't actually stop, due to a race
2794 * with SIGCONT or something like that.
2800 spin_unlock_irq(&sighand->siglock);
2801 if (unlikely(cgroup_task_frozen(current)))
2802 cgroup_leave_frozen(true);
2805 * Anything else is fatal, maybe with a core dump.
2807 current->flags |= PF_SIGNALED;
2809 if (sig_kernel_coredump(signr)) {
2810 if (print_fatal_signals)
2811 print_fatal_signal(ksig->info.si_signo);
2812 proc_coredump_connector(current);
2814 * If it was able to dump core, this kills all
2815 * other threads in the group and synchronizes with
2816 * their demise. If we lost the race with another
2817 * thread getting here, it set group_exit_code
2818 * first and our do_group_exit call below will use
2819 * that value and ignore the one we pass it.
2821 do_coredump(&ksig->info);
2825 * PF_IO_WORKER threads will catch and exit on fatal signals
2826 * themselves. They have cleanup that must be performed, so
2827 * we cannot call do_exit() on their behalf.
2829 if (current->flags & PF_IO_WORKER)
2833 * Death signals, no core dump.
2835 do_group_exit(ksig->info.si_signo);
2838 spin_unlock_irq(&sighand->siglock);
2842 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2843 hide_si_addr_tag_bits(ksig);
2845 return ksig->sig > 0;
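/*
 * Example: a hypothetical userspace handler opting in to raw (tagged)
 * fault addresses via SA_EXPOSE_TAGBITS; without the flag, the tag bits
 * in si_addr are stripped by hide_si_addr_tag_bits() above. This sketch
 * assumes an architecture that tags addresses (e.g. arm64) and a libc
 * that exposes the flag; fault_handler is a hypothetical handler.
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_sigaction = fault_handler;
 *	sa.sa_flags = SA_SIGINFO | SA_EXPOSE_TAGBITS;
 *	sigaction(SIGSEGV, &sa, NULL);		// si_addr keeps the tag bits
 */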
2849 * signal_delivered - a signal was successfully delivered
2850 * @ksig: kernel signal struct
2851 * @stepping: nonzero if debugger single-step or block-step in use
2853 * This function should be called when a signal has successfully been
2854 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2855 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2856 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2858 static void signal_delivered(struct ksignal *ksig, int stepping)
2862 /* A signal was successfully delivered, and the
2863 saved sigmask was stored on the signal frame,
2864 and will be restored by sigreturn. So we can
2865 simply clear the restore sigmask flag. */
2866 clear_restore_sigmask();
2868 sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask);
2869 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2870 sigaddset(&blocked, ksig->sig);
2871 set_current_blocked(&blocked);
2872 tracehook_signal_handler(stepping);
2875 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2878 force_sigsegv(ksig->sig);
2880 signal_delivered(ksig, stepping);
2884 * It could be that complete_signal() picked us to notify about the
2885 * group-wide signal. Other threads should be notified now to take
2886 * the shared signals in @which since we will not.
2888 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2891 struct task_struct *t;
2893 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2894 if (sigisemptyset(&retarget))
2898 while_each_thread(tsk, t) {
2899 if (t->flags & PF_EXITING)
2902 if (!has_pending_signals(&retarget, &t->blocked))
2904 /* Remove the signals this thread can handle. */
2905 sigandsets(&retarget, &retarget, &t->blocked);
2907 if (!task_sigpending(t))
2908 signal_wake_up(t, 0);
2910 if (sigisemptyset(&retarget))
2915 void exit_signals(struct task_struct *tsk)
2921 * @tsk is about to have PF_EXITING set - lock out users which
2922 * expect stable threadgroup.
2924 cgroup_threadgroup_change_begin(tsk);
2926 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2927 tsk->flags |= PF_EXITING;
2928 cgroup_threadgroup_change_end(tsk);
2932 spin_lock_irq(&tsk->sighand->siglock);
2934 * From now this task is not visible for group-wide signals,
2935 * see wants_signal(), do_signal_stop().
2937 tsk->flags |= PF_EXITING;
2939 cgroup_threadgroup_change_end(tsk);
2941 if (!task_sigpending(tsk))
2944 unblocked = tsk->blocked;
2945 signotset(&unblocked);
2946 retarget_shared_pending(tsk, &unblocked);
2948 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2949 task_participate_group_stop(tsk))
2950 group_stop = CLD_STOPPED;
2952 spin_unlock_irq(&tsk->sighand->siglock);
2955 * If group stop has completed, deliver the notification. This
2956 * should always go to the real parent of the group leader.
2958 if (unlikely(group_stop)) {
2959 read_lock(&tasklist_lock);
2960 do_notify_parent_cldstop(tsk, false, group_stop);
2961 read_unlock(&tasklist_lock);
2966 * System call entry points.
2970 * sys_restart_syscall - restart a system call
2972 SYSCALL_DEFINE0(restart_syscall)
2974 struct restart_block *restart = &current->restart_block;
2975 return restart->fn(restart);
2978 long do_no_restart_syscall(struct restart_block *param)
2983 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2985 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2986 sigset_t newblocked;
2987 /* A set of now blocked but previously unblocked signals. */
2988 sigandnsets(&newblocked, newset, &current->blocked);
2989 retarget_shared_pending(tsk, &newblocked);
2991 tsk->blocked = *newset;
2992 recalc_sigpending();
2996 * set_current_blocked - change current->blocked mask
2999 * It is wrong to change ->blocked directly; this helper should be used
3000 * to ensure the process can't miss a shared signal we are going to block.
3002 void set_current_blocked(sigset_t *newset)
3004 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3005 __set_current_blocked(newset);
3008 void __set_current_blocked(const sigset_t *newset)
3010 struct task_struct *tsk = current;
3013 * In case the signal mask hasn't changed, there is nothing we need
3014 * to do. The current->blocked shouldn't be modified by another task.
3016 if (sigequalsets(&tsk->blocked, newset))
3019 spin_lock_irq(&tsk->sighand->siglock);
3020 __set_task_blocked(tsk, newset);
3021 spin_unlock_irq(&tsk->sighand->siglock);
3025 * This is also useful for kernel threads that want to temporarily
3026 * (or permanently) block certain signals.
3028 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3029 * interface happily blocks "unblockable" signals like SIGKILL
3030 * and friends.
3032 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3034 struct task_struct *tsk = current;
3037 /* Lockless, only current can change ->blocked, never from irq */
3039 *oldset = tsk->blocked;
3043 sigorsets(&newset, &tsk->blocked, set);
3046 sigandnsets(&newset, &tsk->blocked, set);
3055 __set_current_blocked(&newset);
3058 EXPORT_SYMBOL(sigprocmask);
3061 * This API helps set app-provided sigmasks.
3063 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3064 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3066 * Note that it does set_restore_sigmask() in advance, so it must always be
3067 * paired with restore_saved_sigmask_unless() before returning from the syscall.
3069 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3075 if (sigsetsize != sizeof(sigset_t))
3077 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3080 set_restore_sigmask();
3081 current->saved_sigmask = current->blocked;
3082 set_current_blocked(&kmask);
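/*
 * Example: how a caller such as ppoll() consumes the mask installed by
 * set_user_sigmask(). A hypothetical userspace sketch: the mask is in
 * effect only for the duration of the wait and is restored afterwards.
 *
 *	sigset_t mask;
 *	struct pollfd pfd = { .fd = 0, .events = POLLIN };
 *
 *	sigemptyset(&mask);			// unblock everything while waiting
 *	if (ppoll(&pfd, 1, NULL, &mask) < 0 && errno == EINTR)
 *		;				// a signal arrived during the wait
 */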
3087 #ifdef CONFIG_COMPAT
3088 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3095 if (sigsetsize != sizeof(compat_sigset_t))
3097 if (get_compat_sigset(&kmask, umask))
3100 set_restore_sigmask();
3101 current->saved_sigmask = current->blocked;
3102 set_current_blocked(&kmask);
3109 * sys_rt_sigprocmask - change the list of currently blocked signals
3110 * @how: whether to add, remove, or set signals
3111 * @nset: the new set of blocked signals, if non-null
3112 * @oset: previous value of signal mask if non-null
3113 * @sigsetsize: size of sigset_t type
3115 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3116 sigset_t __user *, oset, size_t, sigsetsize)
3118 sigset_t old_set, new_set;
3121 /* XXX: Don't preclude handling different sized sigset_t's. */
3122 if (sigsetsize != sizeof(sigset_t))
3125 old_set = current->blocked;
3128 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3130 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3132 error = sigprocmask(how, &new_set, NULL);
3138 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
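/*
 * Example: hypothetical userspace usage through the glibc sigprocmask()
 * wrapper, which is built on this syscall.
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// block SIGUSR1
 *	...					// critical section
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore the old mask
 */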
3145 #ifdef CONFIG_COMPAT
3146 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3147 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3149 sigset_t old_set = current->blocked;
3151 /* XXX: Don't preclude handling different sized sigset_t's. */
3152 if (sigsetsize != sizeof(sigset_t))
3158 if (get_compat_sigset(&new_set, nset))
3160 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3162 error = sigprocmask(how, &new_set, NULL);
3166 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3170 static void do_sigpending(sigset_t *set)
3172 spin_lock_irq(&current->sighand->siglock);
3173 sigorsets(set, &current->pending.signal,
3174 &current->signal->shared_pending.signal);
3175 spin_unlock_irq(&current->sighand->siglock);
3177 /* Outside the lock because only this thread touches it. */
3178 sigandsets(set, &current->blocked, set);
3182 * sys_rt_sigpending - examine pending signals that have been raised
3183 * while blocked
3184 * @uset: stores pending signals
3185 * @sigsetsize: size of sigset_t type or larger
3187 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3191 if (sigsetsize > sizeof(*uset))
3194 do_sigpending(&set);
3196 if (copy_to_user(uset, &set, sigsetsize))
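/*
 * Example: hypothetical userspace usage through the glibc sigpending()
 * wrapper; it reports signals raised while blocked without delivering them.
 *
 *	sigset_t pending;
 *
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGUSR1))
 *		;			// SIGUSR1 arrived while blocked
 */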
3202 #ifdef CONFIG_COMPAT
3203 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3204 compat_size_t, sigsetsize)
3208 if (sigsetsize > sizeof(*uset))
3211 do_sigpending(&set);
3213 return put_compat_sigset(uset, &set, sigsetsize);
3217 static const struct {
3218 unsigned char limit, layout;
3219 } sig_sicodes[] = {
3220 [SIGILL] = { NSIGILL, SIL_FAULT },
3221 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3222 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3223 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3224 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3226 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3228 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3229 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3230 [SIGSYS] = { NSIGSYS, SIL_SYS },
3233 static bool known_siginfo_layout(unsigned sig, int si_code)
3235 if (si_code == SI_KERNEL)
3237 else if (si_code > SI_USER) {
3238 if (sig_specific_sicodes(sig)) {
3239 if (si_code <= sig_sicodes[sig].limit)
3242 else if (si_code <= NSIGPOLL)
3245 else if (si_code >= SI_DETHREAD)
3247 else if (si_code == SI_ASYNCNL)
3252 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3254 enum siginfo_layout layout = SIL_KILL;
3255 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3256 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3257 (si_code <= sig_sicodes[sig].limit)) {
3258 layout = sig_sicodes[sig].layout;
3259 /* Handle the exceptions */
3260 if ((sig == SIGBUS) &&
3261 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3262 layout = SIL_FAULT_MCEERR;
3263 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3264 layout = SIL_FAULT_BNDERR;
3266 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3267 layout = SIL_FAULT_PKUERR;
3269 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3270 layout = SIL_PERF_EVENT;
3271 #ifdef __ARCH_SI_TRAPNO
3272 else if (layout == SIL_FAULT)
3273 layout = SIL_FAULT_TRAPNO;
3276 else if (si_code <= NSIGPOLL)
3279 if (si_code == SI_TIMER)
3281 else if (si_code == SI_SIGIO)
3283 else if (si_code < 0)
3289 static inline char __user *si_expansion(const siginfo_t __user *info)
3291 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3294 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3296 char __user *expansion = si_expansion(to);
3297 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3299 if (clear_user(expansion, SI_EXPANSION_SIZE))
3304 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3305 const siginfo_t __user *from)
3307 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3308 char __user *expansion = si_expansion(from);
3309 char buf[SI_EXPANSION_SIZE];
3312 * An unknown si_code might need more than
3313 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3314 * extra bytes are 0. This guarantees copy_siginfo_to_user
3315 * will return this data to userspace exactly.
3317 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3319 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3327 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3328 const siginfo_t __user *from)
3330 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3332 to->si_signo = signo;
3333 return post_copy_siginfo_from_user(to, from);
3336 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3338 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3340 return post_copy_siginfo_from_user(to, from);
3343 #ifdef CONFIG_COMPAT
3345 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3346 * @to: compat siginfo destination
3347 * @from: kernel siginfo source
3349 * Note: This function does not work properly for SIGCHLD on x32, but
3350 * fortunately it doesn't have to. The only valid callers for this function are
3351 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3352 * The latter does not care because SIGCHLD will never cause a coredump.
3354 void copy_siginfo_to_external32(struct compat_siginfo *to,
3355 const struct kernel_siginfo *from)
3357 memset(to, 0, sizeof(*to));
3359 to->si_signo = from->si_signo;
3360 to->si_errno = from->si_errno;
3361 to->si_code = from->si_code;
3362 switch(siginfo_layout(from->si_signo, from->si_code)) {
3364 to->si_pid = from->si_pid;
3365 to->si_uid = from->si_uid;
3368 to->si_tid = from->si_tid;
3369 to->si_overrun = from->si_overrun;
3370 to->si_int = from->si_int;
3373 to->si_band = from->si_band;
3374 to->si_fd = from->si_fd;
3377 to->si_addr = ptr_to_compat(from->si_addr);
3379 case SIL_FAULT_TRAPNO:
3380 to->si_addr = ptr_to_compat(from->si_addr);
3381 to->si_trapno = from->si_trapno;
3383 case SIL_FAULT_MCEERR:
3384 to->si_addr = ptr_to_compat(from->si_addr);
3385 to->si_addr_lsb = from->si_addr_lsb;
3387 case SIL_FAULT_BNDERR:
3388 to->si_addr = ptr_to_compat(from->si_addr);
3389 to->si_lower = ptr_to_compat(from->si_lower);
3390 to->si_upper = ptr_to_compat(from->si_upper);
3392 case SIL_FAULT_PKUERR:
3393 to->si_addr = ptr_to_compat(from->si_addr);
3394 to->si_pkey = from->si_pkey;
3396 case SIL_PERF_EVENT:
3397 to->si_addr = ptr_to_compat(from->si_addr);
3398 to->si_perf_data = from->si_perf_data;
3399 to->si_perf_type = from->si_perf_type;
3402 to->si_pid = from->si_pid;
3403 to->si_uid = from->si_uid;
3404 to->si_status = from->si_status;
3405 to->si_utime = from->si_utime;
3406 to->si_stime = from->si_stime;
3409 to->si_pid = from->si_pid;
3410 to->si_uid = from->si_uid;
3411 to->si_int = from->si_int;
3414 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3415 to->si_syscall = from->si_syscall;
3416 to->si_arch = from->si_arch;
3421 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3422 const struct kernel_siginfo *from)
3424 struct compat_siginfo new;
3426 copy_siginfo_to_external32(&new, from);
3427 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3432 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3433 const struct compat_siginfo *from)
3436 to->si_signo = from->si_signo;
3437 to->si_errno = from->si_errno;
3438 to->si_code = from->si_code;
3439 switch(siginfo_layout(from->si_signo, from->si_code)) {
3441 to->si_pid = from->si_pid;
3442 to->si_uid = from->si_uid;
3445 to->si_tid = from->si_tid;
3446 to->si_overrun = from->si_overrun;
3447 to->si_int = from->si_int;
3450 to->si_band = from->si_band;
3451 to->si_fd = from->si_fd;
3454 to->si_addr = compat_ptr(from->si_addr);
3456 case SIL_FAULT_TRAPNO:
3457 to->si_addr = compat_ptr(from->si_addr);
3458 to->si_trapno = from->si_trapno;
3460 case SIL_FAULT_MCEERR:
3461 to->si_addr = compat_ptr(from->si_addr);
3462 to->si_addr_lsb = from->si_addr_lsb;
3464 case SIL_FAULT_BNDERR:
3465 to->si_addr = compat_ptr(from->si_addr);
3466 to->si_lower = compat_ptr(from->si_lower);
3467 to->si_upper = compat_ptr(from->si_upper);
3469 case SIL_FAULT_PKUERR:
3470 to->si_addr = compat_ptr(from->si_addr);
3471 to->si_pkey = from->si_pkey;
3473 case SIL_PERF_EVENT:
3474 to->si_addr = compat_ptr(from->si_addr);
3475 to->si_perf_data = from->si_perf_data;
3476 to->si_perf_type = from->si_perf_type;
3479 to->si_pid = from->si_pid;
3480 to->si_uid = from->si_uid;
3481 to->si_status = from->si_status;
3482 #ifdef CONFIG_X86_X32_ABI
3483 if (in_x32_syscall()) {
3484 to->si_utime = from->_sifields._sigchld_x32._utime;
3485 to->si_stime = from->_sifields._sigchld_x32._stime;
3489 to->si_utime = from->si_utime;
3490 to->si_stime = from->si_stime;
3494 to->si_pid = from->si_pid;
3495 to->si_uid = from->si_uid;
3496 to->si_int = from->si_int;
3499 to->si_call_addr = compat_ptr(from->si_call_addr);
3500 to->si_syscall = from->si_syscall;
3501 to->si_arch = from->si_arch;
3507 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3508 const struct compat_siginfo __user *ufrom)
3510 struct compat_siginfo from;
3512 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3515 from.si_signo = signo;
3516 return post_copy_siginfo_from_user32(to, &from);
3519 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3520 const struct compat_siginfo __user *ufrom)
3522 struct compat_siginfo from;
3524 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3527 return post_copy_siginfo_from_user32(to, &from);
3529 #endif /* CONFIG_COMPAT */
3532 * do_sigtimedwait - wait for queued signals specified in @which
3533 * @which: queued signals to wait for
3534 * @info: if non-null, the signal's siginfo is returned here
3535 * @ts: upper bound on process time suspension
3537 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3538 const struct timespec64 *ts)
3540 ktime_t *to = NULL, timeout = KTIME_MAX;
3541 struct task_struct *tsk = current;
3542 sigset_t mask = *which;
3546 if (!timespec64_valid(ts))
3548 timeout = timespec64_to_ktime(*ts);
3553 * Invert the set of allowed signals to get those we want to block.
3555 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3558 spin_lock_irq(&tsk->sighand->siglock);
3559 sig = dequeue_signal(tsk, &mask, info);
3560 if (!sig && timeout) {
3562 * None ready; temporarily unblock the signals we're interested
3563 * in while we sleep, so that we'll be awakened when they
3564 * arrive. Unblocking is always fine, we can avoid
3565 * set_current_blocked().
3567 tsk->real_blocked = tsk->blocked;
3568 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3569 recalc_sigpending();
3570 spin_unlock_irq(&tsk->sighand->siglock);
3572 __set_current_state(TASK_INTERRUPTIBLE);
3573 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3575 spin_lock_irq(&tsk->sighand->siglock);
3576 __set_task_blocked(tsk, &tsk->real_blocked);
3577 sigemptyset(&tsk->real_blocked);
3578 sig = dequeue_signal(tsk, &mask, info);
3580 spin_unlock_irq(&tsk->sighand->siglock);
3584 return ret ? -EINTR : -EAGAIN;
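/*
 * Example: hypothetical userspace usage through the glibc sigtimedwait()
 * wrapper. The signal must already be blocked, otherwise it would be
 * delivered to a handler instead of being picked up here.
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGCHLD)
 *		;			// si.si_pid identifies the child
 */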
3588 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3589 * in @uthese
3590 * @uthese: queued signals to wait for
3591 * @uinfo: if non-null, the signal's siginfo is returned here
3592 * @uts: upper bound on process time suspension
3593 * @sigsetsize: size of sigset_t type
3595 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3596 siginfo_t __user *, uinfo,
3597 const struct __kernel_timespec __user *, uts,
3601 struct timespec64 ts;
3602 kernel_siginfo_t info;
3605 /* XXX: Don't preclude handling different sized sigset_t's. */
3606 if (sigsetsize != sizeof(sigset_t))
3609 if (copy_from_user(&these, uthese, sizeof(these)))
3613 if (get_timespec64(&ts, uts))
3617 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3619 if (ret > 0 && uinfo) {
3620 if (copy_siginfo_to_user(uinfo, &info))
3627 #ifdef CONFIG_COMPAT_32BIT_TIME
3628 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3629 siginfo_t __user *, uinfo,
3630 const struct old_timespec32 __user *, uts,
3634 struct timespec64 ts;
3635 kernel_siginfo_t info;
3638 if (sigsetsize != sizeof(sigset_t))
3641 if (copy_from_user(&these, uthese, sizeof(these)))
3645 if (get_old_timespec32(&ts, uts))
3649 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3651 if (ret > 0 && uinfo) {
3652 if (copy_siginfo_to_user(uinfo, &info))
3660 #ifdef CONFIG_COMPAT
3661 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3662 struct compat_siginfo __user *, uinfo,
3663 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3666 struct timespec64 t;
3667 kernel_siginfo_t info;
3670 if (sigsetsize != sizeof(sigset_t))
3673 if (get_compat_sigset(&s, uthese))
3677 if (get_timespec64(&t, uts))
3681 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3683 if (ret > 0 && uinfo) {
3684 if (copy_siginfo_to_user32(uinfo, &info))
3691 #ifdef CONFIG_COMPAT_32BIT_TIME
3692 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3693 struct compat_siginfo __user *, uinfo,
3694 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3697 struct timespec64 t;
3698 kernel_siginfo_t info;
3701 if (sigsetsize != sizeof(sigset_t))
3704 if (get_compat_sigset(&s, uthese))
3708 if (get_old_timespec32(&t, uts))
3712 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3714 if (ret > 0 && uinfo) {
3715 if (copy_siginfo_to_user32(uinfo, &info))
3724 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3726 clear_siginfo(info);
3727 info->si_signo = sig;
3729 info->si_code = SI_USER;
3730 info->si_pid = task_tgid_vnr(current);
3731 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3735 * sys_kill - send a signal to a process
3736 * @pid: the PID of the process
3737 * @sig: signal to be sent
3739 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3741 struct kernel_siginfo info;
3743 prepare_kill_siginfo(sig, &info);
3745 return kill_something_info(sig, &info, pid);
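/*
 * Example: hypothetical userspace usage of the null-signal probe; signal
 * 0 performs only the permission and existence checks, nothing is
 * delivered.
 *
 *	if (kill(pid, 0) == 0)
 *		;			// pid exists and we may signal it
 *	else if (errno == ESRCH)
 *		;			// no such process
 *	else if (errno == EPERM)
 *		;			// exists, but we lack permission
 */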
3749 * Verify that the signaler and signalee either are in the same pid namespace
3750 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3751 * namespace.
3753 static bool access_pidfd_pidns(struct pid *pid)
3755 struct pid_namespace *active = task_active_pid_ns(current);
3756 struct pid_namespace *p = ns_of_pid(pid);
3769 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3770 siginfo_t __user *info)
3772 #ifdef CONFIG_COMPAT
3774 * Avoid hooking up compat syscalls and instead handle necessary
3775 * conversions here. Note, this is a stop-gap measure and should not be
3776 * considered a generic solution.
3778 if (in_compat_syscall())
3779 return copy_siginfo_from_user32(
3780 kinfo, (struct compat_siginfo __user *)info);
3782 return copy_siginfo_from_user(kinfo, info);
3785 static struct pid *pidfd_to_pid(const struct file *file)
3789 pid = pidfd_pid(file);
3793 return tgid_pidfd_to_pid(file);
3797 * sys_pidfd_send_signal - Signal a process through a pidfd
3798 * @pidfd: file descriptor of the process
3799 * @sig: signal to send
3800 * @info: signal info
3801 * @flags: future flags
3803 * The syscall currently only signals via PIDTYPE_PID which covers
3804 * kill(<positive-pid>, <signal>). It does not signal threads or process
3805 * groups.
3806 * In order to extend the syscall to threads and process groups the @flags
3807 * argument should be used. In essence, the @flags argument will determine
3808 * what is signaled and not the file descriptor itself. Put in other words,
3809 * grouping is a property of the flags argument, not a property of the file
3810 * descriptor.
3812 * Return: 0 on success, negative errno on failure
3814 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3815 siginfo_t __user *, info, unsigned int, flags)
3820 kernel_siginfo_t kinfo;
3822 /* Enforce that flags is set to 0 until we add an extension. */
3830 /* Is this a pidfd? */
3831 pid = pidfd_to_pid(f.file);
3838 if (!access_pidfd_pidns(pid))
3842 ret = copy_siginfo_from_user_any(&kinfo, info);
3847 if (unlikely(sig != kinfo.si_signo))
3850 /* Only allow sending arbitrary signals to yourself. */
3852 if ((task_pid(current) != pid) &&
3853 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3856 prepare_kill_siginfo(sig, &kinfo);
3859 ret = kill_pid_info(sig, &kinfo, pid);
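/*
 * Example: hypothetical userspace usage via raw syscalls (libc wrappers
 * may not exist, so syscall(2) is used here). Assumes a kernel that also
 * provides pidfd_open(2).
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *	if (pidfd >= 0) {
 *		// NULL siginfo: the kernel fills in a SI_USER description
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */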
3867 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3869 struct task_struct *p;
3873 p = find_task_by_vpid(pid);
3874 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3875 error = check_kill_permission(sig, info, p);
3877 * The null signal is a permissions and process existence
3878 * probe. No signal is actually delivered.
3880 if (!error && sig) {
3881 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3883 * If lock_task_sighand() failed we pretend the task
3884 * dies after receiving the signal. The window is tiny,
3885 * and the signal is private anyway.
3887 if (unlikely(error == -ESRCH))
3896 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3898 struct kernel_siginfo info;
3900 clear_siginfo(&info);
3901 info.si_signo = sig;
3903 info.si_code = SI_TKILL;
3904 info.si_pid = task_tgid_vnr(current);
3905 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3907 return do_send_specific(tgid, pid, sig, &info);
3911 * sys_tgkill - send signal to one specific thread
3912 * @tgid: the thread group ID of the thread
3913 * @pid: the PID of the thread
3914 * @sig: signal to be sent
3916 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3917 * exists but no longer belongs to the target process. This
3918 * method solves the problem of threads exiting and PIDs getting reused.
3920 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3922 /* This is only valid for single tasks */
3923 if (pid <= 0 || tgid <= 0)
3926 return do_tkill(tgid, pid, sig);
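/*
 * Example: hypothetical userspace usage; directing a signal at one
 * thread of the calling process. Older libcs provide no wrapper, hence
 * syscall(2).
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = syscall(SYS_gettid);
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);	// this thread only
 */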
3930 * sys_tkill - send signal to one specific task
3931 * @pid: the PID of the task
3932 * @sig: signal to be sent
3934 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3936 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3938 /* This is only valid for single tasks */
3942 return do_tkill(0, pid, sig);
3945 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3947 /* Not even root can pretend to send signals from the kernel.
3948 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3950 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3951 (task_pid_vnr(current) != pid))
3954 /* POSIX.1b doesn't mention process groups. */
3955 return kill_proc_info(sig, info, pid);
3959 * sys_rt_sigqueueinfo - send signal information to a process
3960 * @pid: the PID of the process
3961 * @sig: signal to be sent
3962 * @uinfo: signal info to be sent
3964 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3965 siginfo_t __user *, uinfo)
3967 kernel_siginfo_t info;
3968 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3971 return do_rt_sigqueueinfo(pid, sig, &info);
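/*
 * Example: hypothetical userspace usage through the POSIX sigqueue()
 * wrapper, which is built on this syscall and attaches a payload the
 * handler can read from si_value.
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, v);	// handler sees si_code == SI_QUEUE
 *					// and si_value.sival_int == 42
 */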
3974 #ifdef CONFIG_COMPAT
3975 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3978 struct compat_siginfo __user *, uinfo)
3980 kernel_siginfo_t info;
3981 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3984 return do_rt_sigqueueinfo(pid, sig, &info);
3988 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3990 /* This is only valid for single tasks */
3991 if (pid <= 0 || tgid <= 0)
3994 /* Not even root can pretend to send signals from the kernel.
3995 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3997 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3998 (task_pid_vnr(current) != pid))
4001 return do_send_specific(tgid, pid, sig, info);
4004 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4005 siginfo_t __user *, uinfo)
4007 kernel_siginfo_t info;
4008 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4011 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4014 #ifdef CONFIG_COMPAT
4015 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4019 struct compat_siginfo __user *, uinfo)
4021 kernel_siginfo_t info;
4022 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4025 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4030 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4032 void kernel_sigaction(int sig, __sighandler_t action)
4034 spin_lock_irq(&current->sighand->siglock);
4035 current->sighand->action[sig - 1].sa.sa_handler = action;
4036 if (action == SIG_IGN) {
4040 sigaddset(&mask, sig);
4042 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4043 flush_sigqueue_mask(&mask, &current->pending);
4044 recalc_sigpending();
4046 spin_unlock_irq(&current->sighand->siglock);
4048 EXPORT_SYMBOL(kernel_sigaction);
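/*
 * Example: a minimal in-kernel sketch of a kthread opting in to a signal
 * via the allow_signal() wrapper around kernel_sigaction(); the work
 * loop shown is hypothetical.
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		...				// wait for and process work
 *		if (signal_pending(current))
 *			break;			// asked to terminate
 *	}
 */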
4050 void __weak sigaction_compat_abi(struct k_sigaction *act,
4051 struct k_sigaction *oact)
4055 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4057 struct task_struct *p = current, *t;
4058 struct k_sigaction *k;
4061 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4064 k = &p->sighand->action[sig-1];
4066 spin_lock_irq(&p->sighand->siglock);
4071 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4072 * e.g. by having an architecture use the bit in their uapi.
4074 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4077 * Clear unknown flag bits in order to allow userspace to detect missing
4078 * support for flag bits and to allow the kernel to use non-uapi bits
4082 act->sa.sa_flags &= UAPI_SA_FLAGS;
4084 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4086 sigaction_compat_abi(act, oact);
4089 sigdelsetmask(&act->sa.sa_mask,
4090 sigmask(SIGKILL) | sigmask(SIGSTOP));
4094 * "Setting a signal action to SIG_IGN for a signal that is
4095 * pending shall cause the pending signal to be discarded,
4096 * whether or not it is blocked."
4098 * "Setting a signal action to SIG_DFL for a signal that is
4099 * pending and whose default action is to ignore the signal
4100 * (for example, SIGCHLD), shall cause the pending signal to
4101 * be discarded, whether or not it is blocked"
4103 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4105 sigaddset(&mask, sig);
4106 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4107 for_each_thread(p, t)
4108 flush_sigqueue_mask(&mask, &t->pending);
4112 spin_unlock_irq(&p->sighand->siglock);
4117 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4120 struct task_struct *t = current;
4123 memset(oss, 0, sizeof(stack_t));
4124 oss->ss_sp = (void __user *) t->sas_ss_sp;
4125 oss->ss_size = t->sas_ss_size;
4126 oss->ss_flags = sas_ss_flags(sp) |
4127 (current->sas_ss_flags & SS_FLAG_BITS);
4131 void __user *ss_sp = ss->ss_sp;
4132 size_t ss_size = ss->ss_size;
4133 unsigned ss_flags = ss->ss_flags;
4136 if (unlikely(on_sig_stack(sp)))
4139 ss_mode = ss_flags & ~SS_FLAG_BITS;
4140 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4144 if (ss_mode == SS_DISABLE) {
4148 if (unlikely(ss_size < min_ss_size))
4152 t->sas_ss_sp = (unsigned long) ss_sp;
4153 t->sas_ss_size = ss_size;
4154 t->sas_ss_flags = ss_flags;
4159 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4163 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4165 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4166 current_user_stack_pointer(),
4168 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
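/*
 * Example: hypothetical userspace setup of an alternate stack, so that a
 * SIGSEGV caused by overflowing the normal stack can still run a handler;
 * segv_handler is a hypothetical handler.
 *
 *	stack_t ss = { 0 };
 *	struct sigaction sa = { 0 };
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	sigaltstack(&ss, NULL);
 *
 *	sa.sa_sigaction = segv_handler;
 *	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;	// run on the alt stack
 *	sigaction(SIGSEGV, &sa, NULL);
 */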
4173 int restore_altstack(const stack_t __user *uss)
4176 if (copy_from_user(&new, uss, sizeof(stack_t)))
4178 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4180 /* squash all but EFAULT for now */
4184 int __save_altstack(stack_t __user *uss, unsigned long sp)
4186 struct task_struct *t = current;
4187 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4188 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4189 __put_user(t->sas_ss_size, &uss->ss_size);
4192 if (t->sas_ss_flags & SS_AUTODISARM)
4197 #ifdef CONFIG_COMPAT
4198 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4199 compat_stack_t __user *uoss_ptr)
4205 compat_stack_t uss32;
4206 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4208 uss.ss_sp = compat_ptr(uss32.ss_sp);
4209 uss.ss_flags = uss32.ss_flags;
4210 uss.ss_size = uss32.ss_size;
4212 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4213 compat_user_stack_pointer(),
4214 COMPAT_MINSIGSTKSZ);
4215 if (ret >= 0 && uoss_ptr) {
4217 memset(&old, 0, sizeof(old));
4218 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4219 old.ss_flags = uoss.ss_flags;
4220 old.ss_size = uoss.ss_size;
4221 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4227 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4228 const compat_stack_t __user *, uss_ptr,
4229 compat_stack_t __user *, uoss_ptr)
4231 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4234 int compat_restore_altstack(const compat_stack_t __user *uss)
4236 int err = do_compat_sigaltstack(uss, NULL);
4237 /* squash all but -EFAULT for now */
4238 return err == -EFAULT ? err : 0;
4241 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4244 struct task_struct *t = current;
4245 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4247 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4248 __put_user(t->sas_ss_size, &uss->ss_size);
4251 if (t->sas_ss_flags & SS_AUTODISARM)
4257 #ifdef __ARCH_WANT_SYS_SIGPENDING
4260 * sys_sigpending - examine pending signals
4261 * @uset: where the mask of pending signals is returned
4263 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4267 if (sizeof(old_sigset_t) > sizeof(*uset))
4270 do_sigpending(&set);
4272 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4278 #ifdef CONFIG_COMPAT
4279 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4283 do_sigpending(&set);
4285 return put_user(set.sig[0], set32);
4291 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4293 * sys_sigprocmask - examine and change blocked signals
4294 * @how: whether to add, remove, or set signals
4295 * @nset: signals to add or remove (if non-null)
4296 * @oset: previous value of signal mask if non-null
4298 * Some platforms have their own version with special arguments;
4299 * others support only sys_rt_sigprocmask.
4302 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4303 old_sigset_t __user *, oset)
4305 old_sigset_t old_set, new_set;
4306 sigset_t new_blocked;
4308 old_set = current->blocked.sig[0];
4311 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4314 new_blocked = current->blocked;
4318 sigaddsetmask(&new_blocked, new_set);
4321 sigdelsetmask(&new_blocked, new_set);
4324 new_blocked.sig[0] = new_set;
4330 set_current_blocked(&new_blocked);
4334 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4340 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4342 #ifndef CONFIG_ODD_RT_SIGACTION
4344 * sys_rt_sigaction - alter an action taken by a process
4345 * @sig: signal to be sent
4346 * @act: new sigaction
4347 * @oact: used to save the previous sigaction
4348 * @sigsetsize: size of sigset_t type
4350 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4351 const struct sigaction __user *, act,
4352 struct sigaction __user *, oact,
4355 struct k_sigaction new_sa, old_sa;
4358 /* XXX: Don't preclude handling different sized sigset_t's. */
4359 if (sigsetsize != sizeof(sigset_t))
4362 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4365 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4369 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
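/*
 * Example: hypothetical userspace usage through the glibc sigaction()
 * wrapper, which is built on this syscall; handler is a hypothetical
 * void handler(int, siginfo_t *, void *).
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */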
4374 #ifdef CONFIG_COMPAT
4375 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4376 const struct compat_sigaction __user *, act,
4377 struct compat_sigaction __user *, oact,
4378 compat_size_t, sigsetsize)
4380 struct k_sigaction new_ka, old_ka;
4381 #ifdef __ARCH_HAS_SA_RESTORER
4382 compat_uptr_t restorer;
4386 /* XXX: Don't preclude handling different sized sigset_t's. */
4387 if (sigsetsize != sizeof(compat_sigset_t))
4391 compat_uptr_t handler;
4392 ret = get_user(handler, &act->sa_handler);
4393 new_ka.sa.sa_handler = compat_ptr(handler);
4394 #ifdef __ARCH_HAS_SA_RESTORER
4395 ret |= get_user(restorer, &act->sa_restorer);
4396 new_ka.sa.sa_restorer = compat_ptr(restorer);
4398 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4399 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4404 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4406 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4408 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4409 sizeof(oact->sa_mask));
4410 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4411 #ifdef __ARCH_HAS_SA_RESTORER
4412 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4413 &oact->sa_restorer);
4419 #endif /* !CONFIG_ODD_RT_SIGACTION */
4421 #ifdef CONFIG_OLD_SIGACTION
4422 SYSCALL_DEFINE3(sigaction, int, sig,
4423 const struct old_sigaction __user *, act,
4424 struct old_sigaction __user *, oact)
4426 struct k_sigaction new_ka, old_ka;
4431 if (!access_ok(act, sizeof(*act)) ||
4432 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4433 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4434 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4435 __get_user(mask, &act->sa_mask))
4437 #ifdef __ARCH_HAS_KA_RESTORER
4438 new_ka.ka_restorer = NULL;
4440 siginitset(&new_ka.sa.sa_mask, mask);
4443 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4446 if (!access_ok(oact, sizeof(*oact)) ||
4447 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4448 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4449 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4450 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4457 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4458 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4459 const struct compat_old_sigaction __user *, act,
4460 struct compat_old_sigaction __user *, oact)
4462 struct k_sigaction new_ka, old_ka;
4464 compat_old_sigset_t mask;
4465 compat_uptr_t handler, restorer;
4468 if (!access_ok(act, sizeof(*act)) ||
4469 __get_user(handler, &act->sa_handler) ||
4470 __get_user(restorer, &act->sa_restorer) ||
4471 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4472 __get_user(mask, &act->sa_mask))
4475 #ifdef __ARCH_HAS_KA_RESTORER
4476 new_ka.ka_restorer = NULL;
4478 new_ka.sa.sa_handler = compat_ptr(handler);
4479 new_ka.sa.sa_restorer = compat_ptr(restorer);
4480 siginitset(&new_ka.sa.sa_mask, mask);
4483 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4486 if (!access_ok(oact, sizeof(*oact)) ||
4487 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4488 &oact->sa_handler) ||
4489 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4490 &oact->sa_restorer) ||
4491 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4492 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4499 #ifdef CONFIG_SGETMASK_SYSCALL
4502 * For backwards compatibility. Functionality superseded by sigprocmask.
4504 SYSCALL_DEFINE0(sgetmask)
4507 return current->blocked.sig[0];
4510 SYSCALL_DEFINE1(ssetmask, int, newmask)
4512 int old = current->blocked.sig[0];
4515 siginitset(&newset, newmask);
4516 set_current_blocked(&newset);
4520 #endif /* CONFIG_SGETMASK_SYSCALL */
4522 #ifdef __ARCH_WANT_SYS_SIGNAL
4524 * For backwards compatibility. Functionality superseded by sigaction.
4526 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4528 struct k_sigaction new_sa, old_sa;
4531 new_sa.sa.sa_handler = handler;
4532 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4533 sigemptyset(&new_sa.sa.sa_mask);
4535 ret = do_sigaction(sig, &new_sa, &old_sa);
4537 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4539 #endif /* __ARCH_WANT_SYS_SIGNAL */
4541 #ifdef __ARCH_WANT_SYS_PAUSE
4543 SYSCALL_DEFINE0(pause)
4545 while (!signal_pending(current)) {
4546 __set_current_state(TASK_INTERRUPTIBLE);
4549 return -ERESTARTNOHAND;
4554 static int sigsuspend(sigset_t *set)
4556 current->saved_sigmask = current->blocked;
4557 set_current_blocked(set);
4559 while (!signal_pending(current)) {
4560 __set_current_state(TASK_INTERRUPTIBLE);
4563 set_restore_sigmask();
4564 return -ERESTARTNOHAND;
4568 * sys_rt_sigsuspend - replace the signal mask with the @unewset
4569 * value until a signal is received
4570 * @unewset: new signal mask value
4571 * @sigsetsize: size of sigset_t type
4573 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4577 /* XXX: Don't preclude handling different sized sigset_t's. */
4578 if (sigsetsize != sizeof(sigset_t))
4581 if (copy_from_user(&newset, unewset, sizeof(newset)))
4583 return sigsuspend(&newset);
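/*
 * Example: hypothetical userspace usage; the classic race-free wait that
 * pause() cannot provide, since sigsuspend() unblocks the signal and
 * sleeps atomically. Here, flag is assumed to be set by the handler.
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);	// close the race window
 *	while (!flag)
 *		sigsuspend(&old);		// unblock + sleep atomically
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */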
4586 #ifdef CONFIG_COMPAT
4587 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4591 /* XXX: Don't preclude handling different sized sigset_t's. */
4592 if (sigsetsize != sizeof(sigset_t))
4595 if (get_compat_sigset(&newset, unewset))
4597 return sigsuspend(&newset);
4601 #ifdef CONFIG_OLD_SIGSUSPEND
4602 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4605 siginitset(&blocked, mask);
4606 return sigsuspend(&blocked);
4609 #ifdef CONFIG_OLD_SIGSUSPEND3
4610 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4613 siginitset(&blocked, mask);
4614 return sigsuspend(&blocked);
4618 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4623 static inline void siginfo_buildtime_checks(void)
4625 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4627 /* Verify the offsets in the two siginfos match */
4628 #define CHECK_OFFSET(field) \
4629 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4632 CHECK_OFFSET(si_pid);
4633 CHECK_OFFSET(si_uid);
4636 CHECK_OFFSET(si_tid);
4637 CHECK_OFFSET(si_overrun);
4638 CHECK_OFFSET(si_value);
4641 CHECK_OFFSET(si_pid);
4642 CHECK_OFFSET(si_uid);
4643 CHECK_OFFSET(si_value);
4646 CHECK_OFFSET(si_pid);
4647 CHECK_OFFSET(si_uid);
4648 CHECK_OFFSET(si_status);
4649 CHECK_OFFSET(si_utime);
4650 CHECK_OFFSET(si_stime);
4653 CHECK_OFFSET(si_addr);
4654 CHECK_OFFSET(si_trapno);
4655 CHECK_OFFSET(si_addr_lsb);
4656 CHECK_OFFSET(si_lower);
4657 CHECK_OFFSET(si_upper);
4658 CHECK_OFFSET(si_pkey);
4659 CHECK_OFFSET(si_perf_data);
4660 CHECK_OFFSET(si_perf_type);
4663 CHECK_OFFSET(si_band);
4664 CHECK_OFFSET(si_fd);
4667 CHECK_OFFSET(si_call_addr);
4668 CHECK_OFFSET(si_syscall);
4669 CHECK_OFFSET(si_arch);
4673 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4674 offsetof(struct siginfo, si_addr));
4675 if (sizeof(int) == sizeof(void __user *)) {
4676 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4677 sizeof(void __user *));
4679 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4680 sizeof_field(struct siginfo, si_uid)) !=
4681 sizeof(void __user *));
4682 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4683 offsetof(struct siginfo, si_uid));
4685 #ifdef CONFIG_COMPAT
4686 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4687 offsetof(struct compat_siginfo, si_addr));
4688 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4689 sizeof(compat_uptr_t));
4690 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4691 sizeof_field(struct siginfo, si_pid));
4695 void __init signals_init(void)
4697 siginfo_buildtime_checks();
4699 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4702 #ifdef CONFIG_KGDB_KDB
4703 #include <linux/kdb.h>
4705 * kdb_send_sig - Allows kdb to send signals without exposing
4706 * signal internals. This function checks if the required locks are
4707 * available before calling the main signal code, to avoid kdb
4708 * deadlocks.
4710 void kdb_send_sig(struct task_struct *t, int sig)
4712 static struct task_struct *kdb_prev_t;
4714 if (!spin_trylock(&t->sighand->siglock)) {
4715 kdb_printf("Can't do kill command now.\n"
4716 "The sigmask lock is held somewhere else in "
4717 "kernel, try again later\n");
4720 new_t = kdb_prev_t != t;
4722 if (t->state != TASK_RUNNING && new_t) {
4723 spin_unlock(&t->sighand->siglock);
4724 kdb_printf("Process is not RUNNING, sending a signal from "
4725 "kdb risks deadlock\n"
4726 "on the run queue locks. "
4727 "The signal has _not_ been sent.\n"
4728 "Reissue the kill command if you want to risk "
4732 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4733 spin_unlock(&t->sighand->siglock);
4735 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4738 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4740 #endif /* CONFIG_KGDB_KDB */