diff --git a/kernel/signal.c b/kernel/signal.c
index afa4f781f924..2a06f2441805 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -39,11 +39,19 @@ static struct kmem_cache *sigqueue_cachep;
 
+static int __sig_ignored(struct task_struct *t, int sig)
+{
+	void __user *handler;
+
+	/* Is it explicitly or implicitly ignored? */
+
+	handler = t->sighand->action[sig - 1].sa.sa_handler;
+	return handler == SIG_IGN ||
+		(handler == SIG_DFL && sig_kernel_ignore(sig));
+}
 
 static int sig_ignored(struct task_struct *t, int sig)
 {
-	void __user * handler;
-
 	/*
 	 * Tracers always want to know about signals..
 	 */
@@ -58,10 +66,7 @@ static int sig_ignored(struct task_struct *t, int sig)
 	if (sigismember(&t->blocked, sig) ||
 	    sigismember(&t->real_blocked, sig))
 		return 0;
 
-	/* Is it explicitly or implicitly ignored? */
-	handler = t->sighand->action[sig-1].sa.sa_handler;
-	return handler == SIG_IGN ||
-		(handler == SIG_DFL && sig_kernel_ignore(sig));
+	return __sig_ignored(t, sig);
 }
 
 /*
@@ -220,7 +225,7 @@ void flush_signals(struct task_struct *t)
 	unsigned long flags;
 
 	spin_lock_irqsave(&t->sighand->siglock, flags);
-	clear_tsk_thread_flag(t,TIF_SIGPENDING);
+	clear_tsk_thread_flag(t, TIF_SIGPENDING);
 	flush_sigqueue(&t->pending);
 	flush_sigqueue(&t->signal->shared_pending);
 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
@@ -372,7 +377,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  */
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
-	int signr = 0;
+	int signr;
 
 	/* We only dequeue private signals from ourselves, we don't let
 	 * signalfd steal them
@@ -405,8 +410,12 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		}
 	}
 	}
+
 	recalc_sigpending();
-	if (signr && unlikely(sig_kernel_stop(signr))) {
+	if (!signr)
+		return 0;
+
+	if (unlikely(sig_kernel_stop(signr))) {
 		/*
 		 * Set a marker that we have dequeued a stop signal.  Our
 		 * caller might release the siglock and then the pending
@@ -422,9 +431,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
 			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
 	}
-	if (signr &&
-	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
-	     info->si_sys_private){
+	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
 		/*
 		 * Release the siglock to ensure proper locking order
 		 * of timer locks outside of siglocks.  Note, we leave
@@ -456,15 +463,15 @@ void signal_wake_up(struct task_struct *t, int resume)
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
 
 	/*
-	 * For SIGKILL, we want to wake it up in the stopped/traced case.
-	 * We don't check t->state here because there is a race with it
+	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
 	 * By using wake_up_state, we ensure the process will wake up and
 	 * handle its death signal.
 	 */
 	mask = TASK_INTERRUPTIBLE;
 	if (resume)
-		mask |= TASK_STOPPED | TASK_TRACED;
+		mask |= TASK_WAKEKILL;
 	if (!wake_up_state(t, mask))
 		kick_process(t);
 }
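
As a userspace illustration of the sig_ignored() logic above (a minimal sketch, not part of the patch; plain POSIX C, Linux behavior): an unblocked ignored signal is discarded at send time, while the sigismember(&t->blocked, sig) test keeps a blocked one queued.

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pending;

	signal(SIGUSR1, SIG_IGN);	/* handler == SIG_IGN: explicitly ignored */

	raise(SIGUSR1);			/* unblocked: discarded in send_signal() */
	sigpending(&pending);
	printf("unblocked: pending=%d\n", sigismember(&pending, SIGUSR1)); /* 0 */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);			/* blocked: sig_ignored() says no, so it queues */
	sigpending(&pending);
	printf("blocked:   pending=%d\n", sigismember(&pending, SIGUSR1)); /* 1 */
	return 0;
}
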
@@ -526,22 +533,23 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 static int check_kill_permission(int sig, struct siginfo *info,
 				 struct task_struct *t)
 {
-	int error = -EINVAL;
+	int error;
+
 	if (!valid_signal(sig))
-		return error;
+		return -EINVAL;
 
-	if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
-		error = audit_signal_info(sig, t); /* Let audit system see the signal */
-		if (error)
-			return error;
-		error = -EPERM;
-		if (((sig != SIGCONT) ||
-			(task_session_nr(current) != task_session_nr(t)))
-		    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
-		    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
-		    && !capable(CAP_KILL))
+	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+		return 0;
+
+	error = audit_signal_info(sig, t); /* Let audit system see the signal */
+	if (error)
 		return error;
-	}
+
+	if (((sig != SIGCONT) || (task_session_nr(current) != task_session_nr(t)))
+	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
+	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
+	    && !capable(CAP_KILL))
+		return -EPERM;
 
 	return security_task_kill(t, info, sig, 0);
 }
@@ -558,9 +566,10 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
  */
 static void handle_stop_signal(int sig, struct task_struct *p)
 {
+	struct signal_struct *signal = p->signal;
 	struct task_struct *t;
 
-	if (p->signal->flags & SIGNAL_GROUP_EXIT)
+	if (signal->flags & SIGNAL_GROUP_EXIT)
 		/*
 		 * The process is in the middle of dying already.
 		 */
@@ -570,42 +579,22 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 		/*
 		 * This is a stop signal.  Remove SIGCONT from all queues.
 		 */
-		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
+		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
 		t = p;
 		do {
 			rm_from_queue(sigmask(SIGCONT), &t->pending);
-			t = next_thread(t);
-		} while (t != p);
+		} while_each_thread(p, t);
 	} else if (sig == SIGCONT) {
+		unsigned int why;
 		/*
 		 * Remove all stop signals from all queues,
 		 * and wake all threads.
 		 */
-		if (unlikely(p->signal->group_stop_count > 0)) {
-			/*
-			 * There was a group stop in progress.  We'll
-			 * pretend it finished before we got here.  We are
-			 * obliged to report it to the parent: if the
-			 * SIGSTOP happened "after" this SIGCONT, then it
-			 * would have cleared this pending SIGCONT.  If it
-			 * happened "before" this SIGCONT, then the parent
-			 * got the SIGCHLD about the stop finishing before
-			 * the continue happened.  We do the notification
-			 * now, and it's as if the stop had finished and
-			 * the SIGCHLD was pending on entry to this kill.
-			 */
-			p->signal->group_stop_count = 0;
-			p->signal->flags = SIGNAL_STOP_CONTINUED;
-			spin_unlock(&p->sighand->siglock);
-			do_notify_parent_cldstop(p, CLD_STOPPED);
-			spin_lock(&p->sighand->siglock);
-		}
-		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
+		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
 		t = p;
 		do {
 			unsigned int state;
 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-
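
The permission test rewritten above (the euid/uid xor chain plus CAP_KILL) can be probed from userspace with a null signal, which performs only the permission check. A hedged sketch, not from the patch; pid 1 is simply a convenient target an unprivileged user normally may not signal.

#include <errno.h>
#include <signal.h>
#include <stdio.h>

int main(void)
{
	/* Signal 0 performs the permission check without sending anything. */
	if (kill(1, 0) == -1 && errno == EPERM)
		printf("EPERM: no uid/euid match with pid 1 and no CAP_KILL\n");
	else
		printf("permitted (running as root, or with CAP_KILL)\n");
	return 0;
}
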
@@ -615,53 +604,71 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			/*
 			 * If there is a handler for SIGCONT, we must make
 			 * sure that no thread returns to user mode before
 			 * we post the signal, in case it was the only
 			 * thread eligible to run the signal handler--then
 			 * it must not do anything between resuming and
 			 * running the handler.  With the TIF_SIGPENDING
 			 * flag set, the thread will pause and acquire the
 			 * siglock that we hold now and until we've queued
-			 * the pending signal. 
+			 * the pending signal.
 			 *
 			 * Wake up the stopped thread _after_ setting
 			 * TIF_SIGPENDING
 			 */
-			state = TASK_STOPPED;
+			state = __TASK_STOPPED;
 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
 				set_tsk_thread_flag(t, TIF_SIGPENDING);
 				state |= TASK_INTERRUPTIBLE;
 			}
 			wake_up_state(t, state);
+		} while_each_thread(p, t);
 
-			t = next_thread(t);
-		} while (t != p);
-
-		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
-			/*
-			 * We were in fact stopped, and are now continued.
-			 * Notify the parent with CLD_CONTINUED.
-			 */
-			p->signal->flags = SIGNAL_STOP_CONTINUED;
-			p->signal->group_exit_code = 0;
-			spin_unlock(&p->sighand->siglock);
-			do_notify_parent_cldstop(p, CLD_CONTINUED);
-			spin_lock(&p->sighand->siglock);
+		/*
+		 * Notify the parent with CLD_CONTINUED if we were stopped.
+		 *
+		 * If we were in the middle of a group stop, we pretend it
+		 * was already finished, and then continued. Since SIGCHLD
+		 * doesn't queue we report only CLD_STOPPED, as if the next
+		 * CLD_CONTINUED was dropped.
+		 */
+		why = 0;
+		if (signal->flags & SIGNAL_STOP_STOPPED)
+			why |= SIGNAL_CLD_CONTINUED;
+		else if (signal->group_stop_count)
+			why |= SIGNAL_CLD_STOPPED;
+
+		if (why) {
+			signal->flags = why | SIGNAL_STOP_CONTINUED;
+			signal->group_stop_count = 0;
+			signal->group_exit_code = 0;
 		} else {
 			/*
 			 * We are not stopped, but there could be a stop
 			 * signal in the middle of being processed after
 			 * being removed from the queue.  Clear that too.
 			 */
-			p->signal->flags = 0;
+			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
 		}
 	} else if (sig == SIGKILL) {
 		/*
 		 * Make sure that any pending stop signal already dequeued
 		 * is undone by the wakeup for SIGKILL.
 		 */
-		p->signal->flags = 0;
+		signal->flags &= ~SIGNAL_STOP_DEQUEUED;
 	}
 }
 
+static inline int legacy_queue(struct sigpending *signals, int sig)
+{
+	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
+}
+
 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 			struct sigpending *signals)
 {
 	struct sigqueue * q = NULL;
-	int ret = 0;
+
+	/*
+	 * Short-circuit ignored signals and support queuing
+	 * exactly one non-rt signal, so that we can get more
+	 * detailed information about the cause of the signal.
+	 */
+	if (sig_ignored(t, sig) || legacy_queue(signals, sig))
+		return 0;
 
 	/*
 	 * Deliver the signal to listening signalfds.  This must be called
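
legacy_queue() above encodes the classic rule that a non-realtime signal is queued at most once while realtime signals accumulate. A small sketch, not part of the patch (plain POSIX C; SIGUSR1 and SIGRTMIN chosen arbitrarily):

#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t hits;

static void count(int sig)
{
	hits++;
}

int main(void)
{
	struct sigaction sa;
	sigset_t set;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = count;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);
	sigaction(SIGRTMIN, &sa, NULL);

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1); raise(SIGUSR1); raise(SIGUSR1);    /* coalesce to one */
	raise(SIGRTMIN); raise(SIGRTMIN); raise(SIGRTMIN); /* three queued */

	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* everything delivered here */
	printf("handler ran %d times (expect 4 = 1 + 3)\n", (int)hits);
	return 0;
}
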
@@ -719,12 +726,9 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 
 out_set:
 	sigaddset(&signals->signal, sig);
-	return ret;
+	return 1;
 }
 
-#define LEGACY_QUEUE(sigptr, sig) \
-	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
-
 int print_fatal_signals;
 
 static void print_fatal_signal(struct pt_regs *regs, int signr)
@@ -733,13 +737,13 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
 		current->comm, task_pid_nr(current), signr);
 
 #if defined(__i386__) && !defined(__arch_um__)
-	printk("code at %08lx: ", regs->eip);
+	printk("code at %08lx: ", regs->ip);
 	{
 		int i;
 		for (i = 0; i < 16; i++) {
 			unsigned char insn;
 
-			__get_user(insn, (unsigned char *)(regs->eip + i));
+			__get_user(insn, (unsigned char *)(regs->ip + i));
 			printk("%02x ", insn);
 		}
 	}
@@ -760,26 +764,18 @@ __setup("print-fatal-signals=", setup_print_fatal_signals);
 static int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
-	int ret = 0;
+	int ret;
 
 	BUG_ON(!irqs_disabled());
 	assert_spin_locked(&t->sighand->siglock);
 
-	/* Short-circuit ignored signals. */
-	if (sig_ignored(t, sig))
-		goto out;
-
-	/* Support queueing exactly one non-rt signal, so that we
-	   can get more detailed information about the cause of
-	   the signal. */
-	if (LEGACY_QUEUE(&t->pending, sig))
-		goto out;
-
 	ret = send_signal(sig, info, t, &t->pending);
-	if (!ret && !sigismember(&t->blocked, sig))
+	if (ret <= 0)
+		return ret;
+
+	if (!sigismember(&t->blocked, sig))
 		signal_wake_up(t, sig == SIGKILL);
-out:
-	return ret;
+	return 0;
 }
 
 /*
@@ -838,7 +834,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
 		return 0;
 	if (sig == SIGKILL)
 		return 1;
-	if (p->state & (TASK_STOPPED | TASK_TRACED))
+	if (task_is_stopped_or_traced(p))
 		return 0;
 	return task_curr(p) || !signal_pending(p);
 }
@@ -846,6 +842,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
 static void
 __group_complete_signal(int sig, struct task_struct *p)
 {
+	struct signal_struct *signal = p->signal;
 	struct task_struct *t;
 
 	/*
@@ -866,14 +863,14 @@ __group_complete_signal(int sig, struct task_struct *p)
 	/*
 	 * Otherwise try to find a suitable thread.
 	 */
-	t = p->signal->curr_target;
+	t = signal->curr_target;
 	if (t == NULL)
 		/* restart balancing at this thread */
-		t = p->signal->curr_target = p;
+		t = signal->curr_target = p;
 
 	while (!wants_signal(sig, t)) {
 		t = next_thread(t);
-		if (t == p->signal->curr_target)
+		if (t == signal->curr_target)
 			/*
 			 * No thread needs to be woken.
 			 * Any eligible threads will see
@@ -881,14 +878,14 @@ __group_complete_signal(int sig, struct task_struct *p)
 			 */
 			return;
 	}
-	p->signal->curr_target = t;
+	signal->curr_target = t;
 
 	/*
 	 * Found a killable thread.  If the signal will be fatal,
 	 * then start taking the whole group down immediately.
 	 */
-	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
+	if (sig_fatal(p, sig) && !(signal->flags & SIGNAL_GROUP_EXIT) &&
 	    !sigismember(&t->real_blocked, sig) &&
 	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
 		/*
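
The stop/continue bookkeeping done by handle_stop_signal() and reported through do_notify_parent_cldstop() surfaces in userspace as WUNTRACED/WCONTINUED wait statuses. A minimal sketch, not part of the patch:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0)			/* child just idles */
		for (;;)
			pause();

	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);		/* removes queued stop signals, wakes threads */
	waitpid(pid, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		printf("child continued\n");

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}
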
@@ -901,9 +898,9 @@ __group_complete_signal(int sig, struct task_struct *p)
 		 * running and doing things after a slower
 		 * thread has the fatal signal pending.
 		 */
-		p->signal->flags = SIGNAL_GROUP_EXIT;
-		p->signal->group_exit_code = sig;
-		p->signal->group_stop_count = 0;
+		signal->flags = SIGNAL_GROUP_EXIT;
+		signal->group_exit_code = sig;
+		signal->group_stop_count = 0;
 		t = p;
 		do {
 			sigaddset(&t->pending.signal, SIGKILL);
@@ -911,27 +908,6 @@ __group_complete_signal(int sig, struct task_struct *p)
 		} while_each_thread(p, t);
 		return;
 	}
-
-	/*
-	 * There will be a core dump.  We make all threads other
-	 * than the chosen one go into a group stop so that nothing
-	 * happens until it gets scheduled, takes the signal off
-	 * the shared queue, and does the core dump.  This is a
-	 * little more complicated than strictly necessary, but it
-	 * keeps the signal state that winds up in the core dump
-	 * unchanged from the death state, e.g. which thread had
-	 * the core-dump signal unblocked.
-	 */
-	rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-	rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
-	p->signal->group_stop_count = 0;
-	p->signal->group_exit_task = t;
-	p = t;
-	do {
-		p->signal->group_stop_count++;
-		signal_wake_up(t, t == p);
-	} while_each_thread(p, t);
-	return;
 }
 
 /*
@@ -945,26 +921,18 @@ __group_complete_signal(int sig, struct task_struct *p)
 int
 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
-	int ret = 0;
+	int ret;
 
 	assert_spin_locked(&p->sighand->siglock);
 	handle_stop_signal(sig, p);
 
-	/* Short-circuit ignored signals. */
-	if (sig_ignored(p, sig))
-		return ret;
-
-	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
-		/* This is a non-RT signal and we already have one queued.  */
-		return ret;
-
 	/*
 	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
 	 * We always use the shared queue for process-wide signals,
 	 * to avoid several races.
 	 */
 	ret = send_signal(sig, info, p, &p->signal->shared_pending);
-	if (unlikely(ret))
+	if (ret <= 0)
 		return ret;
 
 	__group_complete_signal(sig, p);
@@ -978,7 +946,6 @@ void zap_other_threads(struct task_struct *p)
 {
 	struct task_struct *t;
 
-	p->signal->flags = SIGNAL_GROUP_EXIT;
 	p->signal->group_stop_count = 0;
 
 	for (t = next_thread(p); t != p; t = next_thread(t)) {
@@ -994,13 +961,17 @@ void zap_other_threads(struct task_struct *p)
 	}
 }
 
-/*
- * Must be called under rcu_read_lock() or with tasklist_lock read-held.
- */
+int __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+EXPORT_SYMBOL(__fatal_signal_pending);
+
 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
 {
 	struct sighand_struct *sighand;
 
+	rcu_read_lock();
 	for (;;) {
 		sighand = rcu_dereference(tsk->sighand);
 		if (unlikely(sighand == NULL))
@@ -1011,6 +982,7 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
 			break;
 		spin_unlock_irqrestore(&sighand->siglock, *flags);
 	}
+	rcu_read_unlock();
 
 	return sighand;
 }
@@ -1034,7 +1006,7 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 }
 
 /*
- * kill_pgrp_info() sends a signal to a process group: this is what the tty
+ * __kill_pgrp_info() sends a signal to a process group: this is what the tty
  * control characters do (^C, ^Z etc)
 */
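
The SIGNAL_GROUP_EXIT path above is why one fatal signal takes down every thread in the group: SIGKILL is added to each thread's private pending set. A userspace sketch, not from the patch (compile with -pthread):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void *idle(void *arg)
{
	for (;;)
		pause();
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, idle, NULL);
	pthread_create(&t2, NULL, idle, NULL);

	printf("three threads; one SIGTERM ends them all\n");
	kill(getpid(), SIGTERM);	/* fatal: the whole thread group exits */
	sleep(5);
	printf("never reached\n");	/* all threads are already gone */
	return 0;
}
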
@@ -1053,34 +1025,27 @@ int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
 	return success ? 0 : retval;
 }
 
-int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
-{
-	int retval;
-
-	read_lock(&tasklist_lock);
-	retval = __kill_pgrp_info(sig, info, pgrp);
-	read_unlock(&tasklist_lock);
-
-	return retval;
-}
-
 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
 {
-	int error;
+	int error = -ESRCH;
 	struct task_struct *p;
 
 	rcu_read_lock();
-	if (unlikely(sig_needs_tasklist(sig)))
-		read_lock(&tasklist_lock);
-
+retry:
 	p = pid_task(pid, PIDTYPE_PID);
-	error = -ESRCH;
-	if (p)
+	if (p) {
 		error = group_send_sig_info(sig, info, p);
-
-	if (unlikely(sig_needs_tasklist(sig)))
-		read_unlock(&tasklist_lock);
+		if (unlikely(error == -ESRCH))
+			/*
+			 * The task was unhashed in between, try again.
+			 * If it is dead, pid_task() will return NULL,
+			 * if we race with de_thread() it will find the
+			 * new leader.
+			 */
+			goto retry;
+	}
 	rcu_read_unlock();
+
 	return error;
 }
 
@@ -1141,14 +1106,22 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
 static int kill_something_info(int sig, struct siginfo *info, int pid)
 {
 	int ret;
-	rcu_read_lock();
-	if (!pid) {
-		ret = kill_pgrp_info(sig, info, task_pgrp(current));
-	} else if (pid == -1) {
+
+	if (pid > 0) {
+		rcu_read_lock();
+		ret = kill_pid_info(sig, info, find_vpid(pid));
+		rcu_read_unlock();
+		return ret;
+	}
+
+	read_lock(&tasklist_lock);
+	if (pid != -1) {
+		ret = __kill_pgrp_info(sig, info,
+				pid ? find_vpid(-pid) : task_pgrp(current));
+	} else {
 		int retval = 0, count = 0;
 		struct task_struct * p;
 
-		read_lock(&tasklist_lock);
 		for_each_process(p) {
 			if (p->pid > 1 && !same_thread_group(p, current)) {
 				int err = group_send_sig_info(sig, info, p);
@@ -1157,14 +1130,10 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
 				retval = err;
 			}
 		}
-		read_unlock(&tasklist_lock);
 		ret = count ? retval : -ESRCH;
-	} else if (pid < 0) {
-		ret = kill_pgrp_info(sig, info, find_vpid(-pid));
-	} else {
-		ret = kill_pid_info(sig, info, find_vpid(pid));
 	}
-	rcu_read_unlock();
+	read_unlock(&tasklist_lock);
+
 	return ret;
 }
 
@@ -1212,20 +1181,6 @@ send_sig(int sig, struct task_struct *p, int priv)
 	return send_sig_info(sig, __si_special(priv), p);
 }
 
-/*
- * This is the entry point for "process-wide" signals.
- * They will go to an appropriate thread in the thread group.
- */
-int
-send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
-{
-	int ret;
-	read_lock(&tasklist_lock);
-	ret = group_send_sig_info(sig, info, p);
-	read_unlock(&tasklist_lock);
-	return ret;
-}
-
 void
 force_sig(int sig, struct task_struct *p)
 {
@@ -1253,7 +1208,13 @@ force_sigsegv(int sig, struct task_struct *p)
 
 int kill_pgrp(struct pid *pid, int sig, int priv)
 {
-	return kill_pgrp_info(sig, __si_special(priv), pid);
+	int ret;
+
+	read_lock(&tasklist_lock);
+	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
+	read_unlock(&tasklist_lock);
+
+	return ret;
 }
 EXPORT_SYMBOL(kill_pgrp);
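
kill_something_info() dispatches on the sign of pid; the same encodings are visible through the kill() system call. A sketch, not part of the patch (the child process here is purely illustrative):

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		setpgid(0, 0);		/* own process group, pgid == pid */
		for (;;)
			pause();
	}
	setpgid(child, child);		/* avoid racing the child's setpgid() */

	kill(-child, SIGTERM);		/* pid < -1: every member of pgrp |pid| */
	/* kill(child, sig) would target one process (pid > 0),
	 * kill(0, sig) our own process group,
	 * kill(-1, sig) everything we are permitted to signal. */

	waitpid(child, NULL, 0);
	printf("process group %d signalled\n", (int)child);
	return 0;
}
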
+ */ + + BUG_ON(q->info.si_code != SI_TIMER); + q->info.si_overrun++; + return 0; + } + + if (sig_ignored(t, sig)) + return 1; + + signalfd_notify(t, sig); + list_add_tail(&q->list, &pending->list); + sigaddset(&pending->signal, sig); + return 0; +} + int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) { unsigned long flags; - int ret = 0; + int ret = -1; BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); @@ -1328,43 +1314,16 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) * We return -1, when the task is marked exiting, so * posix_timer_event can redirect it to the group leader */ - rcu_read_lock(); - - if (!likely(lock_task_sighand(p, &flags))) { - ret = -1; + if (!likely(lock_task_sighand(p, &flags))) goto out_err; - } - if (unlikely(!list_empty(&q->list))) { - /* - * If an SI_TIMER entry is already queue just increment - * the overrun count. - */ - BUG_ON(q->info.si_code != SI_TIMER); - q->info.si_overrun++; - goto out; - } - /* Short-circuit ignored signals. */ - if (sig_ignored(p, sig)) { - ret = 1; - goto out; - } - /* - * Deliver the signal to listening signalfds. This must be called - * with the sighand lock held. - */ - signalfd_notify(p, sig); + ret = do_send_sigqueue(sig, q, p, &p->pending); - list_add_tail(&q->list, &p->pending.list); - sigaddset(&p->pending.signal, sig); if (!sigismember(&p->blocked, sig)) signal_wake_up(p, sig == SIGKILL); -out: unlock_task_sighand(p, &flags); out_err: - rcu_read_unlock(); - return ret; } @@ -1372,47 +1331,18 @@ int send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) { unsigned long flags; - int ret = 0; + int ret; BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); read_lock(&tasklist_lock); /* Since it_lock is held, p->sighand cannot be NULL. */ spin_lock_irqsave(&p->sighand->siglock, flags); - handle_stop_signal(sig, p); - /* Short-circuit ignored signals. */ - if (sig_ignored(p, sig)) { - ret = 1; - goto out; - } - - if (unlikely(!list_empty(&q->list))) { - /* - * If an SI_TIMER entry is already queue just increment - * the overrun count. Other uses should not try to - * send the signal multiple times. - */ - BUG_ON(q->info.si_code != SI_TIMER); - q->info.si_overrun++; - goto out; - } - /* - * Deliver the signal to listening signalfds. This must be called - * with the sighand lock held. - */ - signalfd_notify(p, sig); - - /* - * Put this signal on the shared-pending queue. - * We always use the shared queue for process-wide signals, - * to avoid several races. - */ - list_add_tail(&q->list, &p->signal->shared_pending.list); - sigaddset(&p->signal->shared_pending.signal, sig); + ret = do_send_sigqueue(sig, q, p, &p->signal->shared_pending); __group_complete_signal(sig, p); -out: + spin_unlock_irqrestore(&p->sighand->siglock, flags); read_unlock(&tasklist_lock); return ret; @@ -1441,7 +1371,7 @@ void do_notify_parent(struct task_struct *tsk, int sig) BUG_ON(sig == -1); /* do_notify_parent_cldstop should have been called instead. */ - BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED)); + BUG_ON(task_is_stopped_or_traced(tsk)); BUG_ON(!tsk->ptrace && (tsk->group_leader != tsk || !thread_group_empty(tsk))); @@ -1572,11 +1502,6 @@ static inline int may_ptrace_stop(void) { if (!likely(current->ptrace & PT_PTRACED)) return 0; - - if (unlikely(current->parent == current->real_parent && - (current->ptrace & PT_ATTACHED))) - return 0; - /* * Are we in the middle of do_coredump? 
@@ -1572,11 +1502,6 @@ static inline int may_ptrace_stop(void)
 {
 	if (!likely(current->ptrace & PT_PTRACED))
 		return 0;
-
-	if (unlikely(current->parent == current->real_parent &&
-		     (current->ptrace & PT_ATTACHED)))
-		return 0;
-
 	/*
 	 * Are we in the middle of do_coredump?
 	 * If so and our tracer is also part of the coredump stopping
@@ -1593,6 +1518,17 @@ static inline int may_ptrace_stop(void)
 	return 1;
 }
 
+/*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static int sigkill_pending(struct task_struct *tsk)
+{
+	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
+		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
+		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
+}
+
 /*
  * This must be called with current->sighand->siglock held.
  *
@@ -1601,11 +1537,31 @@ static inline int may_ptrace_stop(void)
  * That makes it a way to test a stopped process for
  * being ptrace-stopped vs being job-control-stopped.
 *
- * If we actually decide not to stop at all because the tracer is gone,
- * we leave nostop_code in current->exit_code.
+ * If we actually decide not to stop at all because the tracer
+ * is gone, we keep current->exit_code unless clear_code.
 */
-static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
+static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 {
+	int killed = 0;
+
+	if (arch_ptrace_stop_needed(exit_code, info)) {
+		/*
+		 * The arch code has something special to do before a
+		 * ptrace stop.  This is allowed to block, e.g. for faults
+		 * on user stack pages.  We can't keep the siglock while
+		 * calling arch_ptrace_stop, so we must release it now.
+		 * To preserve proper semantics, we must do this before
+		 * any signal bookkeeping like checking group_stop_count.
+		 * Meanwhile, a SIGKILL could come in before we retake the
+		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
+		 * So after regaining the lock, we must check for SIGKILL.
+		 */
+		spin_unlock_irq(&current->sighand->siglock);
+		arch_ptrace_stop(exit_code, info);
+		spin_lock_irq(&current->sighand->siglock);
+		killed = sigkill_pending(current);
+	}
+
 	/*
 	 * If there is a group stop in progress,
 	 * we must participate in the bookkeeping.
@@ -1617,24 +1573,31 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 	current->exit_code = exit_code;
 
 	/* Let the debugger run.  */
-	set_current_state(TASK_TRACED);
+	__set_current_state(TASK_TRACED);
 	spin_unlock_irq(&current->sighand->siglock);
-	try_to_freeze();
 	read_lock(&tasklist_lock);
-	if (may_ptrace_stop()) {
+	if (!unlikely(killed) && may_ptrace_stop()) {
 		do_notify_parent_cldstop(current, CLD_TRAPPED);
 		read_unlock(&tasklist_lock);
 		schedule();
 	} else {
 		/*
 		 * By the time we got the lock, our tracer went away.
-		 * Don't stop here.
+		 * Don't drop the lock yet, another tracer may come.
 		 */
+		__set_current_state(TASK_RUNNING);
+		if (clear_code)
+			current->exit_code = 0;
 		read_unlock(&tasklist_lock);
-		set_current_state(TASK_RUNNING);
-		current->exit_code = nostop_code;
 	}
 
+	/*
+	 * While in TASK_TRACED, we were considered "frozen enough".
+	 * Now that we woke up, it's crucial if we're supposed to be
+	 * frozen that we freeze now before running anything substantial.
+	 */
+	try_to_freeze();
+
 	/*
 	 * We are back.  Now reacquire the siglock before touching
 	 * last_siginfo, so that we are sure to have synchronized with
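
ptrace_stop() is the kernel side of the familiar tracer handshake: the tracee enters TASK_TRACED and the tracer observes it with waitpid(). A minimal userspace sketch, not part of the patch:

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* tracee parks in ptrace_stop() */
		_exit(0);
	}

	waitpid(pid, &status, 0);	/* tracer sees the traced stop */
	printf("child stopped, signal %d\n", WSTOPSIG(status));
	ptrace(PTRACE_CONT, pid, NULL, 0);
	waitpid(pid, &status, 0);
	printf("child exited normally: %d\n", WIFEXITED(status));
	return 0;
}
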
@@ -1665,7 +1628,7 @@ void ptrace_notify(int exit_code)
 
 	/* Let the debugger run.  */
 	spin_lock_irq(&current->sighand->siglock);
-	ptrace_stop(exit_code, 0, &info);
+	ptrace_stop(exit_code, 1, &info);
 	spin_unlock_irq(&current->sighand->siglock);
 }
 
@@ -1703,9 +1666,6 @@ static int do_signal_stop(int signr)
 	struct signal_struct *sig = current->signal;
 	int stop_count;
 
-	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
-		return 0;
-
 	if (sig->group_stop_count > 0) {
 		/*
 		 * There is a group stop in progress.  We don't need to
@@ -1713,12 +1673,15 @@ static int do_signal_stop(int signr)
 		 */
 		stop_count = --sig->group_stop_count;
 	} else {
+		struct task_struct *t;
+
+		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+		    unlikely(signal_group_exit(sig)))
+			return 0;
 		/*
 		 * There is no group stop already in progress.
 		 * We must initiate one now.
 		 */
-		struct task_struct *t;
-
 		sig->group_exit_code = signr;
 
 		stop_count = 0;
@@ -1728,8 +1691,8 @@ static int do_signal_stop(int signr)
 			 * stop is always done with the siglock held,
 			 * so this check has no races.
 			 */
-			if (!t->exit_state &&
-			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+			if (!(t->flags & PF_EXITING) &&
+			    !task_is_stopped_or_traced(t)) {
 				stop_count++;
 				signal_wake_up(t, 0);
 			}
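
ptrace_notify() now passes clear_code so a vanished tracer does not leave a stale verdict in ->exit_code; when a tracer is present, that same verdict is what lets it cancel a signal entirely. A sketch of the cancellation, not from the patch (the tracer resumes with data == 0, so the tracee's handler never runs):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

static void on_usr1(int sig)
{
	printf("handler ran (it should not)\n");
}

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0) {
		signal(SIGUSR1, on_usr1);
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGUSR1);		/* stops here, before delivery */
		printf("child: signal was cancelled by the tracer\n");
		_exit(0);
	}

	waitpid(pid, &status, 0);
	printf("tracer: child stopped on %d, cancelling it\n", WSTOPSIG(status));
	ptrace(PTRACE_CONT, pid, NULL, 0);	/* data 0: no signal injected */
	waitpid(pid, &status, 0);
	return 0;
}
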
- */ - stop_count = --current->signal->group_stop_count; - if (stop_count == 0) - current->signal->flags = SIGNAL_STOP_STOPPED; - current->exit_code = current->signal->group_exit_code; - set_current_state(TASK_STOPPED); - spin_unlock_irq(¤t->sighand->siglock); - finish_stop(stop_count); - return 1; + return signr; } int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie) { - sigset_t *mask = ¤t->blocked; - int signr = 0; + struct sighand_struct *sighand = current->sighand; + struct signal_struct *signal = current->signal; + int signr; +relock: + /* + * We'll jump back here after any time we were stopped in TASK_STOPPED. + * While in TASK_STOPPED, we were considered "frozen enough". + * Now that we woke up, it's crucial if we're supposed to be + * frozen that we freeze now before running anything substantial. + */ try_to_freeze(); -relock: - spin_lock_irq(¤t->sighand->siglock); + spin_lock_irq(&sighand->siglock); + + if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { + int why = (signal->flags & SIGNAL_STOP_CONTINUED) + ? CLD_CONTINUED : CLD_STOPPED; + signal->flags &= ~SIGNAL_CLD_MASK; + spin_unlock_irq(&sighand->siglock); + + read_lock(&tasklist_lock); + do_notify_parent_cldstop(current->group_leader, why); + read_unlock(&tasklist_lock); + goto relock; + } + for (;;) { struct k_sigaction *ka; - if (unlikely(current->signal->group_stop_count > 0) && - handle_group_stop()) + if (unlikely(signal->group_stop_count > 0) && + do_signal_stop(0)) goto relock; - signr = dequeue_signal(current, mask, info); - + signr = dequeue_signal(current, ¤t->blocked, info); if (!signr) break; /* will return 0 */ - if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { - ptrace_signal_deliver(regs, cookie); - - /* Let the debugger run. */ - ptrace_stop(signr, signr, info); - - /* We're back. Did the debugger cancel the sig? */ - signr = current->exit_code; - if (signr == 0) - continue; - - current->exit_code = 0; - - /* Update the siginfo structure if the signal has - changed. If the debugger wanted something - specific in the siginfo structure then it should - have updated *info via PTRACE_SETSIGINFO. */ - if (signr != info->si_signo) { - info->si_signo = signr; - info->si_errno = 0; - info->si_code = SI_USER; - info->si_pid = task_pid_vnr(current->parent); - info->si_uid = current->parent->uid; - } - - /* If the (new) signal is now blocked, requeue it. */ - if (sigismember(¤t->blocked, signr)) { - specific_send_sig_info(signr, info, current); + if (signr != SIGKILL) { + signr = ptrace_signal(signr, info, regs, cookie); + if (!signr) continue; - } } - ka = ¤t->sighand->action[signr-1]; + ka = &sighand->action[signr-1]; if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ continue; if (ka->sa.sa_handler != SIG_DFL) { @@ -1878,14 +1832,14 @@ relock: * We need to check for that and bail out if necessary. */ if (signr != SIGSTOP) { - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irq(&sighand->siglock); /* signals can be posted during this window */ if (is_current_pgrp_orphaned()) goto relock; - spin_lock_irq(¤t->sighand->siglock); + spin_lock_irq(&sighand->siglock); } if (likely(do_signal_stop(signr))) { @@ -1900,7 +1854,7 @@ relock: continue; } - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irq(&sighand->siglock); /* * Anything else is fatal, maybe with a core dump. 
@@ -1926,10 +1880,52 @@ relock:
 		do_group_exit(signr);
 		/* NOTREACHED */
 	}
-	spin_unlock_irq(&current->sighand->siglock);
+	spin_unlock_irq(&sighand->siglock);
 	return signr;
 }
 
+void exit_signals(struct task_struct *tsk)
+{
+	int group_stop = 0;
+	struct task_struct *t;
+
+	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
+		tsk->flags |= PF_EXITING;
+		return;
+	}
+
+	spin_lock_irq(&tsk->sighand->siglock);
+	/*
+	 * From now this task is not visible for group-wide signals,
+	 * see wants_signal(), do_signal_stop().
+	 */
+	tsk->flags |= PF_EXITING;
+	if (!signal_pending(tsk))
+		goto out;
+
+	/* It could be that __group_complete_signal() choose us to
+	 * notify about group-wide signal. Another thread should be
+	 * woken now to take the signal since we will not.
+	 */
+	for (t = tsk; (t = next_thread(t)) != tsk; )
+		if (!signal_pending(t) && !(t->flags & PF_EXITING))
+			recalc_sigpending_and_wake(t);
+
+	if (unlikely(tsk->signal->group_stop_count) &&
+			!--tsk->signal->group_stop_count) {
+		tsk->signal->flags = SIGNAL_STOP_STOPPED;
+		group_stop = 1;
+	}
+out:
+	spin_unlock_irq(&tsk->sighand->siglock);
+
+	if (unlikely(group_stop)) {
+		read_lock(&tasklist_lock);
+		do_notify_parent_cldstop(tsk, CLD_STOPPED);
+		read_unlock(&tasklist_lock);
+	}
+}
+
 EXPORT_SYMBOL(recalc_sigpending);
 EXPORT_SYMBOL_GPL(dequeue_signal);
 EXPORT_SYMBOL(flush_signals);
@@ -2308,13 +2304,14 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
 
 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
+	struct task_struct *t = current;
 	struct k_sigaction *k;
 	sigset_t mask;
 
 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
 		return -EINVAL;
 
-	k = &current->sighand->action[sig-1];
+	k = &t->sighand->action[sig-1];
 
 	spin_lock_irq(&current->sighand->siglock);
 	if (oact)
@@ -2335,9 +2332,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 		 * (for example, SIGCHLD), shall cause the pending signal to
 		 * be discarded, whether or not it is blocked"
 		 */
-		if (act->sa.sa_handler == SIG_IGN ||
-		    (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
-			struct task_struct *t = current;
+		if (__sig_ignored(t, sig)) {
 			sigemptyset(&mask);
 			sigaddset(&mask, sig);
 			rm_from_queue_full(&mask, &t->signal->shared_pending);
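
The __sig_ignored() test at the end of do_sigaction() implements the POSIX rule quoted in the code: switching a signal to an ignored disposition flushes already-pending instances. A sketch, not from the patch:

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);			/* blocked, so it stays pending */
	sigpending(&pending);
	printf("before SIG_IGN: pending=%d\n", sigismember(&pending, SIGUSR1));

	signal(SIGUSR1, SIG_IGN);	/* do_sigaction() discards it */
	sigpending(&pending);
	printf("after  SIG_IGN: pending=%d\n", sigismember(&pending, SIGUSR1));
	return 0;
}
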