Merge branch 'siginfo-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebieder...
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 21 Aug 2018 20:47:29 +0000 (13:47 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 21 Aug 2018 20:47:29 +0000 (13:47 -0700)
Pull core signal handling updates from Eric Biederman:
 "It was observed that a periodic timer in combination with a
  sufficiently expensive fork could prevent fork from ever completing.
  This contains the changes to remove the need for that restart.

  This set of changes is split into several parts:

   - The first part makes PIDTYPE_TGID a proper pid type instead of
     something only for very special cases. The part starts using
     PIDTYPE_TGID enough so that in __send_signal where signals are
     actually delivered we know if the signal is being sent to a group
     of processes or just a single process.

   - With that prep work out of the way the logic in fork is modified so
     that fork logically makes signals received while it is running
     appear to be received after the fork completes"

* 'siginfo-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace: (22 commits)
  signal: Don't send signals to tasks that don't exist
  signal: Don't restart fork when signals come in.
  fork: Have new threads join on-going signal group stops
  fork: Skip setting TIF_SIGPENDING in ptrace_init_task
  signal: Add calculate_sigpending()
  fork: Unconditionally exit if a fatal signal is pending
  fork: Move and describe why the code examines PIDNS_ADDING
  signal: Push pid type down into complete_signal.
  signal: Push pid type down into __send_signal
  signal: Push pid type down into send_signal
  signal: Pass pid type into do_send_sig_info
  signal: Pass pid type into send_sigio_to_task & send_sigurg_to_task
  signal: Pass pid type into group_send_sig_info
  signal: Pass pid and pid type into send_sigqueue
  posix-timers: Noralize good_sigevent
  signal: Use PIDTYPE_TGID to clearly store where file signals will be sent
  pid: Implement PIDTYPE_TGID
  pids: Move the pgrp and session pid pointers from task_struct to signal_struct
  kvm: Don't open code task_pid in kvm_vcpu_ioctl
  pids: Compute task_tgid using signal->leader_pid
  ...

33 files changed:
arch/ia64/kernel/asm-offsets.c
arch/ia64/kernel/fsys.S
arch/s390/kernel/perf_cpum_sf.c
drivers/net/tun.c
drivers/platform/x86/thinkpad_acpi.c
drivers/tty/sysrq.c
drivers/tty/tty_io.c
fs/autofs/autofs_i.h
fs/exec.c
fs/fcntl.c
fs/fuse/file.c
fs/locks.c
fs/notify/dnotify/dnotify.c
fs/notify/fanotify/fanotify.c
include/linux/init_task.h
include/linux/pid.h
include/linux/ptrace.h
include/linux/sched.h
include/linux/sched/signal.h
include/linux/signal.h
include/net/scm.h
init/init_task.c
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/pid.c
kernel/sched/core.c
kernel/signal.c
kernel/time/itimer.c
kernel/time/posix-cpu-timers.c
kernel/time/posix-timers.c
mm/oom_kill.c
virt/kvm/kvm_main.c

index f4db216..00e8e2a 100644 (file)
@@ -50,8 +50,7 @@ void foo(void)
 
        DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
        DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
-       DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
-       DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid));
+       DEFINE(IA64_TASK_THREAD_PID_OFFSET,offsetof (struct task_struct, thread_pid));
        DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
        DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
        DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
@@ -68,6 +67,7 @@ void foo(void)
        DEFINE(IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,offsetof (struct signal_struct,
                                                             group_stop_count));
        DEFINE(IA64_SIGNAL_SHARED_PENDING_OFFSET,offsetof (struct signal_struct, shared_pending));
+       DEFINE(IA64_SIGNAL_PIDS_TGID_OFFSET, offsetof (struct signal_struct, pids[PIDTYPE_TGID]));
 
        BLANK();
 
index fe742ff..d80c99a 100644 (file)
@@ -62,16 +62,16 @@ ENTRY(fsys_getpid)
        .prologue
        .altrp b6
        .body
-       add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
+       add r17=IA64_TASK_SIGNAL_OFFSET,r16
        ;;
-       ld8 r17=[r17]                           // r17 = current->group_leader
+       ld8 r17=[r17]                           // r17 = current->signal
        add r9=TI_FLAGS+IA64_TASK_SIZE,r16
        ;;
        ld4 r9=[r9]
-       add r17=IA64_TASK_TGIDLINK_OFFSET,r17
+       add r17=IA64_SIGNAL_PIDS_TGID_OFFSET,r17
        ;;
        and r9=TIF_ALLWORK_MASK,r9
-       ld8 r17=[r17]                           // r17 = current->group_leader->pids[PIDTYPE_PID].pid
+       ld8 r17=[r17]                           // r17 = current->signal->pids[PIDTYPE_TGID]
        ;;
        add r8=IA64_PID_LEVEL_OFFSET,r17
        ;;
@@ -96,11 +96,11 @@ ENTRY(fsys_set_tid_address)
        .altrp b6
        .body
        add r9=TI_FLAGS+IA64_TASK_SIZE,r16
-       add r17=IA64_TASK_TGIDLINK_OFFSET,r16
+       add r17=IA64_TASK_THREAD_PID_OFFSET,r16
        ;;
        ld4 r9=[r9]
        tnat.z p6,p7=r32                // check argument register for being NaT
-       ld8 r17=[r17]                           // r17 = current->pids[PIDTYPE_PID].pid
+       ld8 r17=[r17]                           // r17 = current->thread_pid
        ;;
        and r9=TIF_ALLWORK_MASK,r9
        add r8=IA64_PID_LEVEL_OFFSET,r17
index cb198d4..5c53e97 100644 (file)
@@ -665,7 +665,7 @@ static void cpumsf_output_event_pid(struct perf_event *event,
                goto out;
 
        /* Update the process ID (see also kernel/events/core.c) */
-       data->tid_entry.pid = cpumsf_pid_type(event, pid, __PIDTYPE_TGID);
+       data->tid_entry.pid = cpumsf_pid_type(event, pid, PIDTYPE_TGID);
        data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID);
 
        perf_output_sample(&handle, &header, data, event);
index 2bbefe8..ebd07ad 100644 (file)
@@ -3217,7 +3217,7 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
                goto out;
 
        if (on) {
-               __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
+               __f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
                tfile->flags |= TUN_FASYNC;
        } else
                tfile->flags &= ~TUN_FASYNC;
index cae9b05..d556e95 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/delay.h>
index 6364890..06ed20d 100644 (file)
@@ -348,7 +348,7 @@ static void send_sig_all(int sig)
                if (is_global_init(p))
                        continue;
 
-               do_send_sig_info(sig, SEND_SIG_FORCED, p, true);
+               do_send_sig_info(sig, SEND_SIG_FORCED, p, PIDTYPE_MAX);
        }
        read_unlock(&tasklist_lock);
 }
index 11c2df9..32bc3e3 100644 (file)
@@ -2113,7 +2113,7 @@ static int __tty_fasync(int fd, struct file *filp, int on)
                        type = PIDTYPE_PGID;
                } else {
                        pid = task_pid(current);
-                       type = PIDTYPE_PID;
+                       type = PIDTYPE_TGID;
                }
                get_pid(pid);
                spin_unlock_irqrestore(&tty->ctrl_lock, flags);
index 9400a9f..5028122 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/string.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/uaccess.h>
index bdd0eac..1ebf6e5 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1145,6 +1145,7 @@ static int de_thread(struct task_struct *tsk)
                 */
                tsk->pid = leader->pid;
                change_pid(tsk, PIDTYPE_PID, task_pid(leader));
+               transfer_pid(leader, tsk, PIDTYPE_TGID);
                transfer_pid(leader, tsk, PIDTYPE_PGID);
                transfer_pid(leader, tsk, PIDTYPE_SID);
 
index 12273b6..4137d96 100644 (file)
@@ -116,7 +116,7 @@ int f_setown(struct file *filp, unsigned long arg, int force)
        struct pid *pid = NULL;
        int who = arg, ret = 0;
 
-       type = PIDTYPE_PID;
+       type = PIDTYPE_TGID;
        if (who < 0) {
                /* avoid overflow below */
                if (who == INT_MIN)
@@ -143,7 +143,7 @@ EXPORT_SYMBOL(f_setown);
 
 void f_delown(struct file *filp)
 {
-       f_modown(filp, NULL, PIDTYPE_PID, 1);
+       f_modown(filp, NULL, PIDTYPE_TGID, 1);
 }
 
 pid_t f_getown(struct file *filp)
@@ -171,11 +171,11 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
 
        switch (owner.type) {
        case F_OWNER_TID:
-               type = PIDTYPE_MAX;
+               type = PIDTYPE_PID;
                break;
 
        case F_OWNER_PID:
-               type = PIDTYPE_PID;
+               type = PIDTYPE_TGID;
                break;
 
        case F_OWNER_PGRP:
@@ -206,11 +206,11 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
        read_lock(&filp->f_owner.lock);
        owner.pid = pid_vnr(filp->f_owner.pid);
        switch (filp->f_owner.pid_type) {
-       case PIDTYPE_MAX:
+       case PIDTYPE_PID:
                owner.type = F_OWNER_TID;
                break;
 
-       case PIDTYPE_PID:
+       case PIDTYPE_TGID:
                owner.type = F_OWNER_PID;
                break;
 
@@ -723,7 +723,7 @@ static inline int sigio_perm(struct task_struct *p,
 
 static void send_sigio_to_task(struct task_struct *p,
                               struct fown_struct *fown,
-                              int fd, int reason, int group)
+                              int fd, int reason, enum pid_type type)
 {
        /*
         * F_SETSIG can change ->signum lockless in parallel, make
@@ -767,11 +767,11 @@ static void send_sigio_to_task(struct task_struct *p,
                        else
                                si.si_band = mangle_poll(band_table[reason - POLL_IN]);
                        si.si_fd    = fd;
-                       if (!do_send_sig_info(signum, &si, p, group))
+                       if (!do_send_sig_info(signum, &si, p, type))
                                break;
                /* fall-through: fall back on the old plain SIGIO signal */
                case 0:
-                       do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
+                       do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
        }
 }
 
@@ -780,34 +780,36 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
        struct task_struct *p;
        enum pid_type type;
        struct pid *pid;
-       int group = 1;
        
        read_lock(&fown->lock);
 
        type = fown->pid_type;
-       if (type == PIDTYPE_MAX) {
-               group = 0;
-               type = PIDTYPE_PID;
-       }
-
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;
-       
-       read_lock(&tasklist_lock);
-       do_each_pid_task(pid, type, p) {
-               send_sigio_to_task(p, fown, fd, band, group);
-       } while_each_pid_task(pid, type, p);
-       read_unlock(&tasklist_lock);
+
+       if (type <= PIDTYPE_TGID) {
+               rcu_read_lock();
+               p = pid_task(pid, PIDTYPE_PID);
+               if (p)
+                       send_sigio_to_task(p, fown, fd, band, type);
+               rcu_read_unlock();
+       } else {
+               read_lock(&tasklist_lock);
+               do_each_pid_task(pid, type, p) {
+                       send_sigio_to_task(p, fown, fd, band, type);
+               } while_each_pid_task(pid, type, p);
+               read_unlock(&tasklist_lock);
+       }
  out_unlock_fown:
        read_unlock(&fown->lock);
 }
 
 static void send_sigurg_to_task(struct task_struct *p,
-                               struct fown_struct *fown, int group)
+                               struct fown_struct *fown, enum pid_type type)
 {
        if (sigio_perm(p, fown, SIGURG))
-               do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
+               do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
 }
 
 int send_sigurg(struct fown_struct *fown)
@@ -815,28 +817,30 @@ int send_sigurg(struct fown_struct *fown)
        struct task_struct *p;
        enum pid_type type;
        struct pid *pid;
-       int group = 1;
        int ret = 0;
        
        read_lock(&fown->lock);
 
        type = fown->pid_type;
-       if (type == PIDTYPE_MAX) {
-               group = 0;
-               type = PIDTYPE_PID;
-       }
-
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;
 
        ret = 1;
-       
-       read_lock(&tasklist_lock);
-       do_each_pid_task(pid, type, p) {
-               send_sigurg_to_task(p, fown, group);
-       } while_each_pid_task(pid, type, p);
-       read_unlock(&tasklist_lock);
+
+       if (type <= PIDTYPE_TGID) {
+               rcu_read_lock();
+               p = pid_task(pid, PIDTYPE_PID);
+               if (p)
+                       send_sigurg_to_task(p, fown, type);
+               rcu_read_unlock();
+       } else {
+               read_lock(&tasklist_lock);
+               do_each_pid_task(pid, type, p) {
+                       send_sigurg_to_task(p, fown, type);
+               } while_each_pid_task(pid, type, p);
+               read_unlock(&tasklist_lock);
+       }
  out_unlock_fown:
        read_unlock(&fown->lock);
        return ret;
index a201fb0..b00a3f1 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/module.h>
 #include <linux/compat.h>
 #include <linux/swap.h>
index bc047a7..5086bde 100644 (file)
@@ -542,7 +542,7 @@ lease_setup(struct file_lock *fl, void **priv)
        if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
                *priv = NULL;
 
-       __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
+       __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
 }
 
 static const struct lock_manager_operations lease_manager_ops = {
index a6365e6..58d77dc 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/dnotify.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
@@ -353,7 +354,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
                goto out;
        }
 
-       __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
+       __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
 
        error = attach_dn(dn, dn_mark, id, fd, filp, mask);
        /* !error means that we attached the dn to the dn_mark, so don't free it */
index eb4e751..94b5215 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/mount.h>
 #include <linux/sched.h>
 #include <linux/sched/user.h>
+#include <linux/sched/signal.h>
 #include <linux/types.h>
 #include <linux/wait.h>
 #include <linux/audit.h>
index a454b8a..a7083a4 100644 (file)
@@ -46,15 +46,6 @@ extern struct cred init_cred;
 #define INIT_CPU_TIMERS(s)
 #endif
 
-#define INIT_PID_LINK(type)                                    \
-{                                                              \
-       .node = {                                               \
-               .next = NULL,                                   \
-               .pprev = NULL,                                  \
-       },                                                      \
-       .pid = &init_struct_pid,                                \
-}
-
 #define INIT_TASK_COMM "swapper"
 
 /* Attach to the init_task data structure for proper alignment */
index 7633d55..14a9a39 100644 (file)
@@ -7,11 +7,10 @@
 enum pid_type
 {
        PIDTYPE_PID,
+       PIDTYPE_TGID,
        PIDTYPE_PGID,
        PIDTYPE_SID,
        PIDTYPE_MAX,
-       /* only valid to __task_pid_nr_ns() */
-       __PIDTYPE_TGID
 };
 
 /*
@@ -67,12 +66,6 @@ struct pid
 
 extern struct pid init_struct_pid;
 
-struct pid_link
-{
-       struct hlist_node node;
-       struct pid *pid;
-};
-
 static inline struct pid *get_pid(struct pid *pid)
 {
        if (pid)
@@ -177,7 +170,7 @@ pid_t pid_vnr(struct pid *pid);
        do {                                                            \
                if ((pid) != NULL)                                      \
                        hlist_for_each_entry_rcu((task),                \
-                               &(pid)->tasks[type], pids[type].node) {
+                               &(pid)->tasks[type], pid_links[type]) {
 
                        /*
                         * Both old and new leaders may be attached to
index 037bf0e..4f36431 100644 (file)
@@ -214,8 +214,6 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
                        task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
                else
                        sigaddset(&child->pending.signal, SIGSTOP);
-
-               set_tsk_thread_flag(child, TIF_SIGPENDING);
        }
        else
                child->ptracer_cred = NULL;
index 789923f..00de3e9 100644 (file)
@@ -779,7 +779,8 @@ struct task_struct {
        struct list_head                ptrace_entry;
 
        /* PID/PID hash table linkage. */
-       struct pid_link                 pids[PIDTYPE_MAX];
+       struct pid                      *thread_pid;
+       struct hlist_node               pid_links[PIDTYPE_MAX];
        struct list_head                thread_group;
        struct list_head                thread_node;
 
@@ -1209,27 +1210,7 @@ struct task_struct {
 
 static inline struct pid *task_pid(struct task_struct *task)
 {
-       return task->pids[PIDTYPE_PID].pid;
-}
-
-static inline struct pid *task_tgid(struct task_struct *task)
-{
-       return task->group_leader->pids[PIDTYPE_PID].pid;
-}
-
-/*
- * Without tasklist or RCU lock it is not safe to dereference
- * the result of task_pgrp/task_session even if task == current,
- * we can race with another thread doing sys_setsid/sys_setpgid.
- */
-static inline struct pid *task_pgrp(struct task_struct *task)
-{
-       return task->group_leader->pids[PIDTYPE_PGID].pid;
-}
-
-static inline struct pid *task_session(struct task_struct *task)
-{
-       return task->group_leader->pids[PIDTYPE_SID].pid;
+       return task->thread_pid;
 }
 
 /*
@@ -1278,7 +1259,7 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
  */
 static inline int pid_alive(const struct task_struct *p)
 {
-       return p->pids[PIDTYPE_PID].pid != NULL;
+       return p->thread_pid != NULL;
 }
 
 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
@@ -1304,12 +1285,12 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
 
 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
 {
-       return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
+       return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
 }
 
 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 {
-       return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
+       return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
 }
 
 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
index 113d1ad..4e9b77f 100644 (file)
@@ -69,6 +69,11 @@ struct thread_group_cputimer {
        bool checking_timer;
 };
 
+struct multiprocess_signals {
+       sigset_t signal;
+       struct hlist_node node;
+};
+
 /*
  * NOTE! "signal_struct" does not have its own
  * locking, because a shared signal_struct always
@@ -90,6 +95,9 @@ struct signal_struct {
        /* shared signal handling: */
        struct sigpending       shared_pending;
 
+       /* For collecting multiprocess signals during fork */
+       struct hlist_head       multiprocess;
+
        /* thread group exit support */
        int                     group_exit_code;
        /* overloaded:
@@ -146,7 +154,8 @@ struct signal_struct {
 
 #endif
 
-       struct pid *leader_pid;
+       /* PID/PID hash table linkage. */
+       struct pid *pids[PIDTYPE_MAX];
 
 #ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
@@ -329,7 +338,7 @@ extern int send_sig(int, struct task_struct *, int);
 extern int zap_other_threads(struct task_struct *p);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
+extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
 
 static inline int restart_syscall(void)
@@ -371,6 +380,7 @@ static inline int signal_pending_state(long state, struct task_struct *p)
  */
 extern void recalc_sigpending_and_wake(struct task_struct *t);
 extern void recalc_sigpending(void);
+extern void calculate_sigpending(void);
 
 extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
 
@@ -383,6 +393,8 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
        signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
 }
 
+void task_join_group_stop(struct task_struct *task);
+
 #ifdef TIF_RESTORE_SIGMASK
 /*
  * Legacy restore_sigmask accessors.  These are inefficient on
@@ -556,6 +568,37 @@ extern bool current_is_single_threaded(void);
 typedef int (*proc_visitor)(struct task_struct *p, void *data);
 void walk_process_tree(struct task_struct *top, proc_visitor, void *);
 
+static inline
+struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
+{
+       struct pid *pid;
+       if (type == PIDTYPE_PID)
+               pid = task_pid(task);
+       else
+               pid = task->signal->pids[type];
+       return pid;
+}
+
+static inline struct pid *task_tgid(struct task_struct *task)
+{
+       return task->signal->pids[PIDTYPE_TGID];
+}
+
+/*
+ * Without tasklist or RCU lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current,
+ * we can race with another thread doing sys_setsid/sys_setpgid.
+ */
+static inline struct pid *task_pgrp(struct task_struct *task)
+{
+       return task->signal->pids[PIDTYPE_PGID];
+}
+
+static inline struct pid *task_session(struct task_struct *task)
+{
+       return task->signal->pids[PIDTYPE_SID];
+}
+
 static inline int get_nr_threads(struct task_struct *tsk)
 {
        return tsk->signal->nr_threads;
@@ -574,7 +617,7 @@ static inline bool thread_group_leader(struct task_struct *p)
  */
 static inline bool has_group_leader_pid(struct task_struct *p)
 {
-       return task_pid(p) == p->signal->leader_pid;
+       return task_pid(p) == task_tgid(p);
 }
 
 static inline
index 3c52001..fe125b0 100644 (file)
@@ -254,11 +254,13 @@ static inline int valid_signal(unsigned long sig)
 
 struct timespec;
 struct pt_regs;
+enum pid_type;
 
 extern int next_signal(struct sigpending *pending, sigset_t *mask);
 extern int do_send_sig_info(int sig, struct siginfo *info,
-                               struct task_struct *p, bool group);
-extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
+                               struct task_struct *p, enum pid_type type);
+extern int group_send_sig_info(int sig, struct siginfo *info,
+                              struct task_struct *p, enum pid_type type);
 extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
 extern int sigprocmask(int, sigset_t *, sigset_t *);
 extern void set_current_blocked(sigset_t *);
index 903771c..1ce365f 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/security.h>
 #include <linux/pid.h>
 #include <linux/nsproxy.h>
+#include <linux/sched/signal.h>
 
 /* Well, we should have at least one descriptor open
  * to accept passed FDs 8)
index 74f60ba..5aebe3b 100644 (file)
@@ -22,6 +22,7 @@ static struct signal_struct init_signals = {
                .list = LIST_HEAD_INIT(init_signals.shared_pending.list),
                .signal =  {{0}}
        },
+       .multiprocess   = HLIST_HEAD_INIT,
        .rlim           = INIT_RLIMITS,
        .cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
 #ifdef CONFIG_POSIX_TIMERS
@@ -33,6 +34,12 @@ static struct signal_struct init_signals = {
        },
 #endif
        INIT_CPU_TIMERS(init_signals)
+       .pids = {
+               [PIDTYPE_PID]   = &init_struct_pid,
+               [PIDTYPE_TGID]  = &init_struct_pid,
+               [PIDTYPE_PGID]  = &init_struct_pid,
+               [PIDTYPE_SID]   = &init_struct_pid,
+       },
        INIT_PREV_CPUTIME(init_signals)
 };
 
@@ -111,11 +118,7 @@ struct task_struct init_task
        INIT_CPU_TIMERS(init_task)
        .pi_lock        = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
        .timer_slack_ns = 50000, /* 50 usec default slack */
-       .pids = {
-               [PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),
-               [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),
-               [PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),
-       },
+       .thread_pid     = &init_struct_pid,
        .thread_group   = LIST_HEAD_INIT(init_task.thread_group),
        .thread_node    = LIST_HEAD_INIT(init_signals.thread_head),
 #ifdef CONFIG_AUDITSYSCALL
index 80f456e..2a62b96 100644 (file)
@@ -1334,7 +1334,7 @@ static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
 
 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
 {
-       return perf_event_pid_type(event, p, __PIDTYPE_TGID);
+       return perf_event_pid_type(event, p, PIDTYPE_TGID);
 }
 
 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
index c3c7ac5..0e21e6d 100644 (file)
@@ -73,6 +73,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
        if (group_dead) {
+               detach_pid(p, PIDTYPE_TGID);
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);
 
@@ -680,7 +681,8 @@ static void forget_original_parent(struct task_struct *father,
                                t->parent = t->real_parent;
                        if (t->pdeath_signal)
                                group_send_sig_info(t->pdeath_signal,
-                                                   SEND_SIG_NOINFO, t);
+                                                   SEND_SIG_NOINFO, t,
+                                                   PIDTYPE_TGID);
                }
                /*
                 * If this is a threaded reparent there is no need to
@@ -1001,14 +1003,6 @@ struct wait_opts {
        int                     notask_error;
 };
 
-static inline
-struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
-{
-       if (type != PIDTYPE_PID)
-               task = task->group_leader;
-       return task->pids[type].pid;
-}
-
 static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
 {
        return  wo->wo_type == PIDTYPE_MAX ||
index 5ee74c1..ff5037b 100644 (file)
@@ -1487,6 +1487,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        init_waitqueue_head(&sig->wait_chldexit);
        sig->curr_target = tsk;
        init_sigpending(&sig->shared_pending);
+       INIT_HLIST_HEAD(&sig->multiprocess);
        seqlock_init(&sig->stats_lock);
        prev_cputime_init(&sig->prev_cputime);
 
@@ -1580,10 +1581,22 @@ static void posix_cpu_timers_init(struct task_struct *tsk)
 static inline void posix_cpu_timers_init(struct task_struct *tsk) { }
 #endif
 
+static inline void init_task_pid_links(struct task_struct *task)
+{
+       enum pid_type type;
+
+       for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
+               INIT_HLIST_NODE(&task->pid_links[type]);
+       }
+}
+
 static inline void
 init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
 {
-        task->pids[type].pid = pid;
+       if (type == PIDTYPE_PID)
+               task->thread_pid = pid;
+       else
+               task->signal->pids[type] = pid;
 }
 
 static inline void rcu_copy_process(struct task_struct *p)
@@ -1621,6 +1634,7 @@ static __latent_entropy struct task_struct *copy_process(
 {
        int retval;
        struct task_struct *p;
+       struct multiprocess_signals delayed;
 
        /*
         * Don't allow sharing the root directory with processes in a different
@@ -1668,6 +1682,24 @@ static __latent_entropy struct task_struct *copy_process(
                        return ERR_PTR(-EINVAL);
        }
 
+       /*
+        * Force any signals received before this point to be delivered
+        * before the fork happens.  Collect up signals sent to multiple
+        * processes that happen during the fork and delay them so that
+        * they appear to happen after the fork.
+        */
+       sigemptyset(&delayed.signal);
+       INIT_HLIST_NODE(&delayed.node);
+
+       spin_lock_irq(&current->sighand->siglock);
+       if (!(clone_flags & CLONE_THREAD))
+               hlist_add_head(&delayed.node, &current->signal->multiprocess);
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+       retval = -ERESTARTNOINTR;
+       if (signal_pending(current))
+               goto fork_out;
+
        retval = -ENOMEM;
        p = dup_task_struct(current, node);
        if (!p)
@@ -1941,29 +1973,26 @@ static __latent_entropy struct task_struct *copy_process(
 
        rseq_fork(p, clone_flags);
 
-       /*
-        * Process group and session signals need to be delivered to just the
-        * parent before the fork or both the parent and the child after the
-        * fork. Restart if a signal comes in before we add the new process to
-        * it's process group.
-        * A fatal signal pending means that current will exit, so the new
-        * thread can't slip out of an OOM kill (or normal SIGKILL).
-       */
-       recalc_sigpending();
-       if (signal_pending(current)) {
-               retval = -ERESTARTNOINTR;
-               goto bad_fork_cancel_cgroup;
-       }
+       /* Don't start children in a dying pid namespace */
        if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
                retval = -ENOMEM;
                goto bad_fork_cancel_cgroup;
        }
 
+       /* Let kill terminate clone/fork in the middle */
+       if (fatal_signal_pending(current)) {
+               retval = -EINTR;
+               goto bad_fork_cancel_cgroup;
+       }
+
+
+       init_task_pid_links(p);
        if (likely(p->pid)) {
                ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 
                init_task_pid(p, PIDTYPE_PID, pid);
                if (thread_group_leader(p)) {
+                       init_task_pid(p, PIDTYPE_TGID, pid);
                        init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
                        init_task_pid(p, PIDTYPE_SID, task_session(current));
 
@@ -1971,8 +2000,7 @@ static __latent_entropy struct task_struct *copy_process(
                                ns_of_pid(pid)->child_reaper = p;
                                p->signal->flags |= SIGNAL_UNKILLABLE;
                        }
-
-                       p->signal->leader_pid = pid;
+                       p->signal->shared_pending.signal = delayed.signal;
                        p->signal->tty = tty_kref_get(current->signal->tty);
                        /*
                         * Inherit has_child_subreaper flag under the same
@@ -1983,6 +2011,7 @@ static __latent_entropy struct task_struct *copy_process(
                                                         p->real_parent->signal->is_child_subreaper;
                        list_add_tail(&p->sibling, &p->real_parent->children);
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
+                       attach_pid(p, PIDTYPE_TGID);
                        attach_pid(p, PIDTYPE_PGID);
                        attach_pid(p, PIDTYPE_SID);
                        __this_cpu_inc(process_counts);
@@ -1990,6 +2019,7 @@ static __latent_entropy struct task_struct *copy_process(
                        current->signal->nr_threads++;
                        atomic_inc(&current->signal->live);
                        atomic_inc(&current->signal->sigcnt);
+                       task_join_group_stop(p);
                        list_add_tail_rcu(&p->thread_group,
                                          &p->group_leader->thread_group);
                        list_add_tail_rcu(&p->thread_node,
@@ -1998,8 +2028,8 @@ static __latent_entropy struct task_struct *copy_process(
                attach_pid(p, PIDTYPE_PID);
                nr_threads++;
        }
-
        total_forks++;
+       hlist_del_init(&delayed.node);
        spin_unlock(&current->sighand->siglock);
        syscall_tracepoint_update(p);
        write_unlock_irq(&tasklist_lock);
@@ -2064,16 +2094,19 @@ bad_fork_free:
        put_task_stack(p);
        free_task(p);
 fork_out:
+       spin_lock_irq(&current->sighand->siglock);
+       hlist_del_init(&delayed.node);
+       spin_unlock_irq(&current->sighand->siglock);
        return ERR_PTR(retval);
 }
 
-static inline void init_idle_pids(struct pid_link *links)
+static inline void init_idle_pids(struct task_struct *idle)
 {
        enum pid_type type;
 
        for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
-               INIT_HLIST_NODE(&links[type].node); /* not really needed */
-               links[type].pid = &init_struct_pid;
+               INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
+               init_task_pid(idle, type, &init_struct_pid);
        }
 }
 
@@ -2083,7 +2116,7 @@ struct task_struct *fork_idle(int cpu)
        task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
                            cpu_to_node(cpu));
        if (!IS_ERR(task)) {
-               init_idle_pids(task->pids);
+               init_idle_pids(task);
                init_idle(task, cpu);
        }
 
index 157fe4b..de1cfc4 100644 (file)
@@ -265,27 +265,33 @@ struct pid *find_vpid(int nr)
 }
 EXPORT_SYMBOL_GPL(find_vpid);
 
+static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
+{
+       return (type == PIDTYPE_PID) ?
+               &task->thread_pid :
+               &task->signal->pids[type];
+}
+
 /*
  * attach_pid() must be called with the tasklist_lock write-held.
  */
 void attach_pid(struct task_struct *task, enum pid_type type)
 {
-       struct pid_link *link = &task->pids[type];
-       hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
+       struct pid *pid = *task_pid_ptr(task, type);
+       hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
 }
 
 static void __change_pid(struct task_struct *task, enum pid_type type,
                        struct pid *new)
 {
-       struct pid_link *link;
+       struct pid **pid_ptr = task_pid_ptr(task, type);
        struct pid *pid;
        int tmp;
 
-       link = &task->pids[type];
-       pid = link->pid;
+       pid = *pid_ptr;
 
-       hlist_del_rcu(&link->node);
-       link->pid = new;
+       hlist_del_rcu(&task->pid_links[type]);
+       *pid_ptr = new;
 
        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                if (!hlist_empty(&pid->tasks[tmp]))
@@ -310,8 +316,9 @@ void change_pid(struct task_struct *task, enum pid_type type,
 void transfer_pid(struct task_struct *old, struct task_struct *new,
                           enum pid_type type)
 {
-       new->pids[type].pid = old->pids[type].pid;
-       hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
+       if (type == PIDTYPE_PID)
+               new->thread_pid = old->thread_pid;
+       hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
 }
 
 struct task_struct *pid_task(struct pid *pid, enum pid_type type)
@@ -322,7 +329,7 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
                first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
                                              lockdep_tasklist_lock_is_held());
                if (first)
-                       result = hlist_entry(first, struct task_struct, pids[(type)].node);
+                       result = hlist_entry(first, struct task_struct, pid_links[(type)]);
        }
        return result;
 }
@@ -360,9 +367,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
 {
        struct pid *pid;
        rcu_read_lock();
-       if (type != PIDTYPE_PID)
-               task = task->group_leader;
-       pid = get_pid(rcu_dereference(task->pids[type].pid));
+       pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
        rcu_read_unlock();
        return pid;
 }
@@ -420,15 +425,8 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
        rcu_read_lock();
        if (!ns)
                ns = task_active_pid_ns(current);
-       if (likely(pid_alive(task))) {
-               if (type != PIDTYPE_PID) {
-                       if (type == __PIDTYPE_TGID)
-                               type = PIDTYPE_PID;
-
-                       task = task->group_leader;
-               }
-               nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
-       }
+       if (likely(pid_alive(task)))
+               nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
        rcu_read_unlock();
 
        return nr;
index c89302d..625bc98 100644 (file)
@@ -2774,6 +2774,8 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 
        if (current->set_child_tid)
                put_user(task_pid_vnr(current), current->set_child_tid);
+
+       calculate_sigpending();
 }
 
 /*
index 8d8a940..cfa9d10 100644 (file)
@@ -172,6 +172,17 @@ void recalc_sigpending(void)
 
 }
 
+void calculate_sigpending(void)
+{
+       /* Have any signals or users of TIF_SIGPENDING been delayed
+        * until after fork?
+        */
+       spin_lock_irq(&current->sighand->siglock);
+       set_tsk_thread_flag(current, TIF_SIGPENDING);
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+}
+
 /* Given the mask, find the first available signal that should be serviced. */
 
 #define SYNCHRONOUS_MASK \
@@ -362,6 +373,20 @@ static bool task_participate_group_stop(struct task_struct *task)
        return false;
 }
 
+void task_join_group_stop(struct task_struct *task)
+{
+       /* Have the new thread join an on-going signal group stop */
+       unsigned long jobctl = current->jobctl;
+       if (jobctl & JOBCTL_STOP_PENDING) {
+               struct signal_struct *sig = current->signal;
+               unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
+               unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
+               if (task_set_jobctl_pending(task, signr | gstop)) {
+                       sig->group_stop_count++;
+               }
+       }
+}
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
@@ -895,7 +920,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
        return task_curr(p) || !signal_pending(p);
 }
 
-static void complete_signal(int sig, struct task_struct *p, int group)
+static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 {
        struct signal_struct *signal = p->signal;
        struct task_struct *t;
@@ -908,7 +933,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
         */
        if (wants_signal(sig, p))
                t = p;
-       else if (!group || thread_group_empty(p))
+       else if ((type == PIDTYPE_PID) || thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
@@ -998,7 +1023,7 @@ static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_str
 #endif
 
 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
-                       int group, int from_ancestor_ns)
+                       enum pid_type type, int from_ancestor_ns)
 {
        struct sigpending *pending;
        struct sigqueue *q;
@@ -1012,7 +1037,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        from_ancestor_ns || (info == SEND_SIG_FORCED)))
                goto ret;
 
-       pending = group ? &t->signal->shared_pending : &t->pending;
+       pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
        /*
         * Short-circuit ignored signals and support queuing
         * exactly one non-rt signal, so that we can get more
@@ -1096,14 +1121,29 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);
-       complete_signal(sig, t, group);
+
+       /* Let multiprocess signals appear after on-going forks */
+       if (type > PIDTYPE_TGID) {
+               struct multiprocess_signals *delayed;
+               hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
+                       sigset_t *signal = &delayed->signal;
+                       /* Can't queue both a stop and a continue signal */
+                       if (sig == SIGCONT)
+                               sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
+                       else if (sig_kernel_stop(sig))
+                               sigdelset(signal, SIGCONT);
+                       sigaddset(signal, sig);
+               }
+       }
+
+       complete_signal(sig, t, type);
 ret:
-       trace_signal_generate(sig, info, t, group, result);
+       trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
        return ret;
 }
 
 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
-                       int group)
+                       enum pid_type type)
 {
        int from_ancestor_ns = 0;
 
@@ -1112,7 +1152,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                           !task_pid_nr_ns(current, task_active_pid_ns(t));
 #endif
 
-       return __send_signal(sig, info, t, group, from_ancestor_ns);
+       return __send_signal(sig, info, t, type, from_ancestor_ns);
 }
 
 static void print_fatal_signal(int signr)
@@ -1151,23 +1191,23 @@ __setup("print-fatal-signals=", setup_print_fatal_signals);
 int
 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
-       return send_signal(sig, info, p, 1);
+       return send_signal(sig, info, p, PIDTYPE_TGID);
 }
 
 static int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
-       return send_signal(sig, info, t, 0);
+       return send_signal(sig, info, t, PIDTYPE_PID);
 }
 
 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
-                       bool group)
+                       enum pid_type type)
 {
        unsigned long flags;
        int ret = -ESRCH;
 
        if (lock_task_sighand(p, &flags)) {
-               ret = send_signal(sig, info, p, group);
+               ret = send_signal(sig, info, p, type);
                unlock_task_sighand(p, &flags);
        }
 
@@ -1274,7 +1314,8 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 /*
  * send signal info to all the members of a group
  */
-int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
+                       enum pid_type type)
 {
        int ret;
 
@@ -1283,7 +1324,7 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        rcu_read_unlock();
 
        if (!ret && sig)
-               ret = do_send_sig_info(sig, info, p, true);
+               ret = do_send_sig_info(sig, info, p, type);
 
        return ret;
 }
@@ -1301,7 +1342,7 @@ int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
        success = 0;
        retval = -ESRCH;
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-               int err = group_send_sig_info(sig, info, p);
+               int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
                success |= !err;
                retval = err;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
@@ -1317,7 +1358,7 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
                rcu_read_lock();
                p = pid_task(pid, PIDTYPE_PID);
                if (p)
-                       error = group_send_sig_info(sig, info, p);
+                       error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
                rcu_read_unlock();
                if (likely(!p || error != -ESRCH))
                        return error;
@@ -1376,7 +1417,7 @@ int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
 
        if (sig) {
                if (lock_task_sighand(p, &flags)) {
-                       ret = __send_signal(sig, info, p, 1, 0);
+                       ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
                        unlock_task_sighand(p, &flags);
                } else
                        ret = -ESRCH;
@@ -1420,7 +1461,8 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
                for_each_process(p) {
                        if (task_pid_vnr(p) > 1 &&
                                        !same_thread_group(p, current)) {
-                               int err = group_send_sig_info(sig, info, p);
+                               int err = group_send_sig_info(sig, info, p,
+                                                             PIDTYPE_MAX);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
@@ -1446,7 +1488,7 @@ int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        if (!valid_signal(sig))
                return -EINVAL;
 
-       return do_send_sig_info(sig, info, p, false);
+       return do_send_sig_info(sig, info, p, PIDTYPE_PID);
 }
 
 #define __si_special(priv) \
@@ -1664,17 +1706,20 @@ void sigqueue_free(struct sigqueue *q)
                __sigqueue_free(q);
 }
 
-int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
+int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
 {
        int sig = q->info.si_signo;
        struct sigpending *pending;
+       struct task_struct *t;
        unsigned long flags;
        int ret, result;
 
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
        ret = -1;
-       if (!likely(lock_task_sighand(t, &flags)))
+       rcu_read_lock();
+       t = pid_task(pid, type);
+       if (!t || !likely(lock_task_sighand(t, &flags)))
                goto ret;
 
        ret = 1; /* the signal is ignored */
@@ -1696,15 +1741,16 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
        q->info.si_overrun = 0;
 
        signalfd_notify(t, sig);
-       pending = group ? &t->signal->shared_pending : &t->pending;
+       pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
-       complete_signal(sig, t, group);
+       complete_signal(sig, t, type);
        result = TRACE_SIGNAL_DELIVERED;
 out:
-       trace_signal_generate(sig, &q->info, t, group, result);
+       trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
        unlock_task_sighand(t, &flags);
 ret:
+       rcu_read_unlock();
        return ret;
 }
 
@@ -3193,7 +3239,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
                 * probe.  No signal is actually delivered.
                 */
                if (!error && sig) {
-                       error = do_send_sig_info(sig, info, p, false);
+                       error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
                        /*
                         * If lock_task_sighand() failed we pretend the task
                         * dies after receiving the signal. The window is tiny,
@@ -3960,7 +4006,7 @@ void kdb_send_sig(struct task_struct *t, int sig)
                           "the deadlock.\n");
                return;
        }
-       ret = send_signal(sig, SEND_SIG_PRIV, t, false);
+       ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
        spin_unlock(&t->sighand->siglock);
        if (ret)
                kdb_printf("Fail to deliver Signal %d to process %d.\n",
index f26acef..9a65713 100644 (file)
@@ -139,9 +139,10 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
 {
        struct signal_struct *sig =
                container_of(timer, struct signal_struct, real_timer);
+       struct pid *leader_pid = sig->pids[PIDTYPE_TGID];
 
-       trace_itimer_expire(ITIMER_REAL, sig->leader_pid, 0);
-       kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid);
+       trace_itimer_expire(ITIMER_REAL, leader_pid, 0);
+       kill_pid_info(SIGALRM, SEND_SIG_PRIV, leader_pid);
 
        return HRTIMER_NORESTART;
 }
index 294d7b6..ce32cf7 100644 (file)
@@ -894,7 +894,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
 
                trace_itimer_expire(signo == SIGPROF ?
                                    ITIMER_PROF : ITIMER_VIRTUAL,
-                                   tsk->signal->leader_pid, cur_time);
+                                   task_tgid(tsk), cur_time);
                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
        }
 
index f23cc46..4b9127e 100644 (file)
@@ -333,8 +333,8 @@ void posixtimer_rearm(struct siginfo *info)
 
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
-       struct task_struct *task;
-       int shared, ret = -1;
+       enum pid_type type;
+       int ret = -1;
        /*
         * FIXME: if ->sigq is queued we can race with
         * dequeue_signal()->posixtimer_rearm().
@@ -348,13 +348,8 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
         */
        timr->sigq->info.si_sys_private = si_private;
 
-       rcu_read_lock();
-       task = pid_task(timr->it_pid, PIDTYPE_PID);
-       if (task) {
-               shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
-               ret = send_sigqueue(timr->sigq, task, shared);
-       }
-       rcu_read_unlock();
+       type = !(timr->it_sigev_notify & SIGEV_THREAD_ID) ? PIDTYPE_TGID : PIDTYPE_PID;
+       ret = send_sigqueue(timr->sigq, timr->it_pid, type);
        /* If we failed to send the signal the timer stops. */
        return ret > 0;
 }
@@ -433,11 +428,13 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 
 static struct pid *good_sigevent(sigevent_t * event)
 {
-       struct task_struct *rtn = current->group_leader;
+       struct pid *pid = task_tgid(current);
+       struct task_struct *rtn;
 
        switch (event->sigev_notify) {
        case SIGEV_SIGNAL | SIGEV_THREAD_ID:
-               rtn = find_task_by_vpid(event->sigev_notify_thread_id);
+               pid = find_vpid(event->sigev_notify_thread_id);
+               rtn = pid_task(pid, PIDTYPE_PID);
                if (!rtn || !same_thread_group(rtn, current))
                        return NULL;
                /* FALLTHRU */
@@ -447,7 +444,7 @@ static struct pid *good_sigevent(sigevent_t * event)
                        return NULL;
                /* FALLTHRU */
        case SIGEV_NONE:
-               return task_pid(rtn);
+               return pid;
        default:
                return NULL;
        }
index 412f434..7c74dcc 100644 (file)
@@ -928,7 +928,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
         * in order to prevent the OOM victim from depleting the memory
         * reserves from the user space under its control.
         */
-       do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
+       do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, PIDTYPE_TGID);
        mark_oom_victim(victim);
        pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
@@ -966,7 +966,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
                 */
                if (unlikely(p->flags & PF_KTHREAD))
                        continue;
-               do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
+               do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, PIDTYPE_TGID);
        }
        rcu_read_unlock();
 
index 9263ead..0df592c 100644 (file)
@@ -2568,7 +2568,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
                if (arg)
                        goto out;
                oldpid = rcu_access_pointer(vcpu->pid);
-               if (unlikely(oldpid != current->pids[PIDTYPE_PID].pid)) {
+               if (unlikely(oldpid != task_pid(current))) {
                        /* The thread running this VCPU changed. */
                        struct pid *newpid;