kernel/latencytop.c: rename clear_all_latency_tracing to clear_tsk_latency_tracing
diff --git a/kernel/fork.c b/kernel/fork.c
index 8b03d93..b4cba95 100644
@@ -955,6 +955,15 @@ static void mm_init_aio(struct mm_struct *mm)
 #endif
 }
 
+static __always_inline void mm_clear_owner(struct mm_struct *mm,
+                                          struct task_struct *p)
+{
+#ifdef CONFIG_MEMCG
+       if (mm->owner == p)
+               WRITE_ONCE(mm->owner, NULL);
+#endif
+}
+
 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 {
 #ifdef CONFIG_MEMCG
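
Under CONFIG_MEMCG, mm->owner records which task the memory controller charges for this mm. The new mm_clear_owner() helper severs that link when the task being torn down on a fork error path is the owner, so a concurrent lockless reader cannot keep following a pointer into a task_struct that is about to be freed. The WRITE_ONCE() is meant to pair with a READ_ONCE() on the reader side; a minimal, hypothetical reader sketch (the helper name and the pr_debug() use are illustrative, not part of this patch):

	static void mm_owner_debug(struct mm_struct *mm)
	{
		struct task_struct *owner;

		rcu_read_lock();
		/* Pairs with WRITE_ONCE() in mm_clear_owner(): the reader
		 * sees either the old task or NULL, never a torn pointer. */
		owner = READ_ONCE(mm->owner);
		if (owner)
			pr_debug("mm %p charged to %s\n", mm, owner->comm);
		rcu_read_unlock();
	}

Dereferencing owner inside the read-side critical section is only safe because a failed fork now frees the task via call_rcu(); see delayed_free_task() further down.
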
@@ -1225,7 +1234,9 @@ static int wait_for_vfork_done(struct task_struct *child,
        int killed;
 
        freezer_do_not_count();
+       cgroup_enter_frozen();
        killed = wait_for_completion_killable(vfork);
+       cgroup_leave_frozen(false);
        freezer_count();
 
        if (killed) {
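
A vfork() parent can sleep in wait_for_vfork_done() for as long as the child runs before it execs or exits. freezer_do_not_count() already makes the legacy freezer treat the sleeping parent as frozen; the added cgroup_enter_frozen()/cgroup_leave_frozen(false) pair extends the same treatment to the cgroup v2 freezer, so a cgroup being frozen does not hang in the "freezing" state waiting for this task. Passing false to cgroup_leave_frozen() lets the task stay marked frozen if its cgroup is still meant to be frozen, rather than briefly thawing on wakeup. The general bracketing pattern, sketched under the assumption of some completion 'done' signaled by another task:

	/* Sketch of the pattern this hunk applies; 'done' is assumed,
	 * not from this patch. */
	freezer_do_not_count();
	cgroup_enter_frozen();
	killed = wait_for_completion_killable(&done);
	cgroup_leave_frozen(false);
	freezer_count();
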
@@ -1341,6 +1352,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk,
 free_pt:
        /* don't put binfmt in mmput, we haven't got module yet */
        mm->binfmt = NULL;
+       mm_init_owner(mm, NULL);
        mmput(mm);
 
 fail_nomem:
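
The free_pt: label in dup_mm() is reached only after mm_init() succeeded, and mm_init() (under CONFIG_MEMCG) pointed mm->owner at the new child. Resetting the owner to NULL before mmput() keeps the mm from naming a half-constructed task that copy_process() is about to destroy. For reference, mm_init_owner(), whose body the first hunk truncates, is essentially:

	static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
	{
	#ifdef CONFIG_MEMCG
		mm->owner = p;
	#endif
	}
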
@@ -1724,6 +1736,21 @@ static int pidfd_create(struct pid *pid)
        return fd;
 }
 
+static void __delayed_free_task(struct rcu_head *rhp)
+{
+       struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+
+       free_task(tsk);
+}
+
+static __always_inline void delayed_free_task(struct task_struct *tsk)
+{
+       if (IS_ENABLED(CONFIG_MEMCG))
+               call_rcu(&tsk->rcu, __delayed_free_task);
+       else
+               free_task(tsk);
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
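
When CONFIG_MEMCG is enabled, delayed_free_task() defers free_task() by an RCU grace period via call_rcu(); IS_ENABLED() is a compile-time constant, so !CONFIG_MEMCG kernels keep the immediate free. Together with mm_clear_owner() above, this closes the window where an RCU reader samples mm->owner just before it is cleared and then dereferences the task while free_task() tears it down:

	/*
	 * Interleaving being made safe (sketch):
	 *
	 *   RCU reader                         failed fork path
	 *   ----------                         ----------------
	 *   rcu_read_lock();
	 *   owner = READ_ONCE(mm->owner);
	 *                                      mm_clear_owner(mm, p);
	 *                                      delayed_free_task(p);
	 *                                        -> call_rcu() queued
	 *   use(owner);   <- still safe: free_task() cannot run until
	 *                    this read-side critical section ends
	 *   rcu_read_unlock();
	 *                                      __delayed_free_task(p)
	 *                                        runs after the grace period
	 */
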
@@ -2066,7 +2093,7 @@ static __latent_entropy struct task_struct *copy_process(
 #ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
 #endif
-       clear_all_latency_tracing(p);
+       clear_tsk_latency_tracing(p);
 
        /* ok, now we should be set up.. */
        p->pid = pid_nr(pid);
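
This is the rename the commit subject refers to: clear_all_latency_tracing() only ever cleared the latency records of a single task, so clear_tsk_latency_tracing() better matches both its behavior and the tsk-naming convention of its neighbor clear_tsk_thread_flag(). The renamed helper in kernel/latencytop.c is essentially:

	void clear_tsk_latency_tracing(struct task_struct *p)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&latency_lock, flags);
		memset(&p->latency_record, 0, sizeof(p->latency_record));
		p->latency_record_count = 0;
		raw_spin_unlock_irqrestore(&latency_lock, flags);
	}
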
@@ -2100,7 +2127,7 @@ static __latent_entropy struct task_struct *copy_process(
         */
        retval = cgroup_can_fork(p);
        if (retval)
-               goto bad_fork_put_pidfd;
+               goto bad_fork_cgroup_threadgroup_change_end;
 
        /*
         * From this point on we must avoid any synchronous user-space
@@ -2215,11 +2242,12 @@ bad_fork_cancel_cgroup:
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        cgroup_cancel_fork(p);
+bad_fork_cgroup_threadgroup_change_end:
+       cgroup_threadgroup_change_end(current);
 bad_fork_put_pidfd:
        if (clone_flags & CLONE_PIDFD)
                ksys_close(pidfd);
 bad_fork_free_pid:
-       cgroup_threadgroup_change_end(current);
        if (pid != &init_struct_pid)
                free_pid(pid);
 bad_fork_cleanup_thread:
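
These two hunks retarget the cgroup_can_fork() failure path at a new label that drops the threadgroup-change lock first. Unwinding now mirrors the setup order (LIFO): cgroup_threadgroup_change_begin() is undone before the pidfd is closed and the pid freed, instead of after, so the lock is no longer held across ksys_close(). Condensed view of the pairing after the change (abbreviated, not literal source):

	cgroup_threadgroup_change_begin(current);
	retval = cgroup_can_fork(p);
	if (retval)
		goto bad_fork_cgroup_threadgroup_change_end;
	...
	bad_fork_cgroup_threadgroup_change_end:
		cgroup_threadgroup_change_end(current);	/* undoes _begin */
	bad_fork_put_pidfd:
		...
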
@@ -2230,8 +2258,10 @@ bad_fork_cleanup_io:
 bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
 bad_fork_cleanup_mm:
-       if (p->mm)
+       if (p->mm) {
+               mm_clear_owner(p->mm, p);
                mmput(p->mm);
+       }
 bad_fork_cleanup_signal:
        if (!(clone_flags & CLONE_THREAD))
                free_signal_struct(p->signal);
@@ -2262,7 +2292,7 @@ bad_fork_cleanup_count:
 bad_fork_free:
        p->state = TASK_DEAD;
        put_task_stack(p);
-       free_task(p);
+       delayed_free_task(p);
 fork_out:
        spin_lock_irq(&current->sighand->siglock);
        hlist_del_init(&delayed.node);
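
Taken together, the tail of the error path now guarantees that a half-created child never leaves a dangling mm->owner behind: bad_fork_cleanup_mm clears the ownership before dropping the mm reference, and bad_fork_free hands the task to delayed_free_task() instead of freeing it outright. The deferred-free idiom used there, reduced to a self-contained generic sketch (struct foo and its helpers are illustrative, not kernel/fork.c source):

	struct foo {
		int payload;
		struct rcu_head rcu;	/* embedded for call_rcu() */
	};

	static void foo_free_rcu(struct rcu_head *rhp)
	{
		/* Recover the enclosing object once all pre-existing
		 * RCU readers are done. */
		kfree(container_of(rhp, struct foo, rcu));
	}

	static void foo_release(struct foo *f)
	{
		call_rcu(&f->rcu, foo_free_rcu);
	}
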