tools headers UAPI: Sync linux/kvm.h with the kernel sources
diff --git a/kernel/fork.c b/kernel/fork.c
index dc06afd..bc94b2c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -425,7 +425,7 @@ static int memcg_charge_kernel_stack(struct task_struct *tsk)
 
 static void release_task_stack(struct task_struct *tsk)
 {
-       if (WARN_ON(tsk->state != TASK_DEAD))
+       if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
                return;  /* Better to leak the stack than to free prematurely */
 
        account_kernel_stack(tsk, -1);
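
This hunk is part of the rename of task_struct::state to __state, where lockless readers and writers go through READ_ONCE()/WRITE_ONCE() so each access is a single, untorn volatile load or store. Below is a minimal userspace sketch of that marked-access pattern; the macro definitions, the demo state values and struct fake_task are simplified stand-ins, not the kernel's.

#include <stdio.h>

/* simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

#define TASK_RUNNING	0x0000	/* demo values */
#define TASK_DEAD	0x0080

struct fake_task { unsigned int __state; };

int main(void)
{
	struct fake_task t = { .__state = TASK_RUNNING };

	WRITE_ONCE(t.__state, TASK_DEAD);	/* marked store */

	if (READ_ONCE(t.__state) != TASK_DEAD)	/* marked, untorn load */
		printf("not TASK_DEAD: better to leak the stack than free it early\n");
	else
		printf("TASK_DEAD observed: safe to release the stack\n");
	return 0;
}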
@@ -742,6 +742,7 @@ void __put_task_struct(struct task_struct *tsk)
        exit_creds(tsk);
        delayacct_tsk_free(tsk);
        put_signal_struct(tsk->signal);
+       sched_core_free(tsk);
 
        if (!profile_handoff_task(tsk))
                free_task(tsk);
@@ -824,9 +825,14 @@ void __init fork_init(void)
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];
 
-       for (i = 0; i < UCOUNT_COUNTS; i++)
+       for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++)
                init_user_ns.ucount_max[i] = max_threads/2;
 
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, task_rlimit(&init_task, RLIMIT_NPROC));
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, task_rlimit(&init_task, RLIMIT_MSGQUEUE));
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, task_rlimit(&init_task, RLIMIT_SIGPENDING));
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, task_rlimit(&init_task, RLIMIT_MEMLOCK));
+
 #ifdef CONFIG_VMAP_STACK
        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
                          NULL, free_vm_stack_cache);
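
fork_init() now seeds only the plain per-namespace counters in the loop and gives the rlimit-backed ucounts (NPROC, MSGQUEUE, SIGPENDING, MEMLOCK) their own ceilings, copied from init_task's rlimits. The sketch below models that idea with invented names (ns_limits, seed_rlimit_max, UL_*, and arbitrary demo values), not the kernel's user_namespace/ucounts API.

#include <stdio.h>

enum ul_type { UL_NPROC, UL_MSGQUEUE, UL_SIGPENDING, UL_MEMLOCK, UL_MAX };

struct ns_limits {
	long max[UL_MAX];	/* per-namespace ceiling per tracked rlimit */
};

static void seed_rlimit_max(struct ns_limits *ns, enum ul_type t, long rlim)
{
	ns->max[t] = rlim;	/* mirrors the set_rlimit_ucount_max() calls above */
}

int main(void)
{
	struct ns_limits init_ns = { { 0 } };

	/* the diff copies the initial task's rlimits into the namespace ceilings */
	seed_rlimit_max(&init_ns, UL_NPROC,      4096);
	seed_rlimit_max(&init_ns, UL_MSGQUEUE,   819200);
	seed_rlimit_max(&init_ns, UL_SIGPENDING, 4096);
	seed_rlimit_max(&init_ns, UL_MEMLOCK,    8 * 1024 * 1024);

	printf("NPROC ceiling in the initial namespace: %ld\n", init_ns.max[UL_NPROC]);
	return 0;
}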
@@ -1029,7 +1035,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        mm_pgtables_bytes_init(mm);
        mm->map_count = 0;
        mm->locked_vm = 0;
-       atomic_set(&mm->has_pinned, 0);
        atomic64_set(&mm->pinned_vm, 0);
        memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
        spin_lock_init(&mm->page_table_lock);
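
mm_init() no longer clears a dedicated has_pinned counter; upstream that boolean was folded into the mm's flags word, so the single flags initialization covers it. A small standalone sketch of that pattern follows, with struct fake_mm and FAKE_MMF_HAS_PINNED as stand-ins for the kernel types.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FAKE_MMF_HAS_PINNED	0	/* bit number inside the flags word */

struct fake_mm {
	atomic_ulong flags;		/* zeroed once at init time */
};

static void fake_mm_init(struct fake_mm *mm)
{
	atomic_init(&mm->flags, 0);	/* one init covers every flag bit */
}

static void mark_has_pinned(struct fake_mm *mm)
{
	atomic_fetch_or(&mm->flags, 1UL << FAKE_MMF_HAS_PINNED);
}

static bool has_pinned(struct fake_mm *mm)
{
	return atomic_load(&mm->flags) & (1UL << FAKE_MMF_HAS_PINNED);
}

int main(void)
{
	struct fake_mm mm;

	fake_mm_init(&mm);		/* no separate atomic_set(..., 0) needed */
	mark_has_pinned(&mm);
	printf("has_pinned: %d\n", has_pinned(&mm));
	return 0;
}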
@@ -1977,8 +1982,7 @@ static __latent_entropy struct task_struct *copy_process(
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif
        retval = -EAGAIN;
-       if (atomic_read(&p->real_cred->user->processes) >=
-                       task_rlimit(p, RLIMIT_NPROC)) {
+       if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
                if (p->real_cred->user != INIT_USER &&
                    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
                        goto bad_fork_free;
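
The per-user RLIMIT_NPROC accounting now goes through the ucounts machinery instead of user_struct's processes counter; the unchanged lines keep the existing escape hatch for INIT_USER and CAP_SYS_RESOURCE/CAP_SYS_ADMIN. A toy version of the charge-and-check logic follows, with every identifier invented for the example.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct task_counter {
	long count;	/* tasks currently charged */
	long max;	/* RLIMIT_NPROC-style ceiling */
};

static int charge_task(struct task_counter *tc, bool privileged)
{
	tc->count++;			/* optimistic charge, as on fork */
	if (tc->count > tc->max && !privileged) {
		tc->count--;		/* undo, mirroring the error path */
		return -EAGAIN;
	}
	return 0;			/* privileged callers may exceed the limit */
}

int main(void)
{
	struct task_counter tc = { .count = 0, .max = 2 };

	charge_task(&tc, false);
	charge_task(&tc, false);
	printf("third fork -> %d (expected -EAGAIN = %d)\n",
	       charge_task(&tc, false), -EAGAIN);
	return 0;
}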
@@ -1999,7 +2003,7 @@ static __latent_entropy struct task_struct *copy_process(
                goto bad_fork_cleanup_count;
 
        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
-       p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
+       p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
        p->flags |= PF_FORKNOEXEC;
        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
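
Clearing PF_NO_SETAFFINITY here means a child forked from an affinity-pinned kernel context starts out with its affinity changeable again. The idiom is plain flag masking; a tiny standalone demo follows (the PF_* values are hard-coded copies for a standalone build and should be treated as illustrative).

#include <stdio.h>

#define PF_IDLE			0x00000002
#define PF_WQ_WORKER		0x00000020
#define PF_FORKNOEXEC		0x00000040
#define PF_SUPERPRIV		0x00000100
#define PF_NO_SETAFFINITY	0x04000000

int main(void)
{
	unsigned int parent_flags = PF_WQ_WORKER | PF_NO_SETAFFINITY;
	unsigned int child_flags = parent_flags;

	/* child starts from the parent's flags, minus the non-inherited bits */
	child_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
	child_flags |= PF_FORKNOEXEC;

	printf("parent=%#x child=%#x\n", parent_flags, child_flags);
	return 0;
}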
@@ -2008,7 +2012,6 @@ static __latent_entropy struct task_struct *copy_process(
        spin_lock_init(&p->alloc_lock);
 
        init_sigpending(&p->pending);
-       p->sigqueue_cache = NULL;
 
        p->utime = p->stime = p->gtime = 0;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
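
The explicit sigqueue_cache reset disappears from this path; the field was a one-slot per-task cache for sigqueue allocations. The sketch below shows that caching pattern in isolation, as an illustration of the idea only, not the kernel's signal code, and every name in it is invented.

#include <stdlib.h>

struct sigitem { int signo; };

struct one_slot_cache { struct sigitem *cached; };

static struct sigitem *cache_alloc(struct one_slot_cache *c)
{
	struct sigitem *it = c->cached;

	if (it) {			/* fast path: reuse the cached object */
		c->cached = NULL;
		return it;
	}
	return malloc(sizeof(*it));	/* slow path: hit the allocator */
}

static void cache_free(struct one_slot_cache *c, struct sigitem *it)
{
	if (!c->cached)			/* keep one object for the next alloc */
		c->cached = it;
	else
		free(it);
}

int main(void)
{
	struct one_slot_cache c = { NULL };
	struct sigitem *a = cache_alloc(&c);

	cache_free(&c, a);		/* parked in the cache */
	a = cache_alloc(&c);		/* reused without malloc() */
	free(a);
	return 0;
}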
@@ -2250,6 +2253,8 @@ static __latent_entropy struct task_struct *copy_process(
 
        klp_copy_process(p);
 
+       sched_core_fork(p);
+
        spin_lock(&current->sighand->siglock);
 
        /*
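
sched_core_fork() is the child-side hook of the core-scheduling work: the child picks up a reference to the parent's core cookie, and the sched_core_free() calls added in __put_task_struct() above and in bad_fork_cancel_cgroup below drop it again. A compact sketch of a shared, refcounted cookie follows; struct cookie, cookie_get/put and the toy_* helpers are invented for the illustration and differ from the kernel's sched_core_* code.

#include <stdatomic.h>
#include <stdlib.h>

struct cookie {
	atomic_int refs;
};

static struct cookie *cookie_get(struct cookie *ck)
{
	if (ck)
		atomic_fetch_add(&ck->refs, 1);
	return ck;
}

static void cookie_put(struct cookie *ck)
{
	if (ck && atomic_fetch_sub(&ck->refs, 1) == 1)
		free(ck);		/* last reference gone */
}

struct toy_task { struct cookie *core_cookie; };

/* ~ sched_core_fork(): the child shares the parent's cookie */
static void toy_core_fork(struct toy_task *child, struct toy_task *parent)
{
	child->core_cookie = cookie_get(parent->core_cookie);
}

/* ~ sched_core_free(): called from error unwinding and the final put */
static void toy_core_free(struct toy_task *t)
{
	cookie_put(t->core_cookie);
	t->core_cookie = NULL;
}

int main(void)
{
	struct cookie *ck = calloc(1, sizeof(*ck));
	struct toy_task parent = { ck }, child = { NULL };

	atomic_init(&ck->refs, 1);
	toy_core_fork(&child, &parent);
	toy_core_free(&child);		/* e.g. bad_fork_cancel_cgroup unwinding */
	toy_core_free(&parent);		/* last reference: cookie is freed */
	return 0;
}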
@@ -2337,6 +2342,7 @@ static __latent_entropy struct task_struct *copy_process(
        return p;
 
 bad_fork_cancel_cgroup:
+       sched_core_free(p);
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        cgroup_cancel_fork(p, args);
@@ -2385,10 +2391,10 @@ bad_fork_cleanup_threadgroup_lock:
 #endif
        delayacct_tsk_free(p);
 bad_fork_cleanup_count:
-       atomic_dec(&p->cred->user->processes);
+       dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
        exit_creds(p);
 bad_fork_free:
-       p->state = TASK_DEAD;
+       WRITE_ONCE(p->__state, TASK_DEAD);
        put_task_stack(p);
        delayed_free_task(p);
 fork_out:
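
The bad_fork_* labels unwind copy_process() in reverse order of setup: the NPROC charge taken via the credentials is dropped with dec_rlimit_ucounts(), and the half-built task is marked TASK_DEAD through the new WRITE_ONCE()/__state form before its stack is put. A minimal goto-unwind skeleton in the same style follows, with invented resources and failure points.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static long nproc_charge;		/* stands in for the ucounts NPROC charge */

static int toy_copy_process(int fail_at)
{
	void *stack;
	int err;

	stack = malloc(64);		/* ~ dup_task_struct() */
	if (!stack)
		return -ENOMEM;

	if (fail_at == 1) {		/* failure before the charge is taken */
		err = -EAGAIN;
		goto bad_fork_free;
	}

	nproc_charge++;			/* ~ copy_creds() charging NPROC */

	if (fail_at == 2) {		/* failure after the charge is taken */
		err = -EINVAL;
		goto bad_fork_cleanup_count;
	}

	/* success: charge and stack stay for the new task's lifetime */
	free(stack);			/* toy only, to keep the demo leak-free */
	return 0;

bad_fork_cleanup_count:
	nproc_charge--;			/* ~ dec_rlimit_ucounts(..., 1) */
bad_fork_free:
	free(stack);			/* ~ mark TASK_DEAD, put the stack */
	return err;
}

int main(void)
{
	printf("fail_at=2 -> %d, charge=%ld\n", toy_copy_process(2), nproc_charge);
	printf("fail_at=0 -> %d, charge=%ld\n", toy_copy_process(0), nproc_charge);
	return 0;
}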
@@ -2408,7 +2414,7 @@ static inline void init_idle_pids(struct task_struct *idle)
        }
 }
 
-struct task_struct *fork_idle(int cpu)
+struct task_struct * __init fork_idle(int cpu)
 {
        struct task_struct *task;
        struct kernel_clone_args args = {
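
fork_idle() gains an __init tag because it is only called during early boot when the per-CPU idle tasks are set up, so its text can live in the discardable init sections. The userspace-buildable sketch below only shows what such a tag amounts to mechanically (a section attribute); MY_INIT and the section name are stand-ins, not the kernel's __init.

#include <stdio.h>

/* stand-in for an __init-style marker: just a section placement attribute */
#define MY_INIT __attribute__((__section__(".init.text.demo")))

static int MY_INIT boot_only_setup(int cpu)
{
	/* in the kernel this would be fork_idle(cpu)-style one-time work */
	return cpu * 2;
}

int main(void)
{
	/* only valid before the "init" sections would be discarded */
	printf("boot_only_setup(1) = %d\n", boot_only_setup(1));
	return 0;
}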
@@ -2998,6 +3004,12 @@ int ksys_unshare(unsigned long unshare_flags)
        if (err)
                goto bad_unshare_cleanup_cred;
 
+       if (new_cred) {
+               err = set_cred_ucounts(new_cred);
+               if (err)
+                       goto bad_unshare_cleanup_cred;
+       }
+
        if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
                if (do_sysvsem) {
                        /*
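
When unshare() builds a new set of credentials (the CLONE_NEWUSER case), the added set_cred_ucounts() call binds them to the ucounts of their user namespace before anything is committed, and a failure takes the usual cleanup exit. The shape is the familiar prepare, complete, then commit-or-bail pattern; the miniature below uses invented names (cred_demo, bind_accounting, unshare_demo) and is not the kernel's code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct cred_demo {
	int uid;
	void *accounting;		/* ~ the cred's ucounts reference */
};

static int bind_accounting(struct cred_demo *c)
{
	c->accounting = malloc(8);	/* ~ set_cred_ucounts() lookup */
	return c->accounting ? 0 : -ENOMEM;
}

static int unshare_demo(int want_new_cred)
{
	struct cred_demo *new_cred = NULL;
	int err;

	if (want_new_cred) {
		new_cred = calloc(1, sizeof(*new_cred));
		if (!new_cred)
			return -ENOMEM;

		err = bind_accounting(new_cred);	/* the step the diff adds */
		if (err)
			goto cleanup_cred;
	}

	/* commit point: the new credentials and other unshared state go live */
	if (new_cred) {
		printf("new credentials committed\n");
		free(new_cred->accounting);
		free(new_cred);
	}
	return 0;

cleanup_cred:
	free(new_cred);
	return err;
}

int main(void)
{
	return unshare_demo(1) ? 1 : 0;
}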