powerpc: Move spinlock implementation to simple_spinlock
[linux-2.6-microblaze.git] / kernel / exit.c
index 1b772f2..727150f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -66,7 +66,6 @@
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
-#include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 
 static void __unhash_process(struct task_struct *p, bool group_dead)
@@ -228,8 +227,9 @@ repeat:
                goto repeat;
 }
 
-void rcuwait_wake_up(struct rcuwait *w)
+int rcuwait_wake_up(struct rcuwait *w)
 {
+       int ret = 0;
        struct task_struct *task;
 
        rcu_read_lock();
@@ -237,7 +237,7 @@ void rcuwait_wake_up(struct rcuwait *w)
        /*
         * Order condition vs @task, such that everything prior to the load
         * of @task is visible. This is the condition as to why the user called
-        * rcuwait_trywake() in the first place. Pairs with set_current_state()
+        * rcuwait_wake() in the first place. Pairs with set_current_state()
         * barrier (A) in rcuwait_wait_event().
         *
         *    WAIT                WAKE
@@ -249,8 +249,10 @@ void rcuwait_wake_up(struct rcuwait *w)
 
        task = rcu_dereference(w->task);
        if (task)
-               wake_up_process(task);
+               ret = wake_up_process(task);
        rcu_read_unlock();
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(rcuwait_wake_up);
 
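[Note: the two hunks above let rcuwait_wake_up() report whether a task was actually woken, by propagating the return value of wake_up_process(). A minimal sketch of a waiter/waker pair using the new return value; the my_work structure and its done flag are hypothetical, only the rcuwait calls and the barrier pairing come from the kernel API:

	#include <linux/rcuwait.h>
	#include <linux/sched.h>
	#include <linux/printk.h>

	struct my_work {			/* hypothetical example container */
		struct rcuwait wait;
		bool done;
	};

	static void my_waiter(struct my_work *w)
	{
		/* set_current_state() inside rcuwait_wait_event() is barrier
		 * (A); it pairs with the smp_mb() (B) that rcuwait_wake_up()
		 * issues before loading w->wait.task. */
		rcuwait_wait_event(&w->wait, READ_ONCE(w->done),
				   TASK_UNINTERRUPTIBLE);
	}

	static void my_waker(struct my_work *w)
	{
		WRITE_ONCE(w->done, true);	/* store the condition first */
		if (!rcuwait_wake_up(&w->wait))
			pr_debug("no sleeper; waiter saw done==true itself\n");
	}
]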
@@ -438,17 +440,17 @@ static void exit_mm(void)
        sync_mm_rss(mm);
        /*
         * Serialize with any possible pending coredump.
-        * We must hold mmap_sem around checking core_state
+        * We must hold mmap_lock around checking core_state
         * and clearing tsk->mm.  The core-inducing thread
         * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        core_state = mm->core_state;
        if (core_state) {
                struct core_thread self;
 
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
 
                self.task = current;
                self.next = xchg(&core_state->dumper.next, &self);
@@ -466,14 +468,14 @@ static void exit_mm(void)
                        freezable_schedule();
                }
                __set_current_state(TASK_RUNNING);
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
        }
        mmgrab(mm);
        BUG_ON(mm != current->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(current);
        current->mm = NULL;
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        enter_lazy_tlb(mm, current);
        task_unlock(current);
        mm_update_next_owner(mm);
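[Note: the exit_mm() hunks above are part of the mechanical conversion to the mmap locking API. The conversion is behavior-preserving because the new helpers are thin 1:1 wrappers around the old rwsem calls; roughly, as sketched from include/linux/mmap_lock.h in the same series, with mmap_sem renamed to mmap_lock:

	static inline void mmap_read_lock(struct mm_struct *mm)
	{
		down_read(&mm->mmap_lock);
	}

	static inline void mmap_read_unlock(struct mm_struct *mm)
	{
		up_read(&mm->mmap_lock);
	}

Funneling every call site through one wrapper is what lets the lock's implementation change later without touching callers such as exit_mm() again.]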
@@ -708,8 +710,12 @@ void __noreturn do_exit(long code)
        struct task_struct *tsk = current;
        int group_dead;
 
-       profile_task_exit(tsk);
-       kcov_task_exit(tsk);
+       /*
+        * We can get here from a kernel oops, sometimes with preemption off.
+        * Start by checking for critical errors.
+        * Then fix up important state like USER_DS and preemption.
+        * Then do everything else.
+        */
 
        WARN_ON(blk_needs_flush_plug(tsk));
 
@@ -727,6 +733,16 @@ void __noreturn do_exit(long code)
         */
        set_fs(USER_DS);
 
+       if (unlikely(in_atomic())) {
+               pr_info("note: %s[%d] exited with preempt_count %d\n",
+                       current->comm, task_pid_nr(current),
+                       preempt_count());
+               preempt_count_set(PREEMPT_ENABLED);
+       }
+
+       profile_task_exit(tsk);
+       kcov_task_exit(tsk);
+
        ptrace_event(PTRACE_EVENT_EXIT, code);
 
        validate_creds_for_do_exit(tsk);
@@ -744,13 +760,6 @@ void __noreturn do_exit(long code)
 
        exit_signals(tsk);  /* sets PF_EXITING */
 
-       if (unlikely(in_atomic())) {
-               pr_info("note: %s[%d] exited with preempt_count %d\n",
-                       current->comm, task_pid_nr(current),
-                       preempt_count());
-               preempt_count_set(PREEMPT_ENABLED);
-       }
-
        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
                sync_mm_rss(tsk->mm);
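[Note: the last three hunks reorder do_exit() so the preempt_count fixup runs before anything that can block. A sketch of the hazard being avoided; the names simply mirror the hunks above:

	void __noreturn do_exit(long code)
	{
		struct task_struct *tsk = current;

		/* An oops can enter do_exit() with preemption still off,
		 * so repair the count before any might-sleep call. */
		if (unlikely(in_atomic()))
			preempt_count_set(PREEMPT_ENABLED);

		/* These may block; called before the fixup they could
		 * trip the scheduler's "scheduling while atomic" check. */
		profile_task_exit(tsk);
		kcov_task_exit(tsk);
		/* ... rest of exit ... */
	}
]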