locking/mutex: Restructure wait loop
author Peter Zijlstra <peterz@infradead.org>
Fri, 2 Sep 2016 11:42:12 +0000 (13:42 +0200)
committer Ingo Molnar <mingo@kernel.org>
Tue, 25 Oct 2016 09:31:53 +0000 (11:31 +0200)
Doesn't really matter yet, but pull the HANDOFF and trylock out from
under the wait_lock.

The intention is to add an optimistic spin loop here, which requires
that we not hold the wait_lock, so shuffle the code around in
preparation.
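
In skeleton form (a simplified sketch using the identifiers from the
patch below; the signal/ww_mutex checks and debug paths are elided),
the wait loop goes from:

	for (;;) {
		if (__mutex_trylock(lock, first))
			break;

		/* signal / ww_mutex checks ... */

		__set_task_state(task, state);
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);

		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
			first = true;
			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}
	}

to:

	set_task_state(task, state);
	for (;;) {
		if (__mutex_trylock(lock, first))	/* under wait_lock */
			goto acquired;

		/* signal / ww_mutex checks, still under wait_lock ... */

		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();

		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
			first = true;
			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_task_state(task, state);
		if (__mutex_trylock(lock, first))	/* wait_lock not held */
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_task_state(task, TASK_RUNNING);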

Also clarify the purpose of taking the wait_lock in the wait loop; it's
tempting to want to avoid it altogether, but the cancellation cases
need it to avoid losing wakeups.
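
To make the lost-wakeup case concrete, a hypothetical interleaving
(illustrative only; the helpers are those used in the patch below) if
the cancellation tests ran without the wait_lock held:

	/*
	 * waiter (cancelling)                  mutex_unlock()
	 * -------------------                  --------------
	 *                                      spin_lock(&lock->wait_lock);
	 *                                      sees us first on wait_list,
	 * signal_pending_state() -> true       hands us the lock
	 * remove ourselves from wait_list        (MUTEX_FLAG_HANDOFF)
	 * return -EINTR                        wake_up_process(us);
	 *                                      spin_unlock(&lock->wait_lock);
	 *
	 * The lock now belongs to a task that has already given up, and
	 * the wakeup is wasted; no other waiter gets woken. Doing the
	 * cancellation tests under wait_lock, with a trylock first,
	 * orders cancellation against the unlock and picks up such a
	 * handoff.
	 */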

Suggested-by: Waiman Long <waiman.long@hpe.com>
Tested-by: Jason Low <jason.low2@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/mutex.c

index b4ebd8b..8bb2304 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -631,13 +631,21 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
        lock_contended(&lock->dep_map, ip);
 
+       set_task_state(task, state);
        for (;;) {
+               /*
+                * Once we hold wait_lock, we're serialized against
+                * mutex_unlock() handing the lock off to us; do a trylock
+                * before testing the error conditions to make sure we pick up
+                * the handoff.
+                */
                if (__mutex_trylock(lock, first))
-                       break;
+                       goto acquired;
 
                /*
-                * got a signal? (This code gets eliminated in the
-                * TASK_UNINTERRUPTIBLE case.)
+                * Check for signals and wound conditions while holding
+                * wait_lock. This ensures the lock cancellation is ordered
+                * against mutex_unlock() and wake-ups do not go missing.
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        ret = -EINTR;
@@ -650,16 +658,27 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                                goto err;
                }
 
-               __set_task_state(task, state);
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
-               spin_lock_mutex(&lock->wait_lock, flags);
 
                if (!first && __mutex_waiter_is_first(lock, &waiter)) {
                        first = true;
                        __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
                }
+
+               set_task_state(task, state);
+               /*
+                * Here we order against unlock; we must either see it change
+                * state back to RUNNING and fall through the next schedule(),
+                * or we must see its unlock and acquire.
+                */
+               if (__mutex_trylock(lock, first))
+                       break;
+
+               spin_lock_mutex(&lock->wait_lock, flags);
        }
+       spin_lock_mutex(&lock->wait_lock, flags);
+acquired:
        __set_task_state(task, TASK_RUNNING);
 
        mutex_remove_waiter(lock, &waiter, task);
@@ -682,6 +701,7 @@ skip_wait:
        return 0;
 
 err:
+       __set_task_state(task, TASK_RUNNING);
        mutex_remove_waiter(lock, &waiter, task);
        spin_unlock_mutex(&lock->wait_lock, flags);
        debug_mutex_free_waiter(&waiter);