locking/mutex: Make mutex::wait_lock raw
author    Thomas Gleixner <tglx@linutronix.de>
          Sun, 15 Aug 2021 21:28:36 +0000 (23:28 +0200)
committer Ingo Molnar <mingo@kernel.org>
          Tue, 17 Aug 2021 17:03:33 +0000 (19:03 +0200)
The wait_lock of mutex is really a low-level lock. Convert it to a
raw_spinlock, like the wait_lock of rtmutex.
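
Background, as a hedged aside (not part of this change): on PREEMPT_RT a
spinlock_t is mapped onto a sleeping, rtmutex-based lock, whereas a
raw_spinlock_t remains a true spinning lock that disables preemption and
can therefore be taken from contexts which must not sleep. That is the
sense in which a low-level wait_lock wants to be raw. A minimal sketch of
the two lock APIs (the symbol names below are made up for illustration):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(normal_lock);	   /* spinlock_t: sleeps on PREEMPT_RT  */
	static DEFINE_RAW_SPINLOCK(lowlevel_lock); /* raw_spinlock_t: always spins      */

	static void example(void)
	{
		spin_lock(&normal_lock);	/* may block/sleep on an RT kernel   */
		spin_unlock(&normal_lock);

		raw_spin_lock(&lowlevel_lock);	/* disables preemption, never sleeps */
		raw_spin_unlock(&lowlevel_lock);
	}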

[ mingo: backmerged the test_lockup.c build fix by bigeasy. ]
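
For context on that test_lockup.c build fix (an illustration, not part of
the patch): with CONFIG_DEBUG_SPINLOCK the debug ->magic word lives in
struct raw_spinlock, and a spinlock_t only reaches it through its embedded
->rlock member. Once mutex::wait_lock is a raw_spinlock_t that extra
.rlock step is gone, so the offsetof() in test_lockup.c has to drop it.
Roughly (simplified sketch of the types in include/linux/spinlock_types.h):

	typedef struct raw_spinlock {
		arch_spinlock_t		raw_lock;
	#ifdef CONFIG_DEBUG_SPINLOCK
		unsigned int		magic, owner_cpu;
		void			*owner;
	#endif
		/* ... */
	} raw_spinlock_t;

	typedef struct spinlock {
		union {
			struct raw_spinlock rlock;	/* spinlock_t magic == rlock.magic */
			/* ... */
		};
	} spinlock_t;

	/* old: offsetof(struct mutex, wait_lock.rlock.magic)	- spinlock_t wait_lock     */
	/* new: offsetof(struct mutex, wait_lock.magic)		- raw_spinlock_t wait_lock */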

Co-developed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211304.166863404@linutronix.de
include/linux/mutex.h
kernel/locking/mutex.c
lib/test_lockup.c

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index db33675..0bbc872 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -50,7 +50,7 @@
  */
 struct mutex {
        atomic_long_t           owner;
-       spinlock_t              wait_lock;
+       raw_spinlock_t          wait_lock;
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
@@ -105,7 +105,7 @@ do {                                                                        \
 
 #define __MUTEX_INITIALIZER(lockname) \
                { .owner = ATOMIC_LONG_INIT(0) \
-               , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+               , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
                , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
                __DEBUG_MUTEX_INITIALIZER(lockname) \
                __DEP_MAP_MUTEX_INITIALIZER(lockname) }
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index acbe43d..17c194b 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -42,7 +42,7 @@ void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
        atomic_long_set(&lock->owner, 0);
-       spin_lock_init(&lock->wait_lock);
+       raw_spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        osq_lock_init(&lock->osq);
@@ -486,9 +486,9 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
         * Uh oh, we raced in fastpath, check if any of the waiters need to
         * die or wound us.
         */
-       spin_lock(&lock->base.wait_lock);
+       raw_spin_lock(&lock->base.wait_lock);
        __ww_mutex_check_waiters(&lock->base, ctx);
-       spin_unlock(&lock->base.wait_lock);
+       raw_spin_unlock(&lock->base.wait_lock);
 }
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -966,7 +966,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                return 0;
        }
 
-       spin_lock(&lock->wait_lock);
+       raw_spin_lock(&lock->wait_lock);
        /*
         * After waiting to acquire the wait_lock, try again.
         */
@@ -1032,7 +1032,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                                goto err;
                }
 
-               spin_unlock(&lock->wait_lock);
+               raw_spin_unlock(&lock->wait_lock);
                schedule_preempt_disabled();
 
                first = __mutex_waiter_is_first(lock, &waiter);
@@ -1047,9 +1047,9 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
                        break;
 
-               spin_lock(&lock->wait_lock);
+               raw_spin_lock(&lock->wait_lock);
        }
-       spin_lock(&lock->wait_lock);
+       raw_spin_lock(&lock->wait_lock);
 acquired:
        __set_current_state(TASK_RUNNING);
 
@@ -1074,7 +1074,7 @@ skip_wait:
        if (ww_ctx)
                ww_mutex_lock_acquired(ww, ww_ctx);
 
-       spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&lock->wait_lock);
        preempt_enable();
        return 0;
 
@@ -1082,7 +1082,7 @@ err:
        __set_current_state(TASK_RUNNING);
        __mutex_remove_waiter(lock, &waiter);
 err_early_kill:
-       spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, ip);
        preempt_enable();
@@ -1243,7 +1243,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
                }
        }
 
-       spin_lock(&lock->wait_lock);
+       raw_spin_lock(&lock->wait_lock);
        debug_mutex_unlock(lock);
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
@@ -1260,7 +1260,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);
 
-       spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&lock->wait_lock);
 
        wake_up_q(&wake_q);
 }
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index 864554e..4d93b02 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -502,7 +502,7 @@ static int __init test_lockup_init(void)
                       offsetof(rwlock_t, magic),
                       RWLOCK_MAGIC) ||
            test_magic(lock_mutex_ptr,
-                      offsetof(struct mutex, wait_lock.rlock.magic),
+                      offsetof(struct mutex, wait_lock.magic),
                       SPINLOCK_MAGIC) ||
            test_magic(lock_rwsem_ptr,
                       offsetof(struct rw_semaphore, wait_lock.magic),