locking/rtmutex: Provide the spin/rwlock core lock function
author Thomas Gleixner <tglx@linutronix.de>
Sun, 15 Aug 2021 21:28:25 +0000 (23:28 +0200)
committer Ingo Molnar <mingo@kernel.org>
Tue, 17 Aug 2021 15:45:37 +0000 (17:45 +0200)
A simplified version of the rtmutex slowlock function, which neither handles
signals nor timeouts, and is careful about preserving the state of the
blocked task across the lock operation.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211303.770228446@linutronix.de
kernel/locking/rtmutex.c
kernel/locking/rtmutex_common.h

index 949781a..951bef0 100644 (file)
@@ -1416,3 +1416,63 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
        return rt_mutex_slowlock(lock, state);
 }
 #endif /* RT_MUTEX_BUILD_MUTEX */
+
+#ifdef RT_MUTEX_BUILD_SPINLOCKS
+/*
+ * Functions required for spin/rw_lock substitution on RT kernels
+ */
+
+/**
+ * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
+ * @lock:      The underlying RT mutex
+ *
+ * Must be called with @lock->wait_lock held. Unlike the mutex slow path
+ * this handles neither signals nor timeouts; the task state saved on
+ * entry is restored once the lock has been acquired.
+ */
+static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
+{
+       struct rt_mutex_waiter waiter;
+
+       lockdep_assert_held(&lock->wait_lock);
+
+       /* Avoid enqueueing a waiter if the lock can be taken right away */
+       if (try_to_take_rt_mutex(lock, current, NULL))
+               return;
+
+       rt_mutex_init_rtlock_waiter(&waiter);
+
+       /* Save current state and set state to TASK_RTLOCK_WAIT */
+       current_save_and_set_rtlock_wait_state();
+
+       /* Enqueue the waiter; rtlocks request the minimal chainwalk variant */
+       task_blocks_on_rt_mutex(lock, &waiter, current, RT_MUTEX_MIN_CHAINWALK);
+
+       for (;;) {
+               /* Try to acquire the lock again */
+               if (try_to_take_rt_mutex(lock, current, &waiter))
+                       break;
+
+               /* Drop wait_lock and reenable interrupts across the schedule */
+               raw_spin_unlock_irq(&lock->wait_lock);
+
+               schedule_rtlock();
+
+               raw_spin_lock_irq(&lock->wait_lock);
+               /* A wakeup may have made us runnable; block again for the retry */
+               set_current_state(TASK_RTLOCK_WAIT);
+       }
+
+       /* Restore the task state */
+       current_restore_rtlock_saved_state();
+
+       /*
+        * try_to_take_rt_mutex() sets the waiter bit unconditionally.
+        * We might have to fix that up:
+        */
+       fixup_rt_mutex_waiters(lock);
+       debug_rt_mutex_free_waiter(&waiter);
+}
+
+/*
+ * rtlock_slowlock - Slow path entry point which takes @lock->wait_lock
+ * with interrupts disabled around the actual slow path work.
+ */
+static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+       rtlock_slowlock_locked(lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+}
+
+#endif /* RT_MUTEX_BUILD_SPINLOCKS */
index 424ee0f..ccf0e36 100644 (file)
@@ -181,7 +181,7 @@ static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
        waiter->task = NULL;
 }
 
-static inline void rtlock_init_rtmutex_waiter(struct rt_mutex_waiter *waiter)
+static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
 {
        rt_mutex_init_waiter(waiter);
        waiter->wake_state = TASK_RTLOCK_WAIT;