From ebbdc41e90ffce8b6bb3cbba1801ede2dd07a89b Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sun, 15 Aug 2021 23:28:00 +0200
Subject: [PATCH] locking/rtmutex: Provide rt_mutex_slowlock_locked()

Split the inner workings of rt_mutex_slowlock() out into a separate
function, which can be reused by the upcoming RT lock substitutions,
e.g. for rw_semaphores.

Signed-off-by: Thomas Gleixner
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Link: https://lore.kernel.org/r/20210815211302.841971086@linutronix.de
---
 kernel/locking/rtmutex.c     | 100 ++++++++++++++++++++---------------
 kernel/locking/rtmutex_api.c |   2 +-
 2 files changed, 59 insertions(+), 43 deletions(-)

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b31f6cbe3a30..3d0b29cb5e63 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1106,7 +1106,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
 }
 
 /**
- * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+ * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
  * @lock:		 the rt_mutex to take
  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
  *			 or TASK_UNINTERRUPTIBLE)
@@ -1115,10 +1115,10 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
  *
  * Must be called with lock->wait_lock held and interrupts disabled
  */
-static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
-				       unsigned int state,
-				       struct hrtimer_sleeper *timeout,
-				       struct rt_mutex_waiter *waiter)
+static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+					   unsigned int state,
+					   struct hrtimer_sleeper *timeout,
+					   struct rt_mutex_waiter *waiter)
 {
 	int ret = 0;
 
@@ -1168,52 +1168,37 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
 	}
 }
 
-/*
- * Slow path lock function:
+/**
+ * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
+ * @lock:	The rtmutex to block lock
+ * @state:	The task state for sleeping
+ * @chwalk:	Indicator whether full or partial chainwalk is requested
+ * @waiter:	Initializer waiter for blocking
  */
-static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
-				     unsigned int state,
-				     struct hrtimer_sleeper *timeout,
-				     enum rtmutex_chainwalk chwalk)
+static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+				       unsigned int state,
+				       enum rtmutex_chainwalk chwalk,
+				       struct rt_mutex_waiter *waiter)
 {
-	struct rt_mutex_waiter waiter;
-	unsigned long flags;
-	int ret = 0;
-
-	rt_mutex_init_waiter(&waiter);
+	int ret;
 
-	/*
-	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
-	 * be called in early boot if the cmpxchg() fast path is disabled
-	 * (debug, no architecture support). In this case we will acquire the
-	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
-	 * enable interrupts in that early boot case. So we need to use the
-	 * irqsave/restore variants.
-	 */
-	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	lockdep_assert_held(&lock->wait_lock);
 
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock, current, NULL)) {
-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+	if (try_to_take_rt_mutex(lock, current, NULL))
 		return 0;
-	}
 
 	set_current_state(state);
 
-	/* Setup the timer, when timeout != NULL */
-	if (unlikely(timeout))
-		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
-	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
+	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
 
 	if (likely(!ret))
-		/* sleep on the mutex */
-		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
+		ret = rt_mutex_slowlock_block(lock, state, NULL, waiter);
 
 	if (unlikely(ret)) {
 		__set_current_state(TASK_RUNNING);
-		remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+		remove_waiter(lock, waiter);
+		rt_mutex_handle_deadlock(ret, chwalk, waiter);
 	}
 
 	/*
@@ -1221,14 +1206,45 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * unconditionally. We might have to fix that up.
 	 */
 	fixup_rt_mutex_waiters(lock);
+	return ret;
+}
 
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
+					     unsigned int state)
+{
+	struct rt_mutex_waiter waiter;
+	int ret;
+
+	rt_mutex_init_waiter(&waiter);
 
-	/* Remove pending timer: */
-	if (unlikely(timeout))
-		hrtimer_cancel(&timeout->timer);
+	ret = __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter);
 
 	debug_rt_mutex_free_waiter(&waiter);
+	return ret;
+}
+
+/*
+ * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
+ * @lock:	The rtmutex to block lock
+ * @state:	The task state for sleeping
+ */
+static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+				     unsigned int state)
+{
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
+	 * be called in early boot if the cmpxchg() fast path is disabled
+	 * (debug, no architecture support). In this case we will acquire the
+	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
+	 * enable interrupts in that early boot case. So we need to use the
+	 * irqsave/restore variants.
+	 */
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	ret = __rt_mutex_slowlock_locked(lock, state);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	return ret;
 }
@@ -1239,7 +1255,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
 
-	return rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+	return rt_mutex_slowlock(lock, state);
 }
 
 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index 38de4b137b9e..c5136f4998bb 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -342,7 +342,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = rt_mutex_slowlock_block(lock, TASK_INTERRUPTIBLE, to, waiter);
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
-- 
2.20.1
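
The split above leaves __rt_mutex_slowlock_locked() as the piece that can be called with lock->wait_lock already held, which is what the changelog's upcoming RT lock substitutions (e.g. rw_semaphores) need. The sketch below is illustrative only and not part of the patch: the wrapper name rt_lock_substitution_example() is hypothetical, while __rt_mutex_slowlock_locked(), struct rt_mutex_base and the raw_spin_lock_irqsave() pattern are taken from the diff.

/*
 * Illustrative sketch (not from the patch): a lock substitution that
 * manages lock->wait_lock itself can reuse the split-out helper instead
 * of going through rt_mutex_slowlock(), which always takes wait_lock on
 * its own. The locking mirrors rt_mutex_slowlock() in the diff above.
 */
static int rt_lock_substitution_example(struct rt_mutex_base *rtm,
					unsigned int state)
{
	unsigned long flags;
	int ret;

	/* Caller-side serialization, as in rt_mutex_slowlock() */
	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	ret = __rt_mutex_slowlock_locked(rtm, state);
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);

	return ret;
}

Keeping the wait_lock acquisition in the thin rt_mutex_slowlock() wrapper, rather than inside the blocking path, is what makes this kind of reuse possible.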