// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Debug aware fast / slowpath lock, trylock and unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
                                                  unsigned int state,
                                                  unsigned int subclass)
{
        int ret;

        might_sleep();
        mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        ret = __rt_mutex_lock(&lock->rtmutex, state);
        if (ret)
                mutex_release(&lock->dep_map, _RET_IP_);
        return ret;
}

void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
        __rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
        __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
        return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
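
/*
 * Example: a minimal, hedged usage sketch (the function and the lock are
 * hypothetical, not part of this file). An interruptible acquisition must
 * check for -EINTR and back out without touching the protected data:
 *
 *	static int do_locked_work(struct rt_mutex *lock)
 *	{
 *		int ret = rt_mutex_lock_interruptible(lock);
 *
 *		if (ret)
 *			return ret;	// -EINTR: interrupted by a signal
 *		// ... sleepable critical section ...
 *		rt_mutex_unlock(lock);
 *		return 0;
 *	}
 */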

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 *  1 on success
 *  0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        int ret;

        if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
                return 0;

        ret = __rt_mutex_trylock(&lock->rtmutex);
        if (ret)
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

        return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
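
/*
 * Example: a hedged sketch of opportunistic locking from a region that must
 * not sleep (names are illustrative). Per the constraint above, this is fine
 * under preempt_disable(), but never from hard or soft interrupt context:
 *
 *	static bool try_update_stats(struct rt_mutex *lock, u64 *stat)
 *	{
 *		if (!rt_mutex_trylock(lock))
 *			return false;	// contended, caller retries later
 *		(*stat)++;		// short, non-sleeping critical section
 *		rt_mutex_unlock(lock);
 *		return true;
 *	}
 */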

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        mutex_release(&lock->dep_map, _RET_IP_);
        __rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/*
 * Futex variants, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
        return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
        return __rt_mutex_slowtrylock(lock);
}

/**
 * __rt_mutex_futex_unlock - Futex variant; since futex variants do not use
 * the fast-path, this can be simple and will not need to retry.
 *
 * @lock:	The rt_mutex to be unlocked
 * @wqh:	The wake queue head from which to get the next lock waiter
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
                                     struct rt_wake_q_head *wqh)
{
        lockdep_assert_held(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                return false; /* done */
        }

        /*
         * We've already deboosted, mark_wakeup_next_waiter() will
         * keep preemption disabled when we drop the wait_lock, to
         * avoid inversion prior to the wakeup. The preempt_disable()
         * therein pairs with rt_mutex_postunlock().
         */
        mark_wakeup_next_waiter(wqh, lock);

        return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
        DEFINE_RT_WAKE_Q(wqh);
        unsigned long flags;
        bool postunlock;

        raw_spin_lock_irqsave(&lock->wait_lock, flags);
        postunlock = __rt_mutex_futex_unlock(lock, &wqh);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

        if (postunlock)
                rt_mutex_postunlock(&wqh);
}

/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock:	The rt_mutex to be initialized
 * @name:	The lock name used for debugging
 * @key:	The lock class key used for debugging
 *
 * Initialize the rt_mutex to the unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed.
 */
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
                             struct lock_class_key *key)
{
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        __rt_mutex_base_init(&lock->rtmutex);
        lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
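
/*
 * Example: a hedged initialization sketch. Callers normally go through the
 * rt_mutex_init() wrapper macro, which supplies @name and a static lock
 * class key for lockdep; the structure below is hypothetical:
 *
 *	struct my_dev {
 *		struct rt_mutex lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		rt_mutex_init(&dev->lock);	// must start out unlocked
 *	}
 */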

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
                                        struct task_struct *proxy_owner)
{
        __rt_mutex_base_init(lock);
        rt_mutex_set_owner(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
}
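
/*
 * Example: a hedged sketch of the pi_state-style lifecycle served by the two
 * proxy-owner helpers above (simplified; the real user is the PI-futex code):
 *
 *	// pi_state not yet visible, so no serialization needed:
 *	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, owner_task);
 *	// ... publish pi_state; waiters use the proxy-lock API below ...
 *	// once the pi_state is about to be freed and thus private again:
 *	rt_mutex_proxy_unlock(&pi_state->pi_mutex);
 */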

/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
                                        struct rt_mutex_waiter *waiter,
                                        struct task_struct *task)
{
        int ret;

        lockdep_assert_held(&lock->wait_lock);

        if (try_to_take_rt_mutex(lock, task, NULL))
                return 1;

        /* We enforce deadlock detection for futexes */
        ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
                                      RT_MUTEX_FULL_CHAINWALK);

        if (ret && !rt_mutex_owner(lock)) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
                 * released the lock while we were walking the
                 * pi chain. Let the waiter sort it out.
                 */
                ret = 0;
        }

        return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
                                      struct rt_mutex_waiter *waiter,
                                      struct task_struct *task)
{
        int ret;

        raw_spin_lock_irq(&lock->wait_lock);
        ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
        if (unlikely(ret))
                remove_waiter(lock, waiter);
        raw_spin_unlock_irq(&lock->wait_lock);

        return ret;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:	the rt_mutex we were woken on
 * @to:		the timeout, null if none. The hrtimer should already have
 *		been started.
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
                                     struct hrtimer_sleeper *to,
                                     struct rt_mutex_waiter *waiter)
{
        int ret;

        raw_spin_lock_irq(&lock->wait_lock);
        /* sleep on the mutex */
        set_current_state(TASK_INTERRUPTIBLE);
        ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);
        raw_spin_unlock_irq(&lock->wait_lock);

        return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:	the rt_mutex we were woken on
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find ourselves the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we're done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregard its return value.
 *
 * Special API call for PI-futex support
 */
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
                                         struct rt_mutex_waiter *waiter)
{
        bool cleanup = false;

        raw_spin_lock_irq(&lock->wait_lock);
        /*
         * Do an unconditional try-lock, this deals with the lock stealing
         * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
         * sets a NULL owner.
         *
         * We're not interested in the return value, because the subsequent
         * test on rt_mutex_owner() will infer that. If the trylock succeeded,
         * we will own the lock and it will have removed the waiter. If we
         * failed the trylock, we're still not owner and we need to remove
         * ourselves.
         */
        try_to_take_rt_mutex(lock, current, waiter);
        /*
         * Unless we're the owner, we're still enqueued on the wait_list.
         * So check if we became owner, if not, take us off the wait_list.
         */
        if (rt_mutex_owner(lock) != current) {
                remove_waiter(lock, waiter);
                cleanup = true;
        }
        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock_irq(&lock->wait_lock);

        return cleanup;
}
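
/*
 * Example: a hedged sketch of how the proxy-lock calls chain together for a
 * futex-style user (error handling is simplified and names are illustrative):
 *
 *	ret = rt_mutex_start_proxy_lock(lock, &waiter, task);
 *	if (ret == 1) {
 *		// acquired on behalf of @task: wake it, nothing to clean up
 *	} else if (ret == 0) {
 *		// @task blocked; later, in @task's own context:
 *		ret = rt_mutex_wait_proxy_lock(lock, timeout, &waiter);
 *		if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *			ret = 0;	// we became owner despite the failure
 *	}
 */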

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        struct rt_mutex_base *next_lock;
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }
        next_lock = waiter->lock;
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);

        rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
                                   next_lock, NULL, task);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
        rt_mutex_wake_up_q(wqh);
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
        DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
        DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif