1 // SPDX-License-Identifier: GPL-2.0-only
3 * kernel/locking/mutex.c
5 * Mutexes: blocking mutual exclusion locks
7 * Started by Ingo Molnar:
9 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
11 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
12 * David Howells for suggestions and improvements.
14 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15 * from the -rt tree, where it was originally implemented for rtmutexes
16 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale and Sven Dietrich.
19 * Also see Documentation/locking/mutex-design.rst.
21 #include <linux/mutex.h>
22 #include <linux/ww_mutex.h>
23 #include <linux/sched/signal.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/wake_q.h>
26 #include <linux/sched/debug.h>
27 #include <linux/export.h>
28 #include <linux/spinlock.h>
29 #include <linux/interrupt.h>
30 #include <linux/debug_locks.h>
31 #include <linux/osq_lock.h>
33 #ifndef CONFIG_PREEMPT_RT
36 #ifdef CONFIG_DEBUG_MUTEXES
37 # define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
39 # define MUTEX_WARN_ON(cond)
43 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
45 atomic_long_set(&lock->owner, 0);
46 raw_spin_lock_init(&lock->wait_lock);
47 INIT_LIST_HEAD(&lock->wait_list);
48 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
49 osq_lock_init(&lock->osq);
52 debug_mutex_init(lock, name, key);
54 EXPORT_SYMBOL(__mutex_init);
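/*
 * Usage sketch (illustrative only, guarded out of the build): the two usual
 * ways a mutex is initialized before it may be locked -- statically with
 * DEFINE_MUTEX(), or at runtime with mutex_init(), which expands to
 * __mutex_init(). "demo_lock" and "struct demo_dev" are hypothetical names.
 */
#if 0
static DEFINE_MUTEX(demo_lock);			/* static definition + initialization */

struct demo_dev {
	struct mutex lock;
};

static void demo_dev_setup(struct demo_dev *dev)
{
	mutex_init(&dev->lock);			/* runtime init; memset()-ing a mutex is not allowed */
}
#endif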
57 * @owner: contains a 'struct task_struct *' pointer to the current lock owner;
58 * NULL means not owned. Since task_struct pointers are aligned to
59 * at least L1_CACHE_BYTES, we have low bits to store extra state.
61 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
62 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
63 * Bit2 indicates handoff has been done and we're waiting for pickup.
65 #define MUTEX_FLAG_WAITERS 0x01
66 #define MUTEX_FLAG_HANDOFF 0x02
67 #define MUTEX_FLAG_PICKUP 0x04
69 #define MUTEX_FLAGS 0x07
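/*
 * Worked illustration (pointer value made up for exposition; task_struct
 * alignment leaves the low bits of the pointer clear):
 *
 *	task                  = 0xffff888012345600
 *	owner word            = 0xffff888012345601	(task | MUTEX_FLAG_WAITERS)
 *	owner & ~MUTEX_FLAGS  = 0xffff888012345600	(the lock owner)
 *	owner &  MUTEX_FLAGS  = 0x1			(non-empty wait list)
 */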
72 * Internal helper function; C doesn't allow us to hide it :/
74 * DO NOT USE (outside of mutex code).
76 static inline struct task_struct *__mutex_owner(struct mutex *lock)
78 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
81 static inline struct task_struct *__owner_task(unsigned long owner)
83 return (struct task_struct *)(owner & ~MUTEX_FLAGS);
86 bool mutex_is_locked(struct mutex *lock)
88 return __mutex_owner(lock) != NULL;
90 EXPORT_SYMBOL(mutex_is_locked);
92 static inline unsigned long __owner_flags(unsigned long owner)
94 return owner & MUTEX_FLAGS;
98 * Returns: __mutex_owner(lock) on failure or NULL on success.
100 static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
102 unsigned long owner, curr = (unsigned long)current;
104 owner = atomic_long_read(&lock->owner);
105 for (;;) { /* must loop, can race against a flag */
106 unsigned long flags = __owner_flags(owner);
107 unsigned long task = owner & ~MUTEX_FLAGS;
110 if (flags & MUTEX_FLAG_PICKUP) {
113 flags &= ~MUTEX_FLAG_PICKUP;
114 } else if (handoff) {
115 if (flags & MUTEX_FLAG_HANDOFF)
117 flags |= MUTEX_FLAG_HANDOFF;
122 MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
126 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
133 return __owner_task(owner);
137 * Trylock or set HANDOFF
139 static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
141 return !__mutex_trylock_common(lock, handoff);
145 * Actual trylock that will work on any unlocked state.
147 static inline bool __mutex_trylock(struct mutex *lock)
149 return !__mutex_trylock_common(lock, false);
152 #ifndef CONFIG_DEBUG_LOCK_ALLOC
154 * Lockdep annotations are contained to the slow paths for simplicity.
155 * There is nothing that would stop spreading the lockdep annotations outwards, but it should be fine.
160 * Optimistic trylock that only works in the uncontended case. Make sure to
161 * follow with a __mutex_trylock() before failing.
163 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
165 unsigned long curr = (unsigned long)current;
166 unsigned long zero = 0UL;
168 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
174 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
176 unsigned long curr = (unsigned long)current;
178 return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
182 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
184 atomic_long_or(flag, &lock->owner);
187 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
189 atomic_long_andnot(flag, &lock->owner);
192 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
194 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
198 * Add @waiter to a given location in the lock wait_list and set the
199 * FLAG_WAITERS flag if it's the first waiter.
202 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
203 struct list_head *list)
205 debug_mutex_add_waiter(lock, waiter, current);
207 list_add_tail(&waiter->list, list);
208 if (__mutex_waiter_is_first(lock, waiter))
209 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
213 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
215 list_del(&waiter->list);
216 if (likely(list_empty(&lock->wait_list)))
217 __mutex_clear_flag(lock, MUTEX_FLAGS);
219 debug_mutex_remove_waiter(lock, waiter, current);
223 * Give up ownership to a specific task; when @task = NULL, this is equivalent
224 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
225 * WAITERS. Provides RELEASE semantics like a regular unlock; the
226 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
228 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
230 unsigned long owner = atomic_long_read(&lock->owner);
235 MUTEX_WARN_ON(__owner_task(owner) != current);
236 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
238 new = (owner & MUTEX_FLAG_WAITERS);
239 new |= (unsigned long)task;
241 new |= MUTEX_FLAG_PICKUP;
243 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
248 #ifndef CONFIG_DEBUG_LOCK_ALLOC
250 * We split the mutex lock/unlock logic into separate fastpath and
251 * slowpath functions, to reduce the register pressure on the fastpath.
252 * We also put the fastpath first in the kernel image, to make sure the
253 * branch is predicted by the CPU as default-untaken.
255 static void __sched __mutex_lock_slowpath(struct mutex *lock);
258 * mutex_lock - acquire the mutex
259 * @lock: the mutex to be acquired
261 * Lock the mutex exclusively for this task. If the mutex is not
262 * available right now, it will sleep until it can get it.
264 * The mutex must later on be released by the same task that
265 * acquired it. Recursive locking is not allowed. The task
266 * may not exit without first unlocking the mutex. Also, kernel
267 * memory where the mutex resides must not be freed with
268 * the mutex still locked. The mutex must first be initialized
269 * (or statically defined) before it can be locked. memset()-ing
270 * the mutex to 0 is not allowed.
272 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
273 * checks that will enforce the restrictions and will also do
274 * deadlock debugging)
276 * This function is similar to (but not equivalent to) down().
278 void __sched mutex_lock(struct mutex *lock)
282 if (!__mutex_trylock_fast(lock))
283 __mutex_lock_slowpath(lock);
285 EXPORT_SYMBOL(mutex_lock);
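/*
 * Usage sketch (illustrative only, guarded out of the build): a minimal
 * critical section. "demo_lock" and "demo_count" are hypothetical names.
 */
#if 0
static DEFINE_MUTEX(demo_lock);
static unsigned long demo_count;

static void demo_increment(void)
{
	mutex_lock(&demo_lock);		/* sleeps until the mutex is available */
	demo_count++;			/* exclusive access to the shared data */
	mutex_unlock(&demo_lock);	/* released by the same task that locked it */
}
#endif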
288 #include "ww_mutex.h"
290 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
293 * Trylock variant that returns the owning task on failure.
295 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
297 return __mutex_trylock_common(lock, false);
301 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
302 struct mutex_waiter *waiter)
306 ww = container_of(lock, struct ww_mutex, base);
309 * If ww->ctx is set, its contents are undefined; only by
310 * acquiring wait_lock is it guaranteed that they are
311 * valid when read.
313 * As such, when deadlock detection needs to be
314 * performed, optimistic spinning cannot be done.
316 * Check this in every inner iteration because we may
317 * be racing against another thread's ww_mutex_lock.
319 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
323 * If we aren't on the wait list yet, cancel the spin
324 * if there are waiters. We want to avoid stealing the
325 * lock from a waiter with an earlier stamp, since the
326 * other thread may already own a lock that we also want to acquire.
329 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
333 * Similarly, stop spinning if we are no longer the first waiter.
336 if (waiter && !__mutex_waiter_is_first(lock, waiter))
343 * Look out! "owner" is an entirely speculative pointer access and not reliable.
346 * "noinline" so that this function shows up on perf profiles.
349 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
350 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
354 lockdep_assert_preemption_disabled();
356 while (__mutex_owner(lock) == owner) {
358 * Ensure we emit the owner->on_cpu dereference _after_
359 * checking lock->owner still matches owner. And we already
360 * disabled preemption, which is equivalent to an RCU read-side
361 * critical section in the optimistic spinning code. Thus the
362 * task_struct structure won't go away during the spinning period.
368 * Use vcpu_is_preempted to detect lock holder preemption issue.
370 if (!owner_on_cpu(owner) || need_resched()) {
375 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
387 * Initial check for entering the mutex spinning loop
389 static inline int mutex_can_spin_on_owner(struct mutex *lock)
391 struct task_struct *owner;
394 lockdep_assert_preemption_disabled();
400 * We already disabled preemption, which is equivalent to an RCU read-side
401 * critical section in the optimistic spinning code. Thus the task_struct
402 * structure won't go away during the spinning period.
404 owner = __mutex_owner(lock);
406 retval = owner_on_cpu(owner);
409 * If lock->owner is not set, the mutex has been released. Return true
410 * such that we'll trylock in the spin path, which is a faster option
411 * than the blocking slow path.
417 * Optimistic spinning.
419 * We try to spin for acquisition when we find that the lock owner
420 * is currently running on a (different) CPU and while we don't
421 * need to reschedule. The rationale is that if the lock owner is
422 * running, it is likely to release the lock soon.
424 * The mutex spinners are queued up using MCS lock so that only one
425 * spinner can compete for the mutex. However, if mutex spinning isn't
426 * going to happen, there is no point in going through the lock/unlock overhead.
429 * Returns true when the lock was taken, otherwise false, indicating
430 * that we need to jump to the slowpath and sleep.
432 * The waiter flag is set to true if the spinner is a waiter in the wait
433 * queue. The waiter-spinner will spin on the lock directly and concurrently
434 * with the spinner at the head of the OSQ, if present, until the owner is changed to itself.
437 static __always_inline bool
438 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
439 struct mutex_waiter *waiter)
443 * The purpose of the mutex_can_spin_on_owner() function is
444 * to eliminate the overhead of osq_lock() and osq_unlock()
445 * in case spinning isn't possible. As a waiter-spinner
446 * is not going to take OSQ lock anyway, there is no need
447 * to call mutex_can_spin_on_owner().
449 if (!mutex_can_spin_on_owner(lock))
453 * In order to avoid a stampede of mutex spinners trying to
454 * acquire the mutex all at once, the spinners need to take an
455 * MCS (queued) lock first before spinning on the owner field.
457 if (!osq_lock(&lock->osq))
462 struct task_struct *owner;
464 /* Try to acquire the mutex... */
465 owner = __mutex_trylock_or_owner(lock);
470 * There's an owner, wait for it to either
471 * release the lock or go to sleep.
473 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
477 * The cpu_relax() call is a compiler barrier which forces
478 * everything in this loop to be re-loaded. We don't need
479 * memory barriers as we'll eventually observe the right
480 * values at the cost of a few extra spins.
486 osq_unlock(&lock->osq);
493 osq_unlock(&lock->osq);
497 * If we fell out of the spin path because of need_resched(),
498 * reschedule now, before we try-lock the mutex. This avoids getting
499 * scheduled out right after we obtained the mutex.
501 if (need_resched()) {
503 * We _should_ have TASK_RUNNING here, but just in case
504 * we do not, make it so, otherwise we might get stuck.
506 __set_current_state(TASK_RUNNING);
507 schedule_preempt_disabled();
513 static __always_inline bool
514 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
515 struct mutex_waiter *waiter)
521 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
524 * mutex_unlock - release the mutex
525 * @lock: the mutex to be released
527 * Unlock a mutex that has been locked by this task previously.
529 * This function must not be used in interrupt context. Unlocking
530 * of a mutex that is not locked is not allowed.
532 * This function is similar to (but not equivalent to) up().
534 void __sched mutex_unlock(struct mutex *lock)
536 #ifndef CONFIG_DEBUG_LOCK_ALLOC
537 if (__mutex_unlock_fast(lock))
540 __mutex_unlock_slowpath(lock, _RET_IP_);
542 EXPORT_SYMBOL(mutex_unlock);
545 * ww_mutex_unlock - release the w/w mutex
546 * @lock: the mutex to be released
548 * Unlock a mutex that has been locked by this task previously with any of the
549 * ww_mutex_lock* functions (with or without an acquire context). It is
550 * forbidden to release the locks after releasing the acquire context.
552 * This function must not be used in interrupt context. Unlocking
553 * of an unlocked mutex is not allowed.
555 void __sched ww_mutex_unlock(struct ww_mutex *lock)
557 __ww_mutex_unlock(lock);
558 mutex_unlock(&lock->base);
560 EXPORT_SYMBOL(ww_mutex_unlock);
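/*
 * Usage sketch (illustrative only, guarded out of the build): the basic
 * acquire-context lifecycle around ww_mutex_lock()/ww_mutex_unlock(). All
 * names ("demo_ww_class", "demo_ww") are hypothetical; with several locks in
 * one context, ww_mutex_lock() may also return -EDEADLK and the caller must
 * back off and retry.
 */
#if 0
static DEFINE_WW_CLASS(demo_ww_class);
static struct ww_mutex demo_ww;		/* ww_mutex_init(&demo_ww, &demo_ww_class) at init time */

static int demo_ww_section(void)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &demo_ww_class);
	ret = ww_mutex_lock(&demo_ww, &ctx);
	if (!ret) {
		ww_acquire_done(&ctx);		/* all locks of this context acquired */
		/* ... critical section ... */
		ww_mutex_unlock(&demo_ww);	/* must happen before ww_acquire_fini() */
	}
	ww_acquire_fini(&ctx);
	return ret;
}
#endif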
563 * Lock a mutex (possibly interruptible), slowpath:
565 static __always_inline int __sched
566 __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
567 struct lockdep_map *nest_lock, unsigned long ip,
568 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
570 struct mutex_waiter waiter;
579 MUTEX_WARN_ON(lock->magic != lock);
581 ww = container_of(lock, struct ww_mutex, base);
583 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
587 * Reset the wounded flag after a kill. No other process can
588 * race and wound us here since they can't have a valid owner
589 * pointer if we don't have any locks held.
591 if (ww_ctx->acquired == 0)
594 #ifdef CONFIG_DEBUG_LOCK_ALLOC
595 nest_lock = &ww_ctx->dep_map;
600 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
602 if (__mutex_trylock(lock) ||
603 mutex_optimistic_spin(lock, ww_ctx, NULL)) {
604 /* got the lock, yay! */
605 lock_acquired(&lock->dep_map, ip);
607 ww_mutex_set_context_fastpath(ww, ww_ctx);
612 raw_spin_lock(&lock->wait_lock);
614 * After waiting to acquire the wait_lock, try again.
616 if (__mutex_trylock(lock)) {
618 __ww_mutex_check_waiters(lock, ww_ctx);
623 debug_mutex_lock_common(lock, &waiter);
624 waiter.task = current;
626 waiter.ww_ctx = ww_ctx;
628 lock_contended(&lock->dep_map, ip);
631 /* add waiting tasks to the end of the waitqueue (FIFO): */
632 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
635 * Add in stamp order, waking up waiters that must kill themselves.
638 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
643 set_current_state(state);
648 * Once we hold wait_lock, we're serialized against
649 * mutex_unlock() handing the lock off to us; do a trylock
650 * before testing the error conditions to make sure we pick up the handoff.
653 if (__mutex_trylock(lock))
657 * Check for signals and kill conditions while holding
658 * wait_lock. This ensures the lock cancellation is ordered
659 * against mutex_unlock() and wake-ups do not go missing.
661 if (signal_pending_state(state, current)) {
667 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
672 raw_spin_unlock(&lock->wait_lock);
673 schedule_preempt_disabled();
675 first = __mutex_waiter_is_first(lock, &waiter);
677 set_current_state(state);
679 * Here we order against unlock; we must either see it change
680 * state back to RUNNING and fall through the next schedule(),
681 * or we must see its unlock and acquire.
683 if (__mutex_trylock_or_handoff(lock, first) ||
684 (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
687 raw_spin_lock(&lock->wait_lock);
689 raw_spin_lock(&lock->wait_lock);
691 __set_current_state(TASK_RUNNING);
695 * Wound-Wait; we stole the lock (!first_waiter), check the
696 * waiters as anyone might want to wound us.
698 if (!ww_ctx->is_wait_die &&
699 !__mutex_waiter_is_first(lock, &waiter))
700 __ww_mutex_check_waiters(lock, ww_ctx);
703 __mutex_remove_waiter(lock, &waiter);
705 debug_mutex_free_waiter(&waiter);
708 /* got the lock - cleanup and rejoice! */
709 lock_acquired(&lock->dep_map, ip);
712 ww_mutex_lock_acquired(ww, ww_ctx);
714 raw_spin_unlock(&lock->wait_lock);
719 __set_current_state(TASK_RUNNING);
720 __mutex_remove_waiter(lock, &waiter);
722 raw_spin_unlock(&lock->wait_lock);
723 debug_mutex_free_waiter(&waiter);
724 mutex_release(&lock->dep_map, ip);
730 __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
731 struct lockdep_map *nest_lock, unsigned long ip)
733 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
737 __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
738 unsigned long ip, struct ww_acquire_ctx *ww_ctx)
740 return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
744 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
745 * @ww: mutex to lock
746 * @ww_ctx: optional w/w acquire context
748 * Trylocks a mutex with the optional acquire context; no deadlock detection is
749 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
751 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx is
752 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
754 * A mutex acquired with this function must be released with ww_mutex_unlock.
756 int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
759 return mutex_trylock(&ww->base);
761 MUTEX_WARN_ON(ww->base.magic != &ww->base);
764 * Reset the wounded flag after a kill. No other process can
765 * race and wound us here, since they can't have a valid owner
766 * pointer if we don't have any locks held.
768 if (ww_ctx->acquired == 0)
771 if (__mutex_trylock(&ww->base)) {
772 ww_mutex_set_context_fastpath(ww, ww_ctx);
773 mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
779 EXPORT_SYMBOL(ww_mutex_trylock);
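/*
 * Usage sketch (illustrative only, guarded out of the build): trylock with an
 * optional acquire context; "demo_ww" and "demo_ww_class" are hypothetical.
 */
#if 0
static DEFINE_WW_CLASS(demo_ww_class);
static struct ww_mutex demo_ww;		/* ww_mutex_init(&demo_ww, &demo_ww_class) at init time */

static bool demo_ww_poll(struct ww_acquire_ctx *ctx)
{
	if (!ww_mutex_trylock(&demo_ww, ctx))	/* ctx may be NULL; 1 on success, 0 otherwise */
		return false;
	/* ... */
	ww_mutex_unlock(&demo_ww);
	return true;
}
#endif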
781 #ifdef CONFIG_DEBUG_LOCK_ALLOC
783 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
785 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
788 EXPORT_SYMBOL_GPL(mutex_lock_nested);
791 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
793 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
795 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
798 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
800 return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
802 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
805 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
807 return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
809 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
812 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
818 token = io_schedule_prepare();
819 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
820 subclass, NULL, _RET_IP_, NULL, 0);
821 io_schedule_finish(token);
823 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
826 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
828 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
831 if (ctx->deadlock_inject_countdown-- == 0) {
832 tmp = ctx->deadlock_inject_interval;
833 if (tmp > UINT_MAX/4)
836 tmp = tmp*2 + tmp + tmp/2;
838 ctx->deadlock_inject_interval = tmp;
839 ctx->deadlock_inject_countdown = tmp;
840 ctx->contending_lock = lock;
842 ww_mutex_unlock(lock);
852 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
857 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
859 if (!ret && ctx && ctx->acquired > 1)
860 return ww_mutex_deadlock_injection(lock, ctx);
864 EXPORT_SYMBOL_GPL(ww_mutex_lock);
867 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
872 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
875 if (!ret && ctx && ctx->acquired > 1)
876 return ww_mutex_deadlock_injection(lock, ctx);
880 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
885 * Release the lock, slowpath:
887 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
889 struct task_struct *next = NULL;
890 DEFINE_WAKE_Q(wake_q);
893 mutex_release(&lock->dep_map, ip);
896 * Release the lock before (potentially) taking the spinlock such that
897 * other contenders can get on with things ASAP.
899 * Except when HANDOFF, in that case we must not clear the owner field,
900 * but instead set it to the top waiter.
902 owner = atomic_long_read(&lock->owner);
904 MUTEX_WARN_ON(__owner_task(owner) != current);
905 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
907 if (owner & MUTEX_FLAG_HANDOFF)
910 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
911 if (owner & MUTEX_FLAG_WAITERS)
918 raw_spin_lock(&lock->wait_lock);
919 debug_mutex_unlock(lock);
920 if (!list_empty(&lock->wait_list)) {
921 /* get the first entry from the wait-list: */
922 struct mutex_waiter *waiter =
923 list_first_entry(&lock->wait_list,
924 struct mutex_waiter, list);
928 debug_mutex_wake_waiter(lock, waiter);
929 wake_q_add(&wake_q, next);
932 if (owner & MUTEX_FLAG_HANDOFF)
933 __mutex_handoff(lock, next);
935 raw_spin_unlock(&lock->wait_lock);
940 #ifndef CONFIG_DEBUG_LOCK_ALLOC
942 * Here come the less common (and hence less performance-critical) APIs:
943 * mutex_lock_interruptible() and mutex_trylock().
945 static noinline int __sched
946 __mutex_lock_killable_slowpath(struct mutex *lock);
948 static noinline int __sched
949 __mutex_lock_interruptible_slowpath(struct mutex *lock);
952 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
953 * @lock: The mutex to be acquired.
955 * Lock the mutex like mutex_lock(). If a signal is delivered while the
956 * process is sleeping, this function will return without acquiring the mutex.
959 * Context: Process context.
960 * Return: 0 if the lock was successfully acquired or %-EINTR if a signal arrived.
963 int __sched mutex_lock_interruptible(struct mutex *lock)
967 if (__mutex_trylock_fast(lock))
970 return __mutex_lock_interruptible_slowpath(lock);
973 EXPORT_SYMBOL(mutex_lock_interruptible);
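/*
 * Usage sketch (illustrative only, guarded out of the build): propagating the
 * %-EINTR result to the caller; "demo_lock" is a hypothetical name.
 */
#if 0
static DEFINE_MUTEX(demo_lock);

static int demo_op(void)
{
	int ret;

	ret = mutex_lock_interruptible(&demo_lock);
	if (ret)
		return ret;		/* a signal arrived, the lock is not held */
	/* ... */
	mutex_unlock(&demo_lock);
	return 0;
}
#endif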
976 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
977 * @lock: The mutex to be acquired.
979 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
980 * the current process is delivered while the process is sleeping, this
981 * function will return without acquiring the mutex.
983 * Context: Process context.
984 * Return: 0 if the lock was successfully acquired or %-EINTR if a
985 * fatal signal arrived.
987 int __sched mutex_lock_killable(struct mutex *lock)
991 if (__mutex_trylock_fast(lock))
994 return __mutex_lock_killable_slowpath(lock);
996 EXPORT_SYMBOL(mutex_lock_killable);
999 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1000 * @lock: The mutex to be acquired.
1002 * Lock the mutex like mutex_lock(). While the task is waiting for this
1003 * mutex, it will be accounted as being in the IO wait state by the scheduler.
1006 * Context: Process context.
1008 void __sched mutex_lock_io(struct mutex *lock)
1012 token = io_schedule_prepare();
1014 io_schedule_finish(token);
1016 EXPORT_SYMBOL_GPL(mutex_lock_io);
1018 static noinline void __sched
1019 __mutex_lock_slowpath(struct mutex *lock)
1021 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1024 static noinline int __sched
1025 __mutex_lock_killable_slowpath(struct mutex *lock)
1027 return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1030 static noinline int __sched
1031 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1033 return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1036 static noinline int __sched
1037 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1039 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
1043 static noinline int __sched
1044 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1045 struct ww_acquire_ctx *ctx)
1047 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
1054 * mutex_trylock - try to acquire the mutex, without waiting
1055 * @lock: the mutex to be acquired
1057 * Try to acquire the mutex atomically. Returns 1 if the mutex
1058 * has been acquired successfully, and 0 on contention.
1060 * NOTE: this function follows the spin_trylock() convention, so
1061 * it is negated from the down_trylock() return values! Be careful
1062 * about this when converting semaphore users to mutexes.
1064 * This function must not be used in interrupt context. The
1065 * mutex must be released by the same task that acquired it.
1067 int __sched mutex_trylock(struct mutex *lock)
1071 MUTEX_WARN_ON(lock->magic != lock);
1073 locked = __mutex_trylock(lock);
1075 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1079 EXPORT_SYMBOL(mutex_trylock);
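/*
 * Usage sketch (illustrative only, guarded out of the build): note the
 * spin_trylock()-style return value, 1 on success and 0 on contention.
 * "demo_lock" is a hypothetical name.
 */
#if 0
static DEFINE_MUTEX(demo_lock);

static bool demo_poll(void)
{
	if (!mutex_trylock(&demo_lock))
		return false;		/* contended: the lock was not taken */
	/* ... */
	mutex_unlock(&demo_lock);
	return true;
}
#endif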
1081 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1083 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1087 if (__mutex_trylock_fast(&lock->base)) {
1089 ww_mutex_set_context_fastpath(lock, ctx);
1093 return __ww_mutex_lock_slowpath(lock, ctx);
1095 EXPORT_SYMBOL(ww_mutex_lock);
1098 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1102 if (__mutex_trylock_fast(&lock->base)) {
1104 ww_mutex_set_context_fastpath(lock, ctx);
1108 return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1110 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1112 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
1113 #endif /* !CONFIG_PREEMPT_RT */
1116 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1117 * @cnt: the atomic which we are to dec
1118 * @lock: the mutex to return holding if we dec to 0
1120 * Return true and hold the lock if we decrement to 0; return false otherwise.
1122 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1124 /* dec if we can't possibly hit 0 */
1125 if (atomic_add_unless(cnt, -1, 1))
1127 /* we might hit 0, so take the lock */
1129 if (!atomic_dec_and_test(cnt)) {
1130 /* when we actually did the dec, we didn't hit 0 */
1134 /* we hit 0, and we hold the lock */
1137 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
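/*
 * Usage sketch (illustrative only, guarded out of the build): a reference-put
 * where the final teardown must run under the mutex; all names are
 * hypothetical.
 */
#if 0
static DEFINE_MUTEX(demo_lock);
static atomic_t demo_refs = ATOMIC_INIT(1);

static void demo_put(void)
{
	if (!atomic_dec_and_mutex_lock(&demo_refs, &demo_lock))
		return;			/* not the last reference */
	/* the count hit 0 and demo_lock is held: tear the object down */
	mutex_unlock(&demo_lock);
}
#endif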