1 // SPDX-License-Identifier: GPL-2.0-only
3 * kernel/locking/mutex.c
5 * Mutexes: blocking mutual exclusion locks
7 * Started by Ingo Molnar:
9 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
11 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
12 * David Howells for suggestions and improvements.
14 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15 * from the -rt tree, where it was originally implemented for rtmutexes
16 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
19 * Also see Documentation/locking/mutex-design.rst.
21 #include <linux/mutex.h>
22 #include <linux/ww_mutex.h>
23 #include <linux/sched/signal.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/wake_q.h>
26 #include <linux/sched/debug.h>
27 #include <linux/export.h>
28 #include <linux/spinlock.h>
29 #include <linux/interrupt.h>
30 #include <linux/debug_locks.h>
31 #include <linux/osq_lock.h>
33 #ifdef CONFIG_DEBUG_MUTEXES
34 # include "mutex-debug.h"
40 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
42 atomic_long_set(&lock->owner, 0);
43 spin_lock_init(&lock->wait_lock);
44 INIT_LIST_HEAD(&lock->wait_list);
45 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
46 osq_lock_init(&lock->osq);
49 debug_mutex_init(lock, name, key);
51 EXPORT_SYMBOL(__mutex_init);
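/*
 * Illustrative sketch (not part of the original file): how callers typically
 * initialise a mutex, either statically with DEFINE_MUTEX() or at runtime
 * with mutex_init(), which expands to the __mutex_init() above. The structure
 * and function names (my_device, my_device_setup) are invented for the example.
 */
#if 0	/* example only */
static DEFINE_MUTEX(global_config_lock);	/* statically initialised */

struct my_device {
	struct mutex io_lock;
};

static void my_device_setup(struct my_device *dev)
{
	mutex_init(&dev->io_lock);		/* runtime initialisation */
}
#endif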
54 * @owner: contains a 'struct task_struct *' pointing to the current lock owner;
55 * NULL means not owned. Since task_struct pointers are aligned to
56 * at least L1_CACHE_BYTES, we have low bits to store extra state.
58 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
59 * Bit1 indicates unlock needs to hand the lock to the top waiter.
60 * Bit2 indicates handoff has been done and we're waiting for pickup.
62 #define MUTEX_FLAG_WAITERS 0x01
63 #define MUTEX_FLAG_HANDOFF 0x02
64 #define MUTEX_FLAG_PICKUP 0x04
66 #define MUTEX_FLAGS 0x07
68 static inline struct task_struct *__owner_task(unsigned long owner)
70 return (struct task_struct *)(owner & ~MUTEX_FLAGS);
73 static inline unsigned long __owner_flags(unsigned long owner)
75 return owner & MUTEX_FLAGS;
79 * Trylock variant that returns the owning task on failure.
81 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
83 unsigned long owner, curr = (unsigned long)current;
85 owner = atomic_long_read(&lock->owner);
86 for (;;) { /* must loop, can race against a flag */
87 unsigned long old, flags = __owner_flags(owner);
88 unsigned long task = owner & ~MUTEX_FLAGS;
91 if (likely(task != curr))
94 if (likely(!(flags & MUTEX_FLAG_PICKUP)))
97 flags &= ~MUTEX_FLAG_PICKUP;
99 #ifdef CONFIG_DEBUG_MUTEXES
100 DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
105 * We set the HANDOFF bit; we must make sure it doesn't live
106 * past the point where we acquire it. This would be possible
107 * if we (accidentally) set the bit on an unlocked mutex.
109 flags &= ~MUTEX_FLAG_HANDOFF;
111 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
118 return __owner_task(owner);
122 * Actual trylock that will work on any unlocked state.
124 static inline bool __mutex_trylock(struct mutex *lock)
126 return !__mutex_trylock_or_owner(lock);
129 #ifndef CONFIG_DEBUG_LOCK_ALLOC
131 * Lockdep annotations are contained to the slow paths for simplicity.
132 * There is nothing that would stop spreading the lockdep annotations outwards except more code.
137 * Optimistic trylock that only works in the uncontended case. Make sure to
138 * follow with a __mutex_trylock() before failing.
140 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
142 unsigned long curr = (unsigned long)current;
143 unsigned long zero = 0UL;
145 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
151 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
153 unsigned long curr = (unsigned long)current;
155 if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
162 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
164 atomic_long_or(flag, &lock->owner);
167 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
169 atomic_long_andnot(flag, &lock->owner);
172 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
174 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
178 * Add @waiter to a given location in the lock wait_list and set the
179 * FLAG_WAITERS flag if it's the first waiter.
182 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
183 struct list_head *list)
185 debug_mutex_add_waiter(lock, waiter, current);
187 list_add_tail(&waiter->list, list);
188 if (__mutex_waiter_is_first(lock, waiter))
189 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
193 * Give up ownership to a specific task; when @task = NULL, this is equivalent
194 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
195 * WAITERS. Provides RELEASE semantics like a regular unlock; the
196 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
198 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
200 unsigned long owner = atomic_long_read(&lock->owner);
203 unsigned long old, new;
205 #ifdef CONFIG_DEBUG_MUTEXES
206 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
207 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
210 new = (owner & MUTEX_FLAG_WAITERS);
211 new |= (unsigned long)task;
213 new |= MUTEX_FLAG_PICKUP;
215 old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
223 #ifndef CONFIG_DEBUG_LOCK_ALLOC
225 * We split the mutex lock/unlock logic into separate fastpath and
226 * slowpath functions, to reduce the register pressure on the fastpath.
227 * We also put the fastpath first in the kernel image, to make sure the
228 * branch is predicted by the CPU as default-untaken.
230 static void __sched __mutex_lock_slowpath(struct mutex *lock);
233 * mutex_lock - acquire the mutex
234 * @lock: the mutex to be acquired
236 * Lock the mutex exclusively for this task. If the mutex is not
237 * available right now, it will sleep until it can get it.
239 * The mutex must later on be released by the same task that
240 * acquired it. Recursive locking is not allowed. The task
241 * may not exit without first unlocking the mutex. Also, kernel
242 * memory where the mutex resides must not be freed with
243 * the mutex still locked. The mutex must first be initialized
244 * (or statically defined) before it can be locked. memset()-ing
245 * the mutex to 0 is not allowed.
247 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
248 * checks that will enforce the restrictions and will also do
249 * deadlock debugging)
251 * This function is similar to (but not equivalent to) down().
253 void __sched mutex_lock(struct mutex *lock)
257 if (!__mutex_trylock_fast(lock))
258 __mutex_lock_slowpath(lock);
260 EXPORT_SYMBOL(mutex_lock);
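/*
 * Illustrative sketch (not part of the original file): the canonical usage
 * pattern described above -- lock, touch the shared data, unlock from the
 * same task. The names (stats_lock, stats_count, stats_inc) are invented.
 */
#if 0	/* example only */
static DEFINE_MUTEX(stats_lock);
static unsigned long stats_count;

static void stats_inc(void)
{
	mutex_lock(&stats_lock);	/* may sleep until the lock is free */
	stats_count++;			/* exclusive access to the shared counter */
	mutex_unlock(&stats_lock);	/* released by the same task that locked it */
}
#endif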
265 * Wait-Die: the newer transactions are killed when:
266 *   It (the new transaction) makes a request for a lock being held
267 *   by an older transaction.
270 * Wound-Wait: the newer transactions are wounded when:
271 *   An older transaction makes a request for a lock being held by
272 * the newer transaction.
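/*
 * Illustrative sketch (not part of the original file): how a caller typically
 * drives the wait-die/wound-wait machinery described above -- acquire two
 * ww_mutexes under one acquire context and back off on -EDEADLK. The ww_class
 * and function names (example_ww_class, lock_both) are invented; see
 * Documentation/locking/ww-mutex-design.rst for the full pattern.
 */
#if 0	/* example only */
static DEFINE_WW_CLASS(example_ww_class);

static void lock_both(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *first = a, *second = b;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);
retry:
	ww_mutex_lock(first, &ctx);	/* nothing held yet: cannot return -EDEADLK */

	ret = ww_mutex_lock(second, &ctx);
	if (ret == -EDEADLK) {
		/*
		 * We were told to die (Wait-Die) or wounded (Wound-Wait) by a
		 * transaction with an older stamp: drop what we hold and retry,
		 * taking the contended lock first this time.
		 */
		ww_mutex_unlock(first);
		swap(first, second);
		goto retry;
	}
	ww_acquire_done(&ctx);

	/* ... both objects locked, do the work ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}
#endif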
276 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired the lock.
279 static __always_inline void
280 ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
282 #ifdef CONFIG_DEBUG_MUTEXES
284 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
285 * but released with a normal mutex_unlock in this call.
287 * This should never happen, always use ww_mutex_unlock.
289 DEBUG_LOCKS_WARN_ON(ww->ctx);
292 * Not quite done after calling ww_acquire_done() ?
294 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
296 if (ww_ctx->contending_lock) {
298 * After -EDEADLK you tried to
299 * acquire a different ww_mutex? Bad!
301 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
304 * You called ww_mutex_lock after receiving -EDEADLK,
305 * but 'forgot' to unlock everything else first?
307 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
308 ww_ctx->contending_lock = NULL;
312 * Naughty, using a different class will lead to undefined behavior!
314 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
321 * Determine if context @a is 'after' context @b. IOW, @a is a younger
322 * transaction than @b and, depending on the algorithm, either needs to wait for @b or die.
325 static inline bool __sched
326 __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
329 return (signed long)(a->stamp - b->stamp) > 0;
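/*
 * Worked example (illustrative): stamps come from a monotonically increasing
 * counter, and the signed comparison above stays correct across wrap-around.
 * With b->stamp == ULONG_MAX and a->stamp == 1 (allocated just after the
 * counter wrapped), a->stamp - b->stamp == 2, which is positive as a signed
 * value, so @a is correctly reported as the younger (later) context.
 */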
333 * Wait-Die; wake a younger waiter context (when locks held) such that it can die.
336 * Among waiters with context, only the first one can have other locks acquired
337 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
338 * __ww_mutex_check_kill() wake any but the earliest context.
341 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
342 struct ww_acquire_ctx *ww_ctx)
344 if (!ww_ctx->is_wait_die)
347 if (waiter->ww_ctx->acquired > 0 &&
348 __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
349 debug_mutex_wake_waiter(lock, waiter);
350 wake_up_process(waiter->task);
357 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
359 * Wound the lock holder if there are waiters with older transactions than
360 * the lock holder. Even though multiple waiters may wound the lock holder,
361 * it's sufficient that only one does.
363 static bool __ww_mutex_wound(struct mutex *lock,
364 struct ww_acquire_ctx *ww_ctx,
365 struct ww_acquire_ctx *hold_ctx)
367 struct task_struct *owner = __mutex_owner(lock);
369 lockdep_assert_held(&lock->wait_lock);
372 * Possible through __ww_mutex_add_waiter() when we race with
373 * ww_mutex_set_context_fastpath(). In that case we'll get here again
374 * through __ww_mutex_check_waiters().
380 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
381 * it cannot go away because we'll have FLAG_WAITERS set and hold wait_lock.
387 if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
388 hold_ctx->wounded = 1;
391 * wake_up_process() paired with set_current_state()
392 * inserts sufficient barriers to make sure @owner either sees
393 * it's wounded in __ww_mutex_check_kill() or has a
394 * wakeup pending to re-read the wounded state.
396 if (owner != current)
397 wake_up_process(owner);
406 * We just acquired @lock under @ww_ctx. If there are later contexts waiting
407 * behind us on the wait-list, check if they need to die, or wound us.
409 * See __ww_mutex_add_waiter() for the list-order construction; basically the
410 * list is ordered by stamp, smallest (oldest) first.
412 * This relies on never mixing wait-die/wound-wait on the same wait-list;
413 * which is currently ensured by that being a ww_class property.
415 * The current task must not be on the wait list.
418 __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
420 struct mutex_waiter *cur;
422 lockdep_assert_held(&lock->wait_lock);
424 list_for_each_entry(cur, &lock->wait_list, list) {
428 if (__ww_mutex_die(lock, cur, ww_ctx) ||
429 __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
435 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
436 * and wake up any waiters so they can recheck.
438 static __always_inline void
439 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
441 ww_mutex_lock_acquired(lock, ctx);
444 * The lock->ctx update should be visible on all cores before
445 * the WAITERS check is done, otherwise contended waiters might be
446 * missed. The contended waiters will either see ww_ctx == NULL
447 * and keep spinning, or acquire wait_lock, add themselves
448 * to the waiter list and sleep.
450 smp_mb(); /* See comments above and below. */
453 * [W] ww->ctx = ctx          [W] MUTEX_FLAG_WAITERS
454 *     MB                         MB
455 * [R] MUTEX_FLAG_WAITERS     [R] ww->ctx
457 * The memory barrier above pairs with the memory barrier in
458 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
459 * and/or !empty list.
461 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
465 * Uh oh, we raced in fastpath, check if any of the waiters need to die or wound us.
468 spin_lock(&lock->base.wait_lock);
469 __ww_mutex_check_waiters(&lock->base, ctx);
470 spin_unlock(&lock->base.wait_lock);
473 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
476 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
477 struct mutex_waiter *waiter)
481 ww = container_of(lock, struct ww_mutex, base);
484 * If ww->ctx is set the contents are undefined; only
485 * by acquiring wait_lock is there a guarantee that
486 * they are not invalid when read.
488 * As such, when deadlock detection needs to be
489 * performed the optimistic spinning cannot be done.
491 * Check this in every inner iteration because we may
492 * be racing against another thread's ww_mutex_lock.
494 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
498 * If we aren't on the wait list yet, cancel the spin
499 * if there are waiters. We want to avoid stealing the
500 * lock from a waiter with an earlier stamp, since the
501 * other thread may already own a lock that we also want to acquire.
504 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
508 * Similarly, stop spinning if we are no longer the first waiter.
511 if (waiter && !__mutex_waiter_is_first(lock, waiter))
518 * Look out! "owner" is an entirely speculative pointer access and not reliable.
521 * "noinline" so that this function shows up on perf profiles.
524 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
525 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
530 while (__mutex_owner(lock) == owner) {
532 * Ensure we emit the owner->on_cpu dereference _after_
533 * checking that lock->owner still matches owner. If that fails,
534 * owner might point to freed memory. If it still matches,
535 * the rcu_read_lock() ensures the memory stays valid.
540 * Use vcpu_is_preempted() to detect lock holder preemption.
542 if (!owner->on_cpu || need_resched() ||
543 vcpu_is_preempted(task_cpu(owner))) {
548 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
561 * Initial check for entering the mutex spinning loop
563 static inline int mutex_can_spin_on_owner(struct mutex *lock)
565 struct task_struct *owner;
572 owner = __mutex_owner(lock);
575 * To guard against lock holder preemption, we skip spinning if the task is not
576 * running on a CPU or its CPU is preempted.
579 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
583 * If lock->owner is not set, the mutex has been released. Return true
584 * such that we'll trylock in the spin path, which is a faster option
585 * than the blocking slow path.
591 * Optimistic spinning.
593 * We try to spin for acquisition when we find that the lock owner
594 * is currently running on a (different) CPU and while we don't
595 * need to reschedule. The rationale is that if the lock owner is
596 * running, it is likely to release the lock soon.
598 * The mutex spinners are queued up using MCS lock so that only one
599 * spinner can compete for the mutex. However, if mutex spinning isn't
600 * going to happen, there is no point in going through the lock/unlock overhead.
603 * Returns true when the lock was taken, otherwise false, indicating
604 * that we need to jump to the slowpath and sleep.
606 * The waiter flag is set to true if the spinner is a waiter in the wait
607 * queue. The waiter-spinner will spin on the lock directly and concurrently
608 * with the spinner at the head of the OSQ, if present, until the owner is changed to itself.
611 static __always_inline bool
612 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
613 const bool use_ww_ctx, struct mutex_waiter *waiter)
617 * The purpose of the mutex_can_spin_on_owner() function is
618 * to eliminate the overhead of osq_lock() and osq_unlock()
619 * in case spinning isn't possible. As a waiter-spinner
620 * is not going to take OSQ lock anyway, there is no need
621 * to call mutex_can_spin_on_owner().
623 if (!mutex_can_spin_on_owner(lock))
627 * In order to avoid a stampede of mutex spinners trying to
628 * acquire the mutex all at once, the spinners need to take a
629 * MCS (queued) lock first before spinning on the owner field.
631 if (!osq_lock(&lock->osq))
636 struct task_struct *owner;
638 /* Try to acquire the mutex... */
639 owner = __mutex_trylock_or_owner(lock);
644 * There's an owner, wait for it to either
645 * release the lock or go to sleep.
647 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
651 * The cpu_relax() call is a compiler barrier which forces
652 * everything in this loop to be re-loaded. We don't need
653 * memory barriers as we'll eventually observe the right
654 * values at the cost of a few extra spins.
660 osq_unlock(&lock->osq);
667 osq_unlock(&lock->osq);
671 * If we fell out of the spin path because of need_resched(),
672 * reschedule now, before we try-lock the mutex. This avoids getting
673 * scheduled out right after we obtained the mutex.
675 if (need_resched()) {
677 * We _should_ have TASK_RUNNING here, but just in case
678 * we do not, make it so, otherwise we might get stuck.
680 __set_current_state(TASK_RUNNING);
681 schedule_preempt_disabled();
687 static __always_inline bool
688 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
689 const bool use_ww_ctx, struct mutex_waiter *waiter)
695 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
698 * mutex_unlock - release the mutex
699 * @lock: the mutex to be released
701 * Unlock a mutex that has been locked by this task previously.
703 * This function must not be used in interrupt context. Unlocking
704 * a mutex that is not locked is not allowed.
706 * This function is similar to (but not equivalent to) up().
708 void __sched mutex_unlock(struct mutex *lock)
710 #ifndef CONFIG_DEBUG_LOCK_ALLOC
711 if (__mutex_unlock_fast(lock))
714 __mutex_unlock_slowpath(lock, _RET_IP_);
716 EXPORT_SYMBOL(mutex_unlock);
719 * ww_mutex_unlock - release the w/w mutex
720 * @lock: the mutex to be released
722 * Unlock a mutex that has been locked by this task previously with any of the
723 * ww_mutex_lock* functions (with or without an acquire context). It is
724 * forbidden to release the locks after releasing the acquire context.
726 * This function must not be used in interrupt context. Unlocking
727 * an unlocked mutex is not allowed.
729 void __sched ww_mutex_unlock(struct ww_mutex *lock)
732 * The unlocking fastpath is the 0->1 transition from 'locked'
733 * into 'unlocked' state:
736 #ifdef CONFIG_DEBUG_MUTEXES
737 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
739 if (lock->ctx->acquired > 0)
740 lock->ctx->acquired--;
744 mutex_unlock(&lock->base);
746 EXPORT_SYMBOL(ww_mutex_unlock);
749 static __always_inline int __sched
750 __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
752 if (ww_ctx->acquired > 0) {
753 #ifdef CONFIG_DEBUG_MUTEXES
756 ww = container_of(lock, struct ww_mutex, base);
757 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
758 ww_ctx->contending_lock = ww;
768 * Check the wound condition for the current lock acquire.
770 * Wound-Wait: If we're wounded, kill ourselves.
772 * Wait-Die: If we're trying to acquire a lock already held by an older
773 * context, kill ourselves.
775 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
776 * look at waiters before us in the wait-list.
778 static inline int __sched
779 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
780 struct ww_acquire_ctx *ctx)
782 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
783 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
784 struct mutex_waiter *cur;
786 if (ctx->acquired == 0)
789 if (!ctx->is_wait_die) {
791 return __ww_mutex_kill(lock, ctx);
796 if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
797 return __ww_mutex_kill(lock, ctx);
800 * If there is a waiter in front of us that has a context, then its
801 * stamp is earlier than ours and we must kill ourselves.
804 list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
808 return __ww_mutex_kill(lock, ctx);
815 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
816 * first, such that older contexts are preferred to acquire the lock over younger contexts.
819 * Waiters without context are interspersed in FIFO order.
821 * Furthermore, for Wait-Die, kill ourselves immediately when possible (there are
822 * older contexts already waiting) to avoid unnecessary waiting, and for
823 * Wound-Wait, ensure we wound the owning context when it is younger.
825 static inline int __sched
826 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
828 struct ww_acquire_ctx *ww_ctx)
830 struct mutex_waiter *cur;
831 struct list_head *pos;
835 __mutex_add_waiter(lock, waiter, &lock->wait_list);
839 is_wait_die = ww_ctx->is_wait_die;
842 * Add the waiter before the first waiter with a higher stamp.
843 * Waiters without a context are skipped to avoid starving
844 * them. Wait-Die waiters may die here. Wound-Wait waiters
845 * never die here, but they are sorted in stamp order and
846 * may wound the lock holder.
848 pos = &lock->wait_list;
849 list_for_each_entry_reverse(cur, &lock->wait_list, list) {
853 if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
855 * Wait-Die: if we find an older context waiting, there
856 * is no point in queueing behind it, as we'd have to
857 * die the moment it would acquire the lock.
860 int ret = __ww_mutex_kill(lock, ww_ctx);
871 /* Wait-Die: ensure younger waiters die. */
872 __ww_mutex_die(lock, cur, ww_ctx);
875 __mutex_add_waiter(lock, waiter, pos);
878 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
879 * wound it so that we might proceed.
882 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
885 * See ww_mutex_set_context_fastpath(). Orders setting
886 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
887 * such that either we or the fastpath will wound @ww->ctx.
890 __ww_mutex_wound(lock, ww_ctx, ww->ctx);
897 * Lock a mutex (possibly interruptible), slowpath:
899 static __always_inline int __sched
900 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
901 struct lockdep_map *nest_lock, unsigned long ip,
902 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
904 struct mutex_waiter waiter;
911 #ifdef CONFIG_DEBUG_MUTEXES
912 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
915 ww = container_of(lock, struct ww_mutex, base);
916 if (use_ww_ctx && ww_ctx) {
917 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
921 * Reset the wounded flag after a kill. No other process can
922 * race and wound us here since they can't have a valid owner
923 * pointer if we don't have any locks held.
925 if (ww_ctx->acquired == 0)
930 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
932 if (__mutex_trylock(lock) ||
933 mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
934 /* got the lock, yay! */
935 lock_acquired(&lock->dep_map, ip);
936 if (use_ww_ctx && ww_ctx)
937 ww_mutex_set_context_fastpath(ww, ww_ctx);
942 spin_lock(&lock->wait_lock);
944 * After waiting to acquire the wait_lock, try again.
946 if (__mutex_trylock(lock)) {
947 if (use_ww_ctx && ww_ctx)
948 __ww_mutex_check_waiters(lock, ww_ctx);
953 debug_mutex_lock_common(lock, &waiter);
955 lock_contended(&lock->dep_map, ip);
958 /* add waiting tasks to the end of the waitqueue (FIFO): */
959 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
962 #ifdef CONFIG_DEBUG_MUTEXES
963 waiter.ww_ctx = MUTEX_POISON_WW_CTX;
967 * Add in stamp order, waking up waiters that must kill themselves.
970 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
974 waiter.ww_ctx = ww_ctx;
977 waiter.task = current;
979 set_current_state(state);
982 * Once we hold wait_lock, we're serialized against
983 * mutex_unlock() handing the lock off to us; do a trylock
984 * before testing the error conditions to make sure we pick up the handoff.
987 if (__mutex_trylock(lock))
991 * Check for signals and kill conditions while holding
992 * wait_lock. This ensures the lock cancellation is ordered
993 * against mutex_unlock() and wake-ups do not go missing.
995 if (signal_pending_state(state, current)) {
1000 if (use_ww_ctx && ww_ctx) {
1001 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
1006 spin_unlock(&lock->wait_lock);
1007 schedule_preempt_disabled();
1010 * ww_mutex needs to always recheck its position since its waiter
1011 * list is not FIFO ordered.
1013 if ((use_ww_ctx && ww_ctx) || !first) {
1014 first = __mutex_waiter_is_first(lock, &waiter);
1016 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
1019 set_current_state(state);
1021 * Here we order against unlock; we must either see it change
1022 * state back to RUNNING and fall through the next schedule(),
1023 * or we must see its unlock and acquire.
1025 if (__mutex_trylock(lock) ||
1026 (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
1029 spin_lock(&lock->wait_lock);
1031 spin_lock(&lock->wait_lock);
1033 __set_current_state(TASK_RUNNING);
1035 if (use_ww_ctx && ww_ctx) {
1037 * Wound-Wait; we stole the lock (!first_waiter), check the
1038 * waiters as anyone might want to wound us.
1040 if (!ww_ctx->is_wait_die &&
1041 !__mutex_waiter_is_first(lock, &waiter))
1042 __ww_mutex_check_waiters(lock, ww_ctx);
1045 mutex_remove_waiter(lock, &waiter, current);
1046 if (likely(list_empty(&lock->wait_list)))
1047 __mutex_clear_flag(lock, MUTEX_FLAGS);
1049 debug_mutex_free_waiter(&waiter);
1052 /* got the lock - cleanup and rejoice! */
1053 lock_acquired(&lock->dep_map, ip);
1055 if (use_ww_ctx && ww_ctx)
1056 ww_mutex_lock_acquired(ww, ww_ctx);
1058 spin_unlock(&lock->wait_lock);
1063 __set_current_state(TASK_RUNNING);
1064 mutex_remove_waiter(lock, &waiter, current);
1066 spin_unlock(&lock->wait_lock);
1067 debug_mutex_free_waiter(&waiter);
1068 mutex_release(&lock->dep_map, 1, ip);
1074 __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1075 struct lockdep_map *nest_lock, unsigned long ip)
1077 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1081 __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1082 struct lockdep_map *nest_lock, unsigned long ip,
1083 struct ww_acquire_ctx *ww_ctx)
1085 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
1088 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1090 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
1092 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
1095 EXPORT_SYMBOL_GPL(mutex_lock_nested);
1098 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
1100 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
1102 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
1105 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
1107 return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
1109 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
1112 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
1114 return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
1116 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
1119 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
1125 token = io_schedule_prepare();
1126 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
1127 subclass, NULL, _RET_IP_, NULL, 0);
1128 io_schedule_finish(token);
1130 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
1133 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1135 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
1138 if (ctx->deadlock_inject_countdown-- == 0) {
1139 tmp = ctx->deadlock_inject_interval;
1140 if (tmp > UINT_MAX/4)
1143 tmp = tmp*2 + tmp + tmp/2;
1145 ctx->deadlock_inject_interval = tmp;
1146 ctx->deadlock_inject_countdown = tmp;
1147 ctx->contending_lock = lock;
1149 ww_mutex_unlock(lock);
1159 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1164 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1165 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1167 if (!ret && ctx && ctx->acquired > 1)
1168 return ww_mutex_deadlock_injection(lock, ctx);
1172 EXPORT_SYMBOL_GPL(ww_mutex_lock);
1175 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1180 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1181 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1184 if (!ret && ctx && ctx->acquired > 1)
1185 return ww_mutex_deadlock_injection(lock, ctx);
1189 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
1194 * Release the lock, slowpath:
1196 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
1198 struct task_struct *next = NULL;
1199 DEFINE_WAKE_Q(wake_q);
1200 unsigned long owner;
1202 mutex_release(&lock->dep_map, 1, ip);
1205 * Release the lock before (potentially) taking the spinlock such that
1206 * other contenders can get on with things ASAP.
1208 * Except when HANDOFF: in that case we must not clear the owner field,
1209 * but instead set it to the top waiter.
1211 owner = atomic_long_read(&lock->owner);
1215 #ifdef CONFIG_DEBUG_MUTEXES
1216 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1217 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1220 if (owner & MUTEX_FLAG_HANDOFF)
1223 old = atomic_long_cmpxchg_release(&lock->owner, owner,
1224 __owner_flags(owner));
1226 if (owner & MUTEX_FLAG_WAITERS)
1235 spin_lock(&lock->wait_lock);
1236 debug_mutex_unlock(lock);
1237 if (!list_empty(&lock->wait_list)) {
1238 /* get the first entry from the wait-list: */
1239 struct mutex_waiter *waiter =
1240 list_first_entry(&lock->wait_list,
1241 struct mutex_waiter, list);
1243 next = waiter->task;
1245 debug_mutex_wake_waiter(lock, waiter);
1246 wake_q_add(&wake_q, next);
1249 if (owner & MUTEX_FLAG_HANDOFF)
1250 __mutex_handoff(lock, next);
1252 spin_unlock(&lock->wait_lock);
1257 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1259 * Here come the less common (and hence less performance-critical) APIs:
1260 * mutex_lock_interruptible() and mutex_trylock().
1262 static noinline int __sched
1263 __mutex_lock_killable_slowpath(struct mutex *lock);
1265 static noinline int __sched
1266 __mutex_lock_interruptible_slowpath(struct mutex *lock);
1269 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
1270 * @lock: The mutex to be acquired.
1272 * Lock the mutex like mutex_lock(). If a signal is delivered while the
1273 * process is sleeping, this function will return without acquiring the mutex.
1276 * Context: Process context.
1277 * Return: 0 if the lock was successfully acquired or %-EINTR if a signal arrived.
1280 int __sched mutex_lock_interruptible(struct mutex *lock)
1284 if (__mutex_trylock_fast(lock))
1287 return __mutex_lock_interruptible_slowpath(lock);
1290 EXPORT_SYMBOL(mutex_lock_interruptible);
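/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * mutex_lock_interruptible(), e.g. a driver path where a signal must abort
 * the wait. The names (my_dev, my_read, do_read) are invented.
 */
#if 0	/* example only */
static ssize_t my_read(struct my_dev *dev, char __user *buf, size_t len)
{
	ssize_t ret;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;	/* interrupted by a signal: bail out */

	ret = do_read(dev, buf, len);	/* invented helper, runs under dev->lock */

	mutex_unlock(&dev->lock);
	return ret;
}
#endif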
1293 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1294 * @lock: The mutex to be acquired.
1296 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
1297 * the current process is delivered while the process is sleeping, this
1298 * function will return without acquiring the mutex.
1300 * Context: Process context.
1301 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1302 * fatal signal arrived.
1304 int __sched mutex_lock_killable(struct mutex *lock)
1308 if (__mutex_trylock_fast(lock))
1311 return __mutex_lock_killable_slowpath(lock);
1313 EXPORT_SYMBOL(mutex_lock_killable);
1316 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1317 * @lock: The mutex to be acquired.
1319 * Lock the mutex like mutex_lock(). While the task is waiting for this
1320 * mutex, it will be accounted as being in the IO wait state by the scheduler.
1323 * Context: Process context.
1325 void __sched mutex_lock_io(struct mutex *lock)
1329 token = io_schedule_prepare();
1331 io_schedule_finish(token);
1333 EXPORT_SYMBOL_GPL(mutex_lock_io);
1335 static noinline void __sched
1336 __mutex_lock_slowpath(struct mutex *lock)
1338 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1341 static noinline int __sched
1342 __mutex_lock_killable_slowpath(struct mutex *lock)
1344 return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1347 static noinline int __sched
1348 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1350 return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1353 static noinline int __sched
1354 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1356 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1360 static noinline int __sched
1361 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1362 struct ww_acquire_ctx *ctx)
1364 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1371 * mutex_trylock - try to acquire the mutex, without waiting
1372 * @lock: the mutex to be acquired
1374 * Try to acquire the mutex atomically. Returns 1 if the mutex
1375 * has been acquired successfully, and 0 on contention.
1377 * NOTE: this function follows the spin_trylock() convention, so
1378 * it is negated from the down_trylock() return values! Be careful
1379 * about this when converting semaphore users to mutexes.
1381 * This function must not be used in interrupt context. The
1382 * mutex must be released by the same task that acquired it.
1384 int __sched mutex_trylock(struct mutex *lock)
1388 #ifdef CONFIG_DEBUG_MUTEXES
1389 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
1392 locked = __mutex_trylock(lock);
1394 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1398 EXPORT_SYMBOL(mutex_trylock);
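/*
 * Illustrative sketch (not part of the original file): mutex_trylock() follows
 * the spin_trylock() convention noted above, so its return value reads as a
 * boolean "got the lock". The names (my_dev, try_flush, flush_pending) are
 * invented.
 */
#if 0	/* example only */
static void try_flush(struct my_dev *dev)
{
	if (!mutex_trylock(&dev->lock))
		return;			/* contended: skip and try again later */

	flush_pending(dev);		/* invented helper, runs under dev->lock */
	mutex_unlock(&dev->lock);
}
#endif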
1400 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1402 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1406 if (__mutex_trylock_fast(&lock->base)) {
1408 ww_mutex_set_context_fastpath(lock, ctx);
1412 return __ww_mutex_lock_slowpath(lock, ctx);
1414 EXPORT_SYMBOL(ww_mutex_lock);
1417 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1421 if (__mutex_trylock_fast(&lock->base)) {
1423 ww_mutex_set_context_fastpath(lock, ctx);
1427 return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1429 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1434 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1435 * @cnt: the atomic which we are to dec
1436 * @lock: the mutex to return holding if we dec to 0
1438 * return true and hold lock if we dec to 0, return false otherwise
1440 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1442 /* dec if we can't possibly hit 0 */
1443 if (atomic_add_unless(cnt, -1, 1))
1445 /* we might hit 0, so take the lock */
1447 if (!atomic_dec_and_test(cnt)) {
1448 /* when we actually did the dec, we didn't hit 0 */
1452 /* we hit 0, and we hold the lock */
1455 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
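/*
 * Illustrative sketch (not part of the original file): the usual caller
 * pattern for atomic_dec_and_mutex_lock() -- only the task that drops the
 * count to zero enters the teardown path, and it does so with the mutex held.
 * The names (my_object, my_object_put, obj_list_lock) are invented.
 */
#if 0	/* example only */
static void my_object_put(struct my_object *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock))
		return;			/* other references remain, nothing to do */

	list_del(&obj->node);		/* teardown runs under obj_list_lock */
	mutex_unlock(&obj_list_lock);
	kfree(obj);
}
#endif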