// SPDX-License-Identifier: GPL-2.0
/* kernel/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture
 * by Waiman Long <longman@redhat.com>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>

#include "lock_events.h"

/*
 * The least significant 2 bits of the owner value have the following
 * meanings when set.
 *  - RWSEM_READER_OWNED (bit 0): The rwsem is owned by readers
 *  - RWSEM_ANONYMOUSLY_OWNED (bit 1): The rwsem is anonymously owned,
 *    i.e. the owner(s) cannot be readily determined. It can be reader
 *    owned or the owning writer is indeterminate.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with both the RWSEM_READER_OWNED and
 * RWSEM_ANONYMOUSLY_OWNED bits set. On unlock, the owner field will
 * largely be left untouched. So for a free or reader-owned rwsem,
 * the owner value may contain information about the last reader that
 * acquired the rwsem. The anonymous bit is set because that particular
 * reader may or may not still own the lock.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_ANONYMOUSLY_OWNED	(1UL << 1)

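/*
 * Illustrative sketch of the encoding above. The task address used here is
 * only an assumed example; task_struct pointers are aligned well beyond
 * 4 bytes, so the two low bits of a plain pointer are always zero and are
 * free for the flags.
 *
 *   owner value          meaning
 *   0x0                  free, never reader-owned (or writer just unlocked)
 *   0xffff888012345600   writer-owned by the task at that address
 *   0xffff888012345603   reader-owned; that task was the last reader known
 *                        to have taken the lock (bits 0 and 1 set)
 *   0x3                  reader-owned, owning task(s) not recorded
 */
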
#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		(long)((sem)->owner), (long)current,		\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif

/*
 * The definition of the atomic counter in the semaphore:
 *
 * Bit  0   - writer locked bit
 * Bit  1   - waiters present bit
 * Bits 2-7 - reserved
 * Bits 8-X - 24-bit (32-bit) or 56-bit reader count
 *
 * atomic_long_fetch_add() is used to obtain reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain writer lock.
 */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS)

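/*
 * Illustrative sketch of how the bits above combine. The helper below is a
 * hypothetical example added for clarity only and is not used elsewhere in
 * this file; it merely shows the decoding implied by the masks. For
 * instance, a count of 0x302 means three readers hold the lock and waiters
 * are queued, while 0x001 means a single writer holds it uncontended.
 */
static inline unsigned long rwsem_example_reader_count(long count)
{
	/* The reader count occupies the bits above RWSEM_READER_SHIFT. */
	return ((unsigned long)count & RWSEM_READER_MASK) >> RWSEM_READER_SHIFT;
}
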
/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Read from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, NULL);
}

/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously, it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED
						 | RWSEM_ANONYMOUSLY_OWNED;

	WRITE_ONCE(sem->owner, (struct task_struct *)val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
}

/*
 * Return true if a rwsem waiter can spin on the rwsem's owner
 * and steal the lock, i.e. the lock is not anonymously owned.
 * N.B. !owner is considered spinnable.
 */
static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
{
	return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
}

/*
 * Return true if rwsem is owned by an anonymous writer or readers.
 */
static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
{
	return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
}

#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in owner of a reader-owned rwsem, it will be the
 * real owner or one of the real owners. The only exception is when the
 * unlock is done by up_read_non_owner().
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = (unsigned long)current | RWSEM_READER_OWNED
						   | RWSEM_ANONYMOUSLY_OWNED;
	if (READ_ONCE(sem->owner) == (struct task_struct *)val)
		cmpxchg_relaxed((unsigned long *)&sem->owner, val,
				RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED);
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif

/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * and a writer has just released the lock. So another writer may steal
 * the lock immediately after that.
 */

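/*
 * As a worked illustration of the caveat above: count may momentarily read
 * as 0x100 because a reader that failed its fast path has not yet
 * subtracted its RWSEM_READER_BIAS, even though nobody owns the lock. Code
 * that looked only at the reader bits could wrongly conclude the lock is
 * reader-owned, which is why the owner field has to be consulted as well.
 */
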
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	sem->owner = NULL;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
#endif
}
EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
 *   have been set.
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if wake_type is RWSEM_WAKE_ANY (i.e. we
 *   are not downgrading)
 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
			    enum rwsem_wake_type wake_type,
			    struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_READER_BIAS;
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
			atomic_long_sub(adjustment, &sem->count);
			return;
		}
		/*
		 * Set it to reader-owned to give spinners an early
		 * indication that readers now have the lock.
		 */
		__rwsem_set_reader_owned(sem, waiter->task);
	}

	/*
	 * Grant an infinite number of read locks to the readers at the front
	 * of the queue. We know that woken will be at least 1 as we accounted
	 * for above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 *
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented. It
	 * is because the to-be-woken waiter may not have slept yet. So it
	 * may see waiter->task got cleared, finish its critical section and
	 * do an unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put them into wake_q to be woken up later.
	 */
	list_for_each_entry(waiter, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			break;

		woken++;
	}
	list_cut_before(&wlist, &sem->wait_list, &waiter->list);

	adjustment = woken * RWSEM_READER_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_FLAG_WAITERS;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);

	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_slowpath() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}
}

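/*
 * Worked example of the count arithmetic in rwsem_mark_wake(), using the
 * bit layout defined earlier in this file: suppose a writer calls
 * up_write() with three readers queued and nothing else, so count is
 * RWSEM_FLAG_WAITERS (0x2) on entry. rwsem_mark_wake() first speculatively
 * adds RWSEM_READER_BIAS (0x100) for the front reader, then counts
 * woken = 3 and computes adjustment = 3 * 0x100 - 0x100 = 0x200. Because
 * the wait list is now empty it also subtracts RWSEM_FLAG_WAITERS, so the
 * single atomic_long_add() applies 0x200 - 0x2 = 0x1fe, leaving
 * count = 0x300: three reader holds, no waiters.
 */
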
/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	long new;

	if (count & RWSEM_LOCK_MASK)
		return false;

	new = count + RWSEM_WRITER_LOCKED -
	     (list_is_singular(&sem->wait_list) ? RWSEM_FLAG_WAITERS : 0);

	if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, new)) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}

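/*
 * Worked example for rwsem_try_write_lock(): a writer that is the only
 * waiter wakes up and sees count == RWSEM_FLAG_WAITERS (0x2). No lock bits
 * are set and the wait list is singular, so the cmpxchg attempts
 * 0x2 + 0x1 - 0x2 = 0x1, i.e. it takes the writer lock and clears the
 * waiters bit in the same atomic step.
 */
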
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!(count & RWSEM_LOCK_MASK)) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
					count + RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_wlock);
			return true;
		}
	}
	return false;
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Due to lock holder preemption, we skip spinning if the task is
	 * not running on a CPU or its CPU has been preempted.
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (owner) {
		ret = is_rwsem_owner_spinnable(owner) &&
		      owner_on_cpu(owner);
	}
	rcu_read_unlock();
	return ret;
}

/*
 * The rwsem_spin_on_owner() function returns the following 4 values
 * depending on the lock owner state.
 *   OWNER_NULL  : owner is currently NULL
 *   OWNER_WRITER: when owner changes and is a writer
 *   OWNER_READER: when owner changes and the new owner may be a reader.
 *   OWNER_NONSPINNABLE:
 *		   when optimistic spinning has to stop because either the
 *		   owner stops running, is unknown, or its timeslice has
 *		   been used up.
 */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};
#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER)

static inline enum owner_state rwsem_owner_state(unsigned long owner)
{
	if (!owner)
		return OWNER_NULL;

	if (owner & RWSEM_ANONYMOUSLY_OWNED)
		return OWNER_NONSPINNABLE;

	if (owner & RWSEM_READER_OWNED)
		return OWNER_READER;

	return OWNER_WRITER;
}

static noinline enum owner_state rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *tmp, *owner = READ_ONCE(sem->owner);
	enum owner_state state = rwsem_owner_state((unsigned long)owner);

	if (state != OWNER_WRITER)
		return state;

	rcu_read_lock();
	for (;;) {
		tmp = READ_ONCE(sem->owner);
		if (tmp != owner) {
			state = rwsem_owner_state((unsigned long)tmp);
			break;
		}

		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking sem->owner still matches owner, if that fails,
		 * owner might point to free()d memory, if it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (need_resched() || !owner_on_cpu(owner)) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return state;
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock as we can't determine if they are
	 *     actively running or not.
	 */
	while (rwsem_spin_on_owner(sem) & OWNER_SPINNABLE) {
		/*
		 * Try to acquire the lock
		 */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have been preempted between
		 * the owner acquiring the lock and setting the owner field.
		 * If we're an RT task, that will live-lock because we won't
		 * let the owner complete.
		 */
		if (!sem->owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	lockevent_cond_inc(rwsem_opt_fail, !taken);
	return taken;
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait for the read lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_READER_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer, this reader can exit the slowpath and return
		 * immediately as its RWSEM_READER_BIAS has already been
		 * set in the count.
		 */
		if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
			raw_spin_unlock_irq(&sem->wait_lock);
			rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			return sem;
		}
		adjustment += RWSEM_FLAG_WAITERS;
	}
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (!(count & RWSEM_LOCK_MASK) ||
	   (!(count & RWSEM_WRITER_MASK) && (adjustment & RWSEM_FLAG_WAITERS)))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_current_state(state);
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			break;
		}
		schedule();
		lockevent_inc(rwsem_sleep_reader);
	}

	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock);
	return sem;

out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock_fail);
	return ERR_PTR(-EINTR);
}

/*
 * Wait until we successfully acquire the write lock
 */
static struct rw_semaphore *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock */
	if (waiting) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and there
		 * are no active writers and some readers, the lock must be
		 * read owned; so we try to wake any read locks that were
		 * queued ahead of us.
		 */
		if (!(count & RWSEM_WRITER_MASK) &&
		     (count & RWSEM_READER_MASK)) {
			rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);

			/*
			 * Reinitialize wake_q after use.
			 */
			wake_q_init(&wake_q);
		}

	} else {
		count = atomic_long_add_return(RWSEM_FLAG_WAITERS, &sem->count);
	}

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;

		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			lockevent_inc(rwsem_sleep_writer);
			set_current_state(state);
			count = atomic_long_read(&sem->count);
		} while (count & RWSEM_LOCK_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);
	lockevent_inc(rwsem_wlock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
	else
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);
	lockevent_inc(rwsem_wlock_fail);

	return ERR_PTR(-EINTR);
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller has replaced RWSEM_WRITER_LOCKED with RWSEM_READER_BIAS in count
 *   and found the RWSEM_FLAG_WAITERS bit set
 * - just wake up any readers at the front of the queue
 */
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * lock for reading
 */
inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
			&sem->count) & RWSEM_READ_FAILED_MASK)) {
		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
					RWSEM_READER_OWNED), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
			&sem->count) & RWSEM_READ_FAILED_MASK)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
					RWSEM_READER_OWNED), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
	return 0;
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	/*
	 * Optimize for the case when the rwsem is not locked at all.
	 */
	long tmp = RWSEM_UNLOCKED_VALUE;

	do {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					tmp + RWSEM_READER_BIAS)) {
			rwsem_set_reader_owned(sem);
			return 1;
		}
	} while (!(tmp & RWSEM_READ_FAILED_MASK));
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						      RWSEM_WRITER_LOCKED)))
		rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
	rwsem_set_owner(sem);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						      RWSEM_WRITER_LOCKED))) {
		if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
			return -EINTR;
	}
	rwsem_set_owner(sem);
	return 0;
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					    RWSEM_WRITER_LOCKED)) {
		rwsem_set_owner(sem);
		return true;
	}
	return false;
}

/*
 * unlock after reading
 */
inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED), sem);
	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
		      RWSEM_FLAG_WAITERS))
		rwsem_wake(sem);
}

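/*
 * Example of the check above: the last reader releasing a contended rwsem
 * sees tmp drop from 0x102 to 0x2, i.e. no lock bits left but
 * RWSEM_FLAG_WAITERS still set, so it must call rwsem_wake(). A reader
 * releasing while other readers remain (tmp still has reader bits set)
 * skips the wakeup because lock bits are still present.
 */
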
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
	rwsem_clear_owner(sem);
	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
		rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
	tmp = atomic_long_fetch_add_release(
		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp & RWSEM_FLAG_WAITERS)
		rwsem_downgrade_wake(sem);
}

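/*
 * Worked example for __downgrade_write(): a writer holding a contended
 * rwsem has count == 0x3 (writer locked + waiters). The fetch_add of
 * -RWSEM_WRITER_LOCKED + RWSEM_READER_BIAS turns that into 0x102 (one
 * reader hold, waiters still queued) and returns the old value 0x3; since
 * RWSEM_FLAG_WAITERS was set, rwsem_downgrade_wake() then wakes the
 * readers at the front of the queue.
 */
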
/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);

int __sched down_read_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(down_read_trylock);

/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);

/*
 * lock for writing
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(down_write_trylock);

/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, 1, _RET_IP_);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read);

/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, 1, _RET_IP_);
	__up_write(sem);
}
EXPORT_SYMBOL(up_write);

/*
 * downgrade write lock to read lock
 */
void downgrade_write(struct rw_semaphore *sem)
{
	lock_downgrade(&sem->dep_map, _RET_IP_);
	__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);

void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
	might_sleep();
	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);

void down_read_non_owner(struct rw_semaphore *sem)
{
	might_sleep();
	__down_read(sem);
	__rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);

void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);

int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);

void up_read_non_owner(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
				sem);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);

#endif