// SPDX-License-Identifier: GPL-2.0
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + # readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *	 the count becomes more than 0 for successful lock acquisition,
 *	 i.e. the case where there are only readers or nobody has lock.
 *	 (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking the count becomes ACTIVE_WRITE_BIAS for successful lock
 *	 acquisition (i.e. nobody else has lock or attempts lock). If
 *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *	 are only waiters but none active (5th case above), and attempt to
 *	 steal the lock.
 */
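
/*
 * An illustrative walk-through of the arithmetic above, assuming the 32-bit
 * values ACTIVE_BIAS = 0x00000001, WAITING_BIAS = 0xffff0000 and
 * ACTIVE_WRITE_BIAS = 0xffff0001 (this is a worked example, not code):
 *
 *	down_read()			count = 0x00000001  (1 active reader)
 *	down_read()			count = 0x00000002  (2 active readers)
 *	down_write() queues as waiter	count = 0xffff0002  (readers + waiter)
 *	up_read(), up_read()		count = 0xffff0000  (waiter, none active)
 *	queued writer takes the lock	count = 0xffff0001  (1 active writer)
 *	up_write()			count = 0x00000000  (unlocked)
 */
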
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
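
/*
 * Note: callers normally do not invoke __init_rwsem() directly; the
 * init_rwsem() macro in <linux/rwsem.h> wraps it and supplies a static
 * lock_class_key, roughly:
 *
 *	struct rw_semaphore my_sem;
 *	init_rwsem(&my_sem);
 */
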
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
			      enum rwsem_wake_type wake_type,
			      struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
try_reader_grant:
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/*
			 * If the count is still less than RWSEM_WAITING_BIAS
			 * after removing the adjustment, it is assumed that
			 * a writer has stolen the lock. We have to undo our
			 * reader grant.
			 */
			if (atomic_long_add_return(-adjustment, &sem->count) <
			    RWSEM_WAITING_BIAS)
				return;

			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
		/*
		 * Set it to reader-owned to give spinners an early
		 * indication that readers now have the lock.
		 */
		__rwsem_set_reader_owned(sem, waiter->task);
	}

	/*
	 * Grant an infinite number of read locks to the readers at the front
	 * of the queue. We know that woken will be at least 1 as we accounted
	 * for above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 */
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		struct task_struct *tsk;

		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			break;

		woken++;
		tsk = waiter->task;

		get_task_struct(tsk);
		list_del(&waiter->list);
		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_failed() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);
}
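
/*
 * Worked example of the adjustment arithmetic in __rwsem_mark_wake()
 * (illustrative, 32-bit values): a writer releases the lock with three
 * readers queued, so we enter with count == WAITING_BIAS (0xffff0000).
 * The early reader grant bumps count to 0xffff0001 (adjustment ==
 * ACTIVE_READ_BIAS). The loop then wakes three readers, so woken == 3 and
 * adjustment becomes 3 * ACTIVE_READ_BIAS - ACTIVE_READ_BIAS = 2; the wait
 * list is now empty, so WAITING_BIAS is removed as well. The final
 * atomic_long_add() leaves count == 0x00000003: three active readers and
 * nobody queued.
 */
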
/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
	 */
	if (count != RWSEM_WAITING_BIAS)
		return false;

	/*
	 * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
	 * are other tasks on the wait list, we need to add on WAITING_BIAS.
	 */
	count = list_is_singular(&sem->wait_list) ?
			RWSEM_ACTIVE_WRITE_BIAS :
			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
	if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
							== RWSEM_WAITING_BIAS) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
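
/*
 * In terms of the count values in the guide above (illustrative, 32-bit):
 * if we are the only waiter, the cmpxchg moves count from 0xffff0000
 * (WAITING_BIAS) to 0xffff0001 (ACTIVE_WRITE_BIAS); if other waiters remain
 * queued behind us, it moves count to 0xfffe0001
 * (ACTIVE_WRITE_BIAS + WAITING_BIAS).
 */
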
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!count || count == RWSEM_WAITING_BIAS) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
					count + RWSEM_ACTIVE_WRITE_BIAS)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_wlock);
			return true;
		}
	}
	return false;
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Due to lock holder preemption, skip spinning if the owner task
	 * is not on a CPU or its CPU has been preempted.
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (owner) {
		ret = is_rwsem_owner_spinnable(owner) &&
		      owner_on_cpu(owner);
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner = READ_ONCE(sem->owner);

	if (!is_rwsem_owner_spinnable(owner))
		return false;

	rcu_read_lock();
	while (owner && (READ_ONCE(sem->owner) == owner)) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking sem->owner still matches owner. If that fails,
		 * owner might point to free()d memory; if it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Abort spinning when need_resched() is set, the owner is
		 * not running, or the owner's cpu is preempted.
		 */
		if (need_resched() || !owner_on_cpu(owner)) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * If there is a new owner or the owner is not set, we continue
	 * spinning on the lock.
	 */
	return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock as we can't determine if they are
	 *     actively running or not.
	 */
	while (rwsem_spin_on_owner(sem)) {
		/*
		 * Try to acquire the lock
		 */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!sem->owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	lockevent_cond_inc(rwsem_opt_fail, !taken);
	return taken;
}
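
/*
 * rwsem_optimistic_spin() is called from __rwsem_down_write_failed_common()
 * below, after the failed fast-path write bias has been undone and before
 * the writer adds itself to the wait list.
 */
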
/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait for the read lock to be granted
 */
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer, this reader can exit the slowpath and return
		 * immediately as its RWSEM_ACTIVE_READ_BIAS has already
		 * been set in the count.
		 */
		if (atomic_long_read(&sem->count) >= 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			return sem;
		}
		adjustment += RWSEM_WAITING_BIAS;
	}
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_current_state(state);
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			break;
		}
		schedule();
		lockevent_inc(rwsem_sleep_reader);
	}

	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock);
	return sem;

out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock_fail);
	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed);

__visible struct rw_semaphore * __sched
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed_killable);
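
/*
 * For context, a simplified sketch of the __down_read() fast path that
 * falls back to rwsem_down_read_failed() on contention (the real helper
 * lives in the rwsem header; debug checks and other details are omitted
 * here):
 *
 *	static inline void __down_read(struct rw_semaphore *sem)
 *	{
 *		if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
 *			rwsem_down_read_failed(sem);
 *		else
 *			rwsem_set_reader_owned(sem);
 *	}
 *
 * The fast path has already added RWSEM_ACTIVE_READ_BIAS, which is why the
 * slowpath above starts with adjustment = -RWSEM_ACTIVE_READ_BIAS.
 */
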
/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* undo write bias from down_write operation, stop active locking */
	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS) {
			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);

			/*
			 * Reinitialize wake_q after use.
			 */
			wake_q_init(&wake_q);
		}

	} else
		count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			lockevent_inc(rwsem_sleep_writer);
			set_current_state(state);
		} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);
	lockevent_inc(rwsem_wlock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);
	lockevent_inc(rwsem_wlock_fail);

	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);
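
/*
 * For context, a simplified sketch of the __down_write() fast path that
 * falls back to rwsem_down_write_failed() on contention (the real helper
 * lives in the rwsem header; details may differ slightly):
 *
 *	static inline void __down_write(struct rw_semaphore *sem)
 *	{
 *		long tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
 *							  &sem->count);
 *		if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
 *			rwsem_down_write_failed(sem);
 *		rwsem_set_owner(sem);
 *	}
 *
 * This is the write bias that __rwsem_down_write_failed_common() undoes on
 * entry before spinning or queueing.
 */
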
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	/*
	 * __rwsem_down_write_failed_common(sem)
	 *   rwsem_optimistic_spin(sem)
	 *     osq_unlock(sem->osq)
	 *   ...
	 *   atomic_long_add_return(&sem->count)
	 *
	 *      - VS -
	 *
	 *              __up_write()
	 *                if (atomic_long_sub_return_release(&sem->count) < 0)
	 *                  rwsem_wake(sem)
	 *                    osq_is_locked(&sem->osq)
	 *
	 * And __up_write() must observe !osq_is_locked() when it observes the
	 * atomic_long_add_return() in order to not miss a wakeup.
	 *
	 * This boils down to:
	 *
	 * [S.rel] X = 1                [RmW] r0 = (Y += 0)
	 *         MB                         RMB
	 * [RmW]   Y += 1               [L]   r1 = X
	 *
	 * exists (r0=1 /\ r1=0)
	 */

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer           up_write/up_read caller
	 *    ---------------           -----------------------
	 * [S]   osq_unlock()           [L]   osq
	 *       MB                           RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);
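
/*
 * For context, a simplified sketch of the __up_write() path that ends up
 * here (the real helper lives in the rwsem header; __up_read() similarly
 * calls rwsem_wake() when the active part of the count drops to zero with
 * waiters queued):
 *
 *	static inline void __up_write(struct rw_semaphore *sem)
 *	{
 *		rwsem_clear_owner(sem);
 *		if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
 *							    &sem->count) < 0))
 *			rwsem_wake(sem);
 *	}
 */
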
/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
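
/*
 * For context, the downgrade path that ends up here converts the write bias
 * into a single read bias by incrementing the waiting part of the count,
 * roughly (the real helper lives in the rwsem header; details may differ):
 *
 *	rwsem_set_reader_owned(sem);
 *	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
 *	if (tmp < 0)
 *		rwsem_downgrade_wake(sem);
 */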