1 // SPDX-License-Identifier: GPL-2.0
2 /* kernel/rwsem.c: R/W semaphores, public implementation
4 * Written by David Howells (dhowells@redhat.com).
5 * Derived from asm-i386/semaphore.h
7 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
8 * and Michel Lespinasse <walken@google.com>
10 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
11 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
13 * Rwsem count bit fields re-definition and rwsem rearchitecture by
14 * Waiman Long <longman@redhat.com> and
15 * Peter Zijlstra <peterz@infradead.org>.
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/sched/rt.h>
22 #include <linux/sched/task.h>
23 #include <linux/sched/debug.h>
24 #include <linux/sched/wake_q.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/clock.h>
27 #include <linux/export.h>
28 #include <linux/rwsem.h>
29 #include <linux/atomic.h>
32 #include "lock_events.h"
35 * The least significant 3 bits of the owner value have the following
36 * meanings:
37 * - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
38 * - Bit 1: RWSEM_RD_NONSPINNABLE - Readers cannot spin on this lock.
39 * - Bit 2: RWSEM_WR_NONSPINNABLE - Writers cannot spin on this lock.
41 * When the rwsem is either owned by an anonymous writer, or it is
42 * reader-owned, but a spinning writer has timed out, both nonspinnable
43 * bits will be set to disable optimistic spinning by readers and writers.
44 * In the latter case, the last unlocking reader should then check the
45 * writer nonspinnable bit and clear it to give writers, but not readers,
46 * preference in acquiring the lock via optimistic spinning. A similar
47 * action is also taken in the reader slowpath.
49 * When a writer acquires a rwsem, it puts its task_struct pointer
50 * into the owner field. It is cleared after an unlock.
52 * When a reader acquires a rwsem, it will also put its task_struct
53 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
54 * On unlock, the owner field will largely be left untouched. So
55 * for a free or reader-owned rwsem, the owner value may contain
56 * information about the last reader that acquired the rwsem.
58 * That information may be helpful in debugging cases where the system
59 * seems to hang on a reader-owned rwsem, especially if only one reader
60 * is involved. Ideally we would like to track all the readers that own
61 * a rwsem, but the overhead is simply too big.
63 * Reader optimistic spinning is helpful when the reader critical section
64 * is short and there aren't that many readers around. It gives readers
65 * relative preference over writers. When a writer times out spinning
66 * on a reader-owned lock and sets the nonspinnable bits, there are two
67 * main reasons for that:
69 * 1) The reader critical section is long, perhaps the task sleeps after
70 * acquiring the read lock.
71 * 2) There are just too many readers contending the lock causing it to
72 * take a while to service all of them.
74 * In the former case, a long reader critical section will impede the
75 * progress of writers, which is usually more important for system
76 * performance. In the latter case, reader optimistic spinning tends to
77 * make the reader groups that acquire the lock together smaller, leading
78 * to more of them. That may hurt performance in some cases. In other
79 * words, the setting of nonspinnable bits indicates that reader
80 * optimistic spinning may not be helpful for those workloads that cause
81 * it.
83 * Therefore, any writers that had observed the setting of the writer
84 * nonspinnable bit for a given rwsem after they fail to acquire the lock
85 * via optimistic spinning will set the reader nonspinnable bit once they
86 * acquire the write lock. Similarly, readers that observe the setting
87 * of reader nonspinnable bit at slowpath entry will set the reader
88 * nonspinnable bits when they acquire the read lock via the wakeup path.
90 * Once the reader nonspinnable bit is on, it will only be reset when
91 * a writer is able to acquire the rwsem in the fast path or somehow a
92 * reader or writer in the slowpath doesn't observe the nonspinnable bit.
94 * This is to discourage reader optimistic spinning on that particular
95 * rwsem and make writers more preferred. This adaptive disabling of reader
96 * optimistic spinning will alleviate the negative side effect of this
97 * feature.
99 #define RWSEM_READER_OWNED (1UL << 0)
100 #define RWSEM_RD_NONSPINNABLE (1UL << 1)
101 #define RWSEM_WR_NONSPINNABLE (1UL << 2)
102 #define RWSEM_NONSPINNABLE (RWSEM_RD_NONSPINNABLE | RWSEM_WR_NONSPINNABLE)
103 #define RWSEM_OWNER_FLAGS_MASK (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
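/*
 * Illustrative sketch (not part of the original file): the helper below is
 * hypothetical and only demonstrates how an owner word decomposes into the
 * task_struct pointer and the flag bits defined above. The function name and
 * the pr_debug() reporting are assumptions made purely for illustration.
 */
static inline void rwsem_example_show_owner(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	pr_debug("rwsem owner: task=%px reader_owned=%d rd_nonspin=%d wr_nonspin=%d\n",
		 (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK),
		 !!(owner & RWSEM_READER_OWNED),
		 !!(owner & RWSEM_RD_NONSPINNABLE),
		 !!(owner & RWSEM_WR_NONSPINNABLE));
}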
105 #ifdef CONFIG_DEBUG_RWSEMS
106 # define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
107 if (!debug_locks_silent && \
108 WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
109 #c, atomic_long_read(&(sem)->count), \
110 atomic_long_read(&(sem)->owner), (long)current, \
111 list_empty(&(sem)->wait_list) ? "" : "not ")) \
115 # define DEBUG_RWSEMS_WARN_ON(c, sem)
119 * On 64-bit architectures, the bit definitions of the count are:
121 * Bit 0 - writer locked bit
122 * Bit 1 - waiters present bit
123 * Bit 2 - lock handoff bit
124 * Bits 3-7 - reserved
125 * Bits 8-62 - 55-bit reader count
126 * Bit 63 - read fail bit
128 * On 32-bit architectures, the bit definitions of the count are:
130 * Bit 0 - writer locked bit
131 * Bit 1 - waiters present bit
132 * Bit 2 - lock handoff bit
133 * Bits 3-7 - reserved
134 * Bits 8-30 - 23-bit reader count
135 * Bit 31 - read fail bit
137 * It is not likely that the most significant bit (read fail bit) will ever
138 * be set. This guard bit is still checked anyway in the down_read() fastpath
139 * just in case we need to use up more of the reader bits for other
140 * purposes in the future.
142 * atomic_long_fetch_add() is used to obtain reader lock, whereas
143 * atomic_long_cmpxchg() will be used to obtain writer lock.
145 * There are three places where the lock handoff bit may be set or cleared.
146 * 1) rwsem_mark_wake() for readers.
147 * 2) rwsem_try_write_lock() for writers.
148 * 3) Error path of rwsem_down_write_slowpath().
150 * For all the above cases, wait_lock will be held. A writer must also
151 * be the first one in the wait_list to be eligible for setting the handoff
152 * bit. So concurrent setting/clearing of handoff bit is not possible.
154 #define RWSEM_WRITER_LOCKED (1UL << 0)
155 #define RWSEM_FLAG_WAITERS (1UL << 1)
156 #define RWSEM_FLAG_HANDOFF (1UL << 2)
157 #define RWSEM_FLAG_READFAIL (1UL << (BITS_PER_LONG - 1))
159 #define RWSEM_READER_SHIFT 8
160 #define RWSEM_READER_BIAS (1UL << RWSEM_READER_SHIFT)
161 #define RWSEM_READER_MASK (~(RWSEM_READER_BIAS - 1))
162 #define RWSEM_WRITER_MASK RWSEM_WRITER_LOCKED
163 #define RWSEM_LOCK_MASK (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
164 #define RWSEM_READ_FAILED_MASK (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
165 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
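/*
 * Illustrative sketch (not from the original source): decode a count word
 * into the fields described above, assuming the read fail guard bit is
 * clear as is normally the case. The helper name and the pr_debug()
 * reporting are hypothetical, added for demonstration only.
 */
static inline void rwsem_example_show_count(long count)
{
	unsigned long readers = (count & RWSEM_READER_MASK) >> RWSEM_READER_SHIFT;

	pr_debug("rwsem count: wlocked=%d waiters=%d handoff=%d readers=%lu\n",
		 !!(count & RWSEM_WRITER_LOCKED),
		 !!(count & RWSEM_FLAG_WAITERS),
		 !!(count & RWSEM_FLAG_HANDOFF),
		 readers);
}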
168 * All writes to owner are protected by WRITE_ONCE() to make sure that
169 * store tearing can't happen as optimistic spinners may read and use
170 * the owner value concurrently without lock. Read from owner, however,
171 * may not need READ_ONCE() as long as the pointer value is only used
172 * for comparison and isn't being dereferenced.
174 static inline void rwsem_set_owner(struct rw_semaphore *sem)
176 atomic_long_set(&sem->owner, (long)current);
179 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
181 atomic_long_set(&sem->owner, 0);
185 * Test the flags in the owner field.
187 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
189 return atomic_long_read(&sem->owner) & flags;
193 * The task_struct pointer of the last owning reader will be left in
194 * the owner field.
196 * Note that the owner value just indicates the task has owned the rwsem
197 * previously; it may not be the real owner or one of the real owners
198 * anymore when that field is examined, so take it with a grain of salt.
200 * The reader non-spinnable bit is preserved.
202 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
203 struct task_struct *owner)
205 unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
206 (atomic_long_read(&sem->owner) & RWSEM_RD_NONSPINNABLE);
208 atomic_long_set(&sem->owner, val);
211 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
213 __rwsem_set_reader_owned(sem, current);
217 * Return true if the rwsem is owned by a reader.
219 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
221 #ifdef CONFIG_DEBUG_RWSEMS
223 * Check the count to see if it is write-locked.
225 long count = atomic_long_read(&sem->count);
227 if (count & RWSEM_WRITER_MASK)
230 return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
233 #ifdef CONFIG_DEBUG_RWSEMS
235 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
236 * is a task pointer in owner of a reader-owned rwsem, it will be the
237 * real owner or one of the real owners. The only exception is when the
238 * unlock is done by up_read_non_owner().
240 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
242 unsigned long val = atomic_long_read(&sem->owner);
244 while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
245 if (atomic_long_try_cmpxchg(&sem->owner, &val,
246 val & RWSEM_OWNER_FLAGS_MASK))
251 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
257 * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
258 * remains set. Otherwise, the operation will be aborted.
260 static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
262 unsigned long owner = atomic_long_read(&sem->owner);
265 if (!(owner & RWSEM_READER_OWNED))
267 if (owner & RWSEM_NONSPINNABLE)
269 } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
270 owner | RWSEM_NONSPINNABLE));
273 static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
275 long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
276 if (WARN_ON_ONCE(cnt < 0))
277 rwsem_set_nonspinnable(sem);
278 return !(cnt & RWSEM_READ_FAILED_MASK);
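/*
 * Worked example (added for clarity): on an unlocked rwsem the count is 0,
 * so the add above leaves cnt == RWSEM_READER_BIAS (0x100); none of the
 * RWSEM_READ_FAILED_MASK bits are set and the trylock succeeds. If a writer
 * holds the lock, RWSEM_WRITER_LOCKED (bit 0) is set in cnt and false is
 * returned, sending the reader to the slowpath.
 */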
282 * Return just the real task structure pointer of the owner
284 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
286 return (struct task_struct *)
287 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
291 * Return the real task structure pointer of the owner and the embedded
292 * flags in the owner. pflags must be non-NULL.
294 static inline struct task_struct *
295 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
297 unsigned long owner = atomic_long_read(&sem->owner);
299 *pflags = owner & RWSEM_OWNER_FLAGS_MASK;
300 return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
304 * Guide to the rw_semaphore's count field.
306 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
307 * by a writer.
309 * The lock is owned by readers when
310 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
311 * (2) some of the reader bits are set in count, and
312 * (3) the owner field has the RWSEM_READER_OWNED bit set.
314 * Having some reader bits set is not enough to guarantee a reader-owned
315 * lock as the readers may be in the process of backing out from the count
316 * and a writer has just released the lock. So another writer may steal
317 * the lock immediately after that.
321 * Initialize an rwsem:
323 void __init_rwsem(struct rw_semaphore *sem, const char *name,
324 struct lock_class_key *key)
326 #ifdef CONFIG_DEBUG_LOCK_ALLOC
328 * Make sure we are not reinitializing a held semaphore:
330 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
331 lockdep_init_map(&sem->dep_map, name, key, 0);
333 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
334 raw_spin_lock_init(&sem->wait_lock);
335 INIT_LIST_HEAD(&sem->wait_list);
336 atomic_long_set(&sem->owner, 0L);
337 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
338 osq_lock_init(&sem->osq);
341 EXPORT_SYMBOL(__init_rwsem);
343 enum rwsem_waiter_type {
344 RWSEM_WAITING_FOR_WRITE,
345 RWSEM_WAITING_FOR_READ
348 struct rwsem_waiter {
349 struct list_head list;
350 struct task_struct *task;
351 enum rwsem_waiter_type type;
352 unsigned long timeout;
353 unsigned long last_rowner;
355 #define rwsem_first_waiter(sem) \
356 list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
358 enum rwsem_wake_type {
359 RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */
360 RWSEM_WAKE_READERS, /* Wake readers only */
361 RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
364 enum writer_wait_state {
365 WRITER_NOT_FIRST, /* Writer is not first in wait list */
366 WRITER_FIRST, /* Writer is first in wait list */
367 WRITER_HANDOFF /* Writer is first & handoff needed */
371 * The typical HZ value is either 250 or 1000. So set the minimum waiting
372 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
373 * queue before initiating the handoff protocol.
375 #define RWSEM_WAIT_TIMEOUT DIV_ROUND_UP(HZ, 250)
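/*
 * Worked example (added for clarity): DIV_ROUND_UP(HZ, 250) evaluates to
 * 4 jiffies at HZ=1000 (4ms), 1 jiffy at HZ=250 (4ms), and 1 jiffy at
 * HZ=100 (10ms, the "1 jiffy if it is higher than 4ms" case noted above).
 */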
378 * Magic number to batch-wakeup waiting readers, even when writers are
379 * also present in the queue. This both limits the amount of work the
380 * waking thread must do and also prevents any potential counter overflow,
381 * however unlikely.
383 #define MAX_READERS_WAKEUP 0x100
386 * handle the lock release when processes blocked on it can now run
387 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
388 *   be set
389 * - there must be someone on the queue
390 * - the wait_lock must be held by the caller
391 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
392 * to actually wakeup the blocked task(s) and drop the reference count,
393 * preferably when the wait_lock is released
394 * - woken process blocks are discarded from the list after having task zeroed
395 * - writers are only marked woken if downgrading is false
397 static void rwsem_mark_wake(struct rw_semaphore *sem,
398 enum rwsem_wake_type wake_type,
399 struct wake_q_head *wake_q)
401 struct rwsem_waiter *waiter, *tmp;
402 long oldcount, woken = 0, adjustment = 0;
403 struct list_head wlist;
405 lockdep_assert_held(&sem->wait_lock);
408 * Take a peek at the queue head waiter such that we can determine
409 * the wakeup(s) to perform.
411 waiter = rwsem_first_waiter(sem);
413 if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
414 if (wake_type == RWSEM_WAKE_ANY) {
416 * Mark writer at the front of the queue for wakeup.
417 * Until the task is actually awoken later by
418 * the caller, other writers are able to steal it.
419 * Readers, on the other hand, will block as they
420 * will notice the queued writer.
422 wake_q_add(wake_q, waiter->task);
423 lockevent_inc(rwsem_wake_writer);
430 * No reader wakeup if there are too many of them already.
432 if (unlikely(atomic_long_read(&sem->count) < 0))
436 * Writers might steal the lock before we grant it to the next reader.
437 * We prefer to do the first reader grant before counting readers
438 * so we can bail out early if a writer stole the lock.
440 if (wake_type != RWSEM_WAKE_READ_OWNED) {
441 struct task_struct *owner;
443 adjustment = RWSEM_READER_BIAS;
444 oldcount = atomic_long_fetch_add(adjustment, &sem->count);
445 if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
447 * When we've been waiting "too" long (for writers
448 * to give up the lock), request a HANDOFF to
449 * force the issue.
451 if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
452 time_after(jiffies, waiter->timeout)) {
453 adjustment -= RWSEM_FLAG_HANDOFF;
454 lockevent_inc(rwsem_rlock_handoff);
457 atomic_long_add(-adjustment, &sem->count);
461 * Set it to reader-owned to give spinners an early
462 * indication that readers now have the lock.
463 * The reader nonspinnable bit seen at slowpath entry of
464 * the reader is copied over.
466 owner = waiter->task;
467 if (waiter->last_rowner & RWSEM_RD_NONSPINNABLE) {
468 owner = (void *)((unsigned long)owner | RWSEM_RD_NONSPINNABLE);
469 lockevent_inc(rwsem_opt_norspin);
471 __rwsem_set_reader_owned(sem, owner);
475 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
476 * queue. We know that the number woken will be at least 1 as we accounted
477 * for above. Note we increment the 'active part' of the count by the
478 * number of readers before waking any processes up.
480 * This is an adaptation of the phase-fair R/W locks where at the
481 * reader phase (first waiter is a reader), all readers are eligible
482 * to acquire the lock at the same time irrespective of their order
483 * in the queue. The writers acquire the lock according to their
484 * order in the queue.
486 * We have to do wakeup in 2 passes to prevent the possibility that
487 * the reader count may be decremented before it is incremented. It
488 * is because the to-be-woken waiter may not have slept yet. So it
489 * may see waiter->task cleared, finish its critical section and
490 * do an unlock before the reader count increment.
492 * 1) Collect the read-waiters in a separate list, count them and
493 * fully increment the reader count in rwsem.
494 * 2) For each waiter in the new list, clear waiter->task and
495 * put them into wake_q to be woken up later.
497 INIT_LIST_HEAD(&wlist);
498 list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
499 if (waiter->type == RWSEM_WAITING_FOR_WRITE)
503 list_move_tail(&waiter->list, &wlist);
506 * Limit # of readers that can be woken up per wakeup call.
508 if (woken >= MAX_READERS_WAKEUP)
512 adjustment = woken * RWSEM_READER_BIAS - adjustment;
513 lockevent_cond_inc(rwsem_wake_reader, woken);
514 if (list_empty(&sem->wait_list)) {
515 /* hit end of list above */
516 adjustment -= RWSEM_FLAG_WAITERS;
520 * When we've woken a reader, we no longer need to force writers
521 * to give up the lock and we can clear HANDOFF.
523 if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
524 adjustment -= RWSEM_FLAG_HANDOFF;
527 atomic_long_add(adjustment, &sem->count);
530 list_for_each_entry_safe(waiter, tmp, &wlist, list) {
531 struct task_struct *tsk;
534 get_task_struct(tsk);
537 * Ensure calling get_task_struct() before setting the reader
538 * waiter to nil such that rwsem_down_read_slowpath() cannot
539 * race with do_exit() by always holding a reference count
540 * to the task to wakeup.
542 smp_store_release(&waiter->task, NULL);
544 * Ensure issuing the wakeup (either by us or someone else)
545 * after setting the reader waiter to nil.
547 wake_q_add_safe(wake_q, tsk);
552 * This function must be called with the sem->wait_lock held to prevent
553 * race conditions between checking the rwsem wait list and setting the
554 * sem->count accordingly.
556 * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
557 * bit is set or the lock is acquired with handoff bit cleared.
559 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
560 enum writer_wait_state wstate)
564 lockdep_assert_held(&sem->wait_lock);
566 count = atomic_long_read(&sem->count);
568 bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
570 if (has_handoff && wstate == WRITER_NOT_FIRST)
575 if (count & RWSEM_LOCK_MASK) {
576 if (has_handoff || (wstate != WRITER_HANDOFF))
579 new |= RWSEM_FLAG_HANDOFF;
581 new |= RWSEM_WRITER_LOCKED;
582 new &= ~RWSEM_FLAG_HANDOFF;
584 if (list_is_singular(&sem->wait_list))
585 new &= ~RWSEM_FLAG_WAITERS;
587 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
590 * We have either acquired the lock with handoff bit cleared or
591 * set the handoff bit.
593 if (new & RWSEM_FLAG_HANDOFF)
596 rwsem_set_owner(sem);
600 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
602 * Try to acquire read lock before the reader is put on wait queue.
603 * Lock acquisition isn't allowed if the rwsem is locked or a writer handoff
604 * is still in progress.
606 static inline bool rwsem_try_read_lock_unqueued(struct rw_semaphore *sem)
608 long count = atomic_long_read(&sem->count);
610 if (count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))
613 count = atomic_long_fetch_add_acquire(RWSEM_READER_BIAS, &sem->count);
614 if (!(count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
615 rwsem_set_reader_owned(sem);
616 lockevent_inc(rwsem_opt_rlock);
620 /* Back out the change */
621 atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
626 * Try to acquire write lock before the writer has been put on wait queue.
628 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
630 long count = atomic_long_read(&sem->count);
632 while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
633 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
634 count | RWSEM_WRITER_LOCKED)) {
635 rwsem_set_owner(sem);
636 lockevent_inc(rwsem_opt_wlock);
643 static inline bool owner_on_cpu(struct task_struct *owner)
646 * Due to lock holder preemption, we skip spinning if the
647 * task is not on a CPU or its CPU is preempted.
649 return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
652 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
653 unsigned long nonspinnable)
655 struct task_struct *owner;
659 BUILD_BUG_ON(!(RWSEM_OWNER_UNKNOWN & RWSEM_NONSPINNABLE));
661 if (need_resched()) {
662 lockevent_inc(rwsem_opt_fail);
668 owner = rwsem_owner_flags(sem, &flags);
670 * Don't check the read-owner as the entry may be stale.
672 if ((flags & nonspinnable) ||
673 (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
678 lockevent_cond_inc(rwsem_opt_fail, !ret);
683 * The rwsem_spin_on_owner() function returns the following 4 values
684 * depending on the lock owner state.
685 * OWNER_NULL : owner is currently NULL
686 * OWNER_WRITER: when owner changes and is a writer
687 * OWNER_READER: when owner changes and the new owner may be a reader.
688 * OWNER_NONSPINNABLE:
689 * when optimistic spinning has to stop because either the
690 * owner stops running, is unknown, or its timeslice has
691 * been used up.
695 OWNER_WRITER = 1 << 1,
696 OWNER_READER = 1 << 2,
697 OWNER_NONSPINNABLE = 1 << 3,
699 #define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER | OWNER_READER)
701 static inline enum owner_state
702 rwsem_owner_state(struct task_struct *owner, unsigned long flags, unsigned long nonspinnable)
704 if (flags & nonspinnable)
705 return OWNER_NONSPINNABLE;
707 if (flags & RWSEM_READER_OWNED)
710 return owner ? OWNER_WRITER : OWNER_NULL;
713 static noinline enum owner_state
714 rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
716 struct task_struct *new, *owner;
717 unsigned long flags, new_flags;
718 enum owner_state state;
720 owner = rwsem_owner_flags(sem, &flags);
721 state = rwsem_owner_state(owner, flags, nonspinnable);
722 if (state != OWNER_WRITER)
727 if (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF) {
728 state = OWNER_NONSPINNABLE;
732 new = rwsem_owner_flags(sem, &new_flags);
733 if ((new != owner) || (new_flags != flags)) {
734 state = rwsem_owner_state(new, new_flags, nonspinnable);
739 * Ensure we emit the owner->on_cpu dereference _after_
740 * checking sem->owner still matches owner, if that fails,
741 * owner might point to free()d memory, if it still matches,
742 * the rcu_read_lock() ensures the memory stays valid.
746 if (need_resched() || !owner_on_cpu(owner)) {
747 state = OWNER_NONSPINNABLE;
759 * Calculate reader-owned rwsem spinning threshold for writer
761 * The more readers own the rwsem, the longer it will take for them to
762 * wind down and free the rwsem. So the empirical formula used to
763 * determine the actual spinning time limit here is:
765 * Spinning threshold = (10 + nr_readers/2)us
767 * The limit is capped to a maximum of 25us (30 readers). This is just
768 * a heuristic and is subject to change in the future.
770 static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
772 long count = atomic_long_read(&sem->count);
773 int readers = count >> RWSEM_READER_SHIFT;
774 u64 delta;
776 if (readers > 30)
777 readers = 30;
778 delta = (20 + readers) * NSEC_PER_USEC / 2;
780 return sched_clock() + delta;
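/*
 * Worked example (added for illustration): with 10 readers holding the
 * lock, delta = (20 + 10) * NSEC_PER_USEC / 2 = 15000ns, i.e. the
 * (10 + nr_readers/2)us formula above yields a 15us spinning budget;
 * at 30 or more readers the 25us cap described above applies.
 */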
783 static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
786 int prev_owner_state = OWNER_NULL;
788 u64 rspin_threshold = 0;
789 unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
790 : RWSEM_RD_NONSPINNABLE;
794 /* sem->wait_lock should not be held when doing optimistic spinning */
795 if (!osq_lock(&sem->osq))
799 * Optimistically spin on the owner field and attempt to acquire the
800 * lock whenever the owner changes. Spinning will be stopped when:
801 * 1) the owning writer isn't running; or
802 * 2) readers own the lock and spinning time has exceeded limit.
805 enum owner_state owner_state;
807 owner_state = rwsem_spin_on_owner(sem, nonspinnable);
808 if (!(owner_state & OWNER_SPINNABLE))
812 * Try to acquire the lock
814 taken = wlock ? rwsem_try_write_lock_unqueued(sem)
815 : rwsem_try_read_lock_unqueued(sem);
821 * Time-based reader-owned rwsem optimistic spinning
823 if (wlock && (owner_state == OWNER_READER)) {
825 * Re-initialize rspin_threshold every time
826 * the owner state changes from non-reader to reader.
827 * This allows a writer to steal the lock in between
828 * 2 reader phases and have the threshold reset at
829 * the beginning of the 2nd reader phase.
831 if (prev_owner_state != OWNER_READER) {
832 if (rwsem_test_oflags(sem, nonspinnable))
834 rspin_threshold = rwsem_rspin_threshold(sem);
839 * Check time threshold once every 16 iterations to
840 * avoid calling sched_clock() too frequently so
841 * as to reduce the average latency between the times
842 * when the lock becomes free and when the spinner
843 * is ready to do a trylock.
845 else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
846 rwsem_set_nonspinnable(sem);
847 lockevent_inc(rwsem_opt_nospin);
853 * An RT task cannot do optimistic spinning if it cannot
854 * be sure the lock holder is running or live-lock may
855 * happen if the current task and the lock holder happen
856 * to run on the same CPU. However, aborting optimistic
857 * spinning while a NULL owner is detected may miss some
858 * opportunity where spinning can continue without causing
859 * problems.
861 * There are 2 possible cases where an RT task may be able
862 * to continue spinning.
864 * 1) The lock owner is in the process of releasing the
865 * lock, sem->owner is cleared but the lock has not
866 * been released yet.
867 * 2) The lock was free and owner cleared, but another
868 * task just comes in and acquires the lock before
869 * we try to get it. The new owner may be a spinnable
870 * writer.
872 * To take advantage of the two scenarios listed above, the RT
873 * task is made to retry one more time to see if it can
874 * acquire the lock or continue spinning on the new owning
875 * writer. Of course, if the time lag is long enough or the
876 * new owner is not a writer or spinnable, the RT task will
877 * go back to sleep.
879 * If the owner is a writer, the need_resched() check is
880 * done inside rwsem_spin_on_owner(). If the owner is not
881 * a writer, need_resched() check needs to be done here.
883 if (owner_state != OWNER_WRITER) {
886 if (rt_task(current) &&
887 (prev_owner_state != OWNER_WRITER))
890 prev_owner_state = owner_state;
893 * The cpu_relax() call is a compiler barrier which forces
894 * everything in this loop to be re-loaded. We don't need
895 * memory barriers as we'll eventually observe the right
896 * values at the cost of a few extra spins.
900 osq_unlock(&sem->osq);
903 lockevent_cond_inc(rwsem_opt_fail, !taken);
908 * Clear the owner's RWSEM_WR_NONSPINNABLE bit if it is set. This should
909 * only be called when the reader count reaches 0.
911 * This gives writers a better chance to acquire the rwsem before
912 * readers when the rwsem has been held by readers for a relatively long
913 * period of time. A race can happen where an optimistic spinner may have
914 * just stolen the rwsem and set the owner, but just clearing the
915 * RWSEM_WR_NONSPINNABLE bit will do no harm anyway.
917 static inline void clear_wr_nonspinnable(struct rw_semaphore *sem)
919 if (rwsem_test_oflags(sem, RWSEM_WR_NONSPINNABLE))
920 atomic_long_andnot(RWSEM_WR_NONSPINNABLE, &sem->owner);
924 * This function is called when the reader fails to acquire the lock via
925 * optimistic spinning. In this case we will still attempt a trylock
926 * if comparing the current rwsem state with the state when entering
927 * the slowpath indicates that the reader is still in a valid reader phase.
928 * This happens when the following conditions are true:
930 * 1) The lock is currently reader owned, and
931 * 2) The lock was previously not reader-owned or the last read owner has changed.
933 * In the former case, we have transitioned from a writer phase to a
934 * reader-phase while spinning. In the latter case, it means the reader
935 * phase hasn't ended when we entered the optimistic spinning loop. In
936 * both cases, the reader is eligible to acquire the lock. This is the
937 * secondary path where a read lock is acquired optimistically.
939 * The reader nonspinnable bit wasn't set at the time of entry, or this
940 * function would not have been reached at all.
942 static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
943 unsigned long last_rowner)
945 unsigned long owner = atomic_long_read(&sem->owner);
947 if (!(owner & RWSEM_READER_OWNED))
950 if (((owner ^ last_rowner) & ~RWSEM_OWNER_FLAGS_MASK) &&
951 rwsem_try_read_lock_unqueued(sem)) {
952 lockevent_inc(rwsem_opt_rlock2);
953 lockevent_add(rwsem_opt_fail, -1);
959 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
960 unsigned long nonspinnable)
965 static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
970 static inline void clear_wr_nonspinnable(struct rw_semaphore *sem) { }
972 static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
973 unsigned long last_rowner)
980 * Wait for the read lock to be granted
982 static struct rw_semaphore __sched *
983 rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
985 long count, adjustment = -RWSEM_READER_BIAS;
986 struct rwsem_waiter waiter;
987 DEFINE_WAKE_Q(wake_q);
991 * Save the current read-owner of rwsem, if available, and the
992 * reader nonspinnable bit.
994 waiter.last_rowner = atomic_long_read(&sem->owner);
995 if (!(waiter.last_rowner & RWSEM_READER_OWNED))
996 waiter.last_rowner &= RWSEM_RD_NONSPINNABLE;
998 if (!rwsem_can_spin_on_owner(sem, RWSEM_RD_NONSPINNABLE))
1002 * Undo read bias from down_read() and do optimistic spinning.
1004 atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
1006 if (rwsem_optimistic_spin(sem, false)) {
1008 * Wake up other readers in the wait list if the front
1009 * waiter is a reader.
1011 if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
1012 raw_spin_lock_irq(&sem->wait_lock);
1013 if (!list_empty(&sem->wait_list))
1014 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
1016 raw_spin_unlock_irq(&sem->wait_lock);
1020 } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
1025 waiter.task = current;
1026 waiter.type = RWSEM_WAITING_FOR_READ;
1027 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1029 raw_spin_lock_irq(&sem->wait_lock);
1030 if (list_empty(&sem->wait_list)) {
1032 * In case the wait queue is empty and the lock isn't owned
1033 * by a writer and doesn't have the handoff bit set, this reader can
1034 * exit the slowpath and return immediately as its
1035 * RWSEM_READER_BIAS has already been set in the count.
1037 if (adjustment && !(atomic_long_read(&sem->count) &
1038 (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
1039 /* Provide lock ACQUIRE */
1040 smp_acquire__after_ctrl_dep();
1041 raw_spin_unlock_irq(&sem->wait_lock);
1042 rwsem_set_reader_owned(sem);
1043 lockevent_inc(rwsem_rlock_fast);
1046 adjustment += RWSEM_FLAG_WAITERS;
1048 list_add_tail(&waiter.list, &sem->wait_list);
1050 /* we're now waiting on the lock, but no longer actively locking */
1052 count = atomic_long_add_return(adjustment, &sem->count);
1054 count = atomic_long_read(&sem->count);
1057 * If there are no active locks, wake the front queued process(es).
1059 * If there are no writers and we are first in the queue,
1060 * wake our own waiter to join the existing active readers !
1062 if (!(count & RWSEM_LOCK_MASK)) {
1063 clear_wr_nonspinnable(sem);
1066 if (wake || (!(count & RWSEM_WRITER_MASK) &&
1067 (adjustment & RWSEM_FLAG_WAITERS)))
1068 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1070 raw_spin_unlock_irq(&sem->wait_lock);
1073 /* wait to be given the lock */
1075 set_current_state(state);
1076 if (!smp_load_acquire(&waiter.task)) {
1077 /* Orders against rwsem_mark_wake()'s smp_store_release() */
1080 if (signal_pending_state(state, current)) {
1081 raw_spin_lock_irq(&sem->wait_lock);
1084 raw_spin_unlock_irq(&sem->wait_lock);
1088 lockevent_inc(rwsem_sleep_reader);
1091 __set_current_state(TASK_RUNNING);
1092 lockevent_inc(rwsem_rlock);
1095 list_del(&waiter.list);
1096 if (list_empty(&sem->wait_list)) {
1097 atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
1100 raw_spin_unlock_irq(&sem->wait_lock);
1101 __set_current_state(TASK_RUNNING);
1102 lockevent_inc(rwsem_rlock_fail);
1103 return ERR_PTR(-EINTR);
1107 * This function is called by a write lock owner. So the owner value
1108 * won't get changed by others.
1110 static inline void rwsem_disable_reader_optspin(struct rw_semaphore *sem,
1113 if (unlikely(disable)) {
1114 atomic_long_or(RWSEM_RD_NONSPINNABLE, &sem->owner);
1115 lockevent_inc(rwsem_opt_norspin);
1120 * Wait until we successfully acquire the write lock
1122 static struct rw_semaphore *
1123 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1127 enum writer_wait_state wstate;
1128 struct rwsem_waiter waiter;
1129 struct rw_semaphore *ret = sem;
1130 DEFINE_WAKE_Q(wake_q);
1132 /* do optimistic spinning and steal lock if possible */
1133 if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
1134 rwsem_optimistic_spin(sem, true))
1138 * Disable reader optimistic spinning for this rwsem after
1139 * acquiring the write lock when the setting of the nonspinnable
1140 * bits is observed.
1142 disable_rspin = atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE;
1145 * Optimistic spinning failed, proceed to the slowpath
1146 * and block until we can acquire the sem.
1148 waiter.task = current;
1149 waiter.type = RWSEM_WAITING_FOR_WRITE;
1150 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1152 raw_spin_lock_irq(&sem->wait_lock);
1154 /* account for this before adding a new element to the list */
1155 wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
1157 list_add_tail(&waiter.list, &sem->wait_list);
1159 /* we're now waiting on the lock */
1160 if (wstate == WRITER_NOT_FIRST) {
1161 count = atomic_long_read(&sem->count);
1164 * If there were already threads queued before us and:
1165 * 1) there are no active locks, wake the front
1166 * queued process(es) as the handoff bit might be set.
1167 * 2) there are no active writers and some readers, the lock
1168 * must be read owned; so we try to wake any read lock
1169 * waiters that were queued ahead of us.
1171 if (count & RWSEM_WRITER_MASK)
1174 rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
1175 ? RWSEM_WAKE_READERS
1176 : RWSEM_WAKE_ANY, &wake_q);
1178 if (!wake_q_empty(&wake_q)) {
1180 * We want to minimize wait_lock hold time especially
1181 * when a large number of readers are to be woken up.
1183 raw_spin_unlock_irq(&sem->wait_lock);
1185 wake_q_init(&wake_q); /* Used again, reinit */
1186 raw_spin_lock_irq(&sem->wait_lock);
1189 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1193 /* wait until we successfully acquire the lock */
1194 set_current_state(state);
1196 if (rwsem_try_write_lock(sem, wstate))
1199 raw_spin_unlock_irq(&sem->wait_lock);
1201 /* Block until there are no active lockers. */
1203 if (signal_pending_state(state, current))
1207 lockevent_inc(rwsem_sleep_writer);
1208 set_current_state(state);
1210 * If the HANDOFF bit is set, unconditionally do
1211 * a trylock.
1213 if (wstate == WRITER_HANDOFF)
1216 if ((wstate == WRITER_NOT_FIRST) &&
1217 (rwsem_first_waiter(sem) == &waiter))
1218 wstate = WRITER_FIRST;
1220 count = atomic_long_read(&sem->count);
1221 if (!(count & RWSEM_LOCK_MASK))
1225 * The setting of the handoff bit is deferred
1226 * until rwsem_try_write_lock() is called.
1228 if ((wstate == WRITER_FIRST) && (rt_task(current) ||
1229 time_after(jiffies, waiter.timeout))) {
1230 wstate = WRITER_HANDOFF;
1231 lockevent_inc(rwsem_wlock_handoff);
1236 raw_spin_lock_irq(&sem->wait_lock);
1238 __set_current_state(TASK_RUNNING);
1239 list_del(&waiter.list);
1240 rwsem_disable_reader_optspin(sem, disable_rspin);
1241 raw_spin_unlock_irq(&sem->wait_lock);
1242 lockevent_inc(rwsem_wlock);
1247 __set_current_state(TASK_RUNNING);
1248 raw_spin_lock_irq(&sem->wait_lock);
1249 list_del(&waiter.list);
1251 if (unlikely(wstate == WRITER_HANDOFF))
1252 atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
1254 if (list_empty(&sem->wait_list))
1255 atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
1257 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1258 raw_spin_unlock_irq(&sem->wait_lock);
1260 lockevent_inc(rwsem_wlock_fail);
1262 return ERR_PTR(-EINTR);
1266 * handle waking up a waiter on the semaphore
1267 * - up_read/up_write has decremented the active part of count if we come here
1269 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
1271 unsigned long flags;
1272 DEFINE_WAKE_Q(wake_q);
1274 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1276 if (!list_empty(&sem->wait_list))
1277 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1279 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1286 * downgrade a write lock into a read lock
1287 * - caller incremented waiting part of count and discovered it still negative
1288 * - just wake up any readers at the front of the queue
1290 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1292 unsigned long flags;
1293 DEFINE_WAKE_Q(wake_q);
1295 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1297 if (!list_empty(&sem->wait_list))
1298 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1300 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1309 inline void __down_read(struct rw_semaphore *sem)
1311 if (!rwsem_read_trylock(sem)) {
1312 rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
1313 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1315 rwsem_set_reader_owned(sem);
1319 static inline int __down_read_killable(struct rw_semaphore *sem)
1321 if (!rwsem_read_trylock(sem)) {
1322 if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
1324 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1326 rwsem_set_reader_owned(sem);
1331 static inline int __down_read_trylock(struct rw_semaphore *sem)
1334 * Optimize for the case when the rwsem is not locked at all.
1336 long tmp = RWSEM_UNLOCKED_VALUE;
1339 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1340 tmp + RWSEM_READER_BIAS)) {
1341 rwsem_set_reader_owned(sem);
1344 } while (!(tmp & RWSEM_READ_FAILED_MASK));
1351 static inline void __down_write(struct rw_semaphore *sem)
1353 long tmp = RWSEM_UNLOCKED_VALUE;
1355 if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1356 RWSEM_WRITER_LOCKED)))
1357 rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
1359 rwsem_set_owner(sem);
1362 static inline int __down_write_killable(struct rw_semaphore *sem)
1364 long tmp = RWSEM_UNLOCKED_VALUE;
1366 if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1367 RWSEM_WRITER_LOCKED))) {
1368 if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
1371 rwsem_set_owner(sem);
1376 static inline int __down_write_trylock(struct rw_semaphore *sem)
1378 long tmp = RWSEM_UNLOCKED_VALUE;
1380 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1381 RWSEM_WRITER_LOCKED)) {
1382 rwsem_set_owner(sem);
1389 * unlock after reading
1391 inline void __up_read(struct rw_semaphore *sem)
1395 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1396 rwsem_clear_reader_owned(sem);
1397 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1398 DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
1399 if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
1400 RWSEM_FLAG_WAITERS)) {
1401 clear_wr_nonspinnable(sem);
1402 rwsem_wake(sem, tmp);
1407 * unlock after writing
1409 static inline void __up_write(struct rw_semaphore *sem)
1414 * sem->owner may differ from current if the ownership is transferred
1415 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
1417 DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1418 !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1419 rwsem_clear_owner(sem);
1420 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1421 if (unlikely(tmp & RWSEM_FLAG_WAITERS))
1422 rwsem_wake(sem, tmp);
1426 * downgrade write lock to read lock
1428 static inline void __downgrade_write(struct rw_semaphore *sem)
1433 * When downgrading from exclusive to shared ownership,
1434 * anything inside the write-locked region cannot leak
1435 * into the read side. In contrast, anything in the
1436 * read-locked region is ok to be re-ordered into the
1437 * write side. As such, rely on RELEASE semantics.
1439 DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1440 tmp = atomic_long_fetch_add_release(
1441 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1442 rwsem_set_reader_owned(sem);
1443 if (tmp & RWSEM_FLAG_WAITERS)
1444 rwsem_downgrade_wake(sem);
1450 void __sched down_read(struct rw_semaphore *sem)
1453 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1455 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1457 EXPORT_SYMBOL(down_read);
1459 int __sched down_read_killable(struct rw_semaphore *sem)
1462 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1464 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1465 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1471 EXPORT_SYMBOL(down_read_killable);
1474 * trylock for reading -- returns 1 if successful, 0 if contention
1476 int down_read_trylock(struct rw_semaphore *sem)
1478 int ret = __down_read_trylock(sem);
1481 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1484 EXPORT_SYMBOL(down_read_trylock);
1489 void __sched down_write(struct rw_semaphore *sem)
1492 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1493 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1495 EXPORT_SYMBOL(down_write);
1500 int __sched down_write_killable(struct rw_semaphore *sem)
1503 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1505 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1506 __down_write_killable)) {
1507 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1513 EXPORT_SYMBOL(down_write_killable);
1516 * trylock for writing -- returns 1 if successful, 0 if contention
1518 int down_write_trylock(struct rw_semaphore *sem)
1520 int ret = __down_write_trylock(sem);
1523 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1527 EXPORT_SYMBOL(down_write_trylock);
1530 * release a read lock
1532 void up_read(struct rw_semaphore *sem)
1534 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1537 EXPORT_SYMBOL(up_read);
1540 * release a write lock
1542 void up_write(struct rw_semaphore *sem)
1544 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1547 EXPORT_SYMBOL(up_write);
1550 * downgrade write lock to read lock
1552 void downgrade_write(struct rw_semaphore *sem)
1554 lock_downgrade(&sem->dep_map, _RET_IP_);
1555 __downgrade_write(sem);
1557 EXPORT_SYMBOL(downgrade_write);
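/*
 * Illustrative usage sketch (not part of the original file): a writer that
 * has finished updating the protected data but still needs read access can
 * downgrade instead of dropping and re-acquiring the lock. The rwsem, the
 * function name and the data it protects are hypothetical.
 */
static void __maybe_unused rwsem_example_downgrade_usage(void)
{
	static DECLARE_RWSEM(example_rwsem);

	down_write(&example_rwsem);
	/* ... modify the protected data with exclusive ownership ... */
	downgrade_write(&example_rwsem);
	/* other readers may now enter; we still hold the lock shared */
	up_read(&example_rwsem);
}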
1559 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1561 void down_read_nested(struct rw_semaphore *sem, int subclass)
1564 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1565 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1567 EXPORT_SYMBOL(down_read_nested);
1569 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1572 rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1573 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1575 EXPORT_SYMBOL(_down_write_nest_lock);
1577 void down_read_non_owner(struct rw_semaphore *sem)
1581 __rwsem_set_reader_owned(sem, NULL);
1583 EXPORT_SYMBOL(down_read_non_owner);
1585 void down_write_nested(struct rw_semaphore *sem, int subclass)
1588 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1589 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1591 EXPORT_SYMBOL(down_write_nested);
1593 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1596 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1598 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1599 __down_write_killable)) {
1600 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1606 EXPORT_SYMBOL(down_write_killable_nested);
1608 void up_read_non_owner(struct rw_semaphore *sem)
1610 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1613 EXPORT_SYMBOL(up_read_non_owner);