1 // SPDX-License-Identifier: GPL-2.0
2 /* kernel/locking/rwsem.c: R/W semaphores, public implementation
3  *
4  * Written by David Howells (dhowells@redhat.com).
5  * Derived from asm-i386/semaphore.h
6  *
7  * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
8  * and Michel Lespinasse <walken@google.com>
9  *
10  * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
11  * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
12  *
13  * Rwsem count bit fields re-definition and rwsem rearchitecture by
14  * Waiman Long <longman@redhat.com> and
15  * Peter Zijlstra <peterz@infradead.org>.
16  */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/sched/rt.h>
22 #include <linux/sched/task.h>
23 #include <linux/sched/debug.h>
24 #include <linux/sched/wake_q.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/clock.h>
27 #include <linux/export.h>
28 #include <linux/rwsem.h>
29 #include <linux/atomic.h>
30
31 #ifndef CONFIG_PREEMPT_RT
32 #include "lock_events.h"
33
34 /*
35  * The least significant 2 bits of the owner value have the following
36  * meanings when set.
37  *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
38  *  - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
39  *
40  * When the rwsem is reader-owned and a spinning writer has timed out,
41  * the nonspinnable bit will be set to disable optimistic spinning.
42  *
43  * When a writer acquires a rwsem, it puts its task_struct pointer
44  * into the owner field. It is cleared after an unlock.
45  *
46  * When a reader acquires a rwsem, it will also put its task_struct
47  * pointer into the owner field with the RWSEM_READER_OWNED bit set.
48  * On unlock, the owner field will largely be left untouched. So
49  * for a free or reader-owned rwsem, the owner value may contain
50  * information about the last reader that acquired the rwsem.
51  *
52  * That information may be helpful in debugging cases where the system
53  * seems to hang on a reader-owned rwsem, especially if only one reader
54  * is involved. Ideally we would like to track all the readers that own
55  * a rwsem, but the overhead is simply too big.
56  *
57  * Fast-path reader optimistic lock stealing is supported when the rwsem
58  * was previously owned by a writer and the following conditions are met:
59  *  - rwsem is not currently writer owned
60  *  - the handoff bit isn't set.
61  */
62 #define RWSEM_READER_OWNED      (1UL << 0)
63 #define RWSEM_NONSPINNABLE      (1UL << 1)
64 #define RWSEM_OWNER_FLAGS_MASK  (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
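
/*
 * Illustrative example (not part of the original file): if the last reader's
 * task_struct lives at the hypothetical address 0xffff888012345000, a
 * reader-owned rwsem stores 0xffff888012345001 in the owner field
 * (pointer | RWSEM_READER_OWNED), or 0xffff888012345003 once the lock has
 * also been marked RWSEM_NONSPINNABLE.
 */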
65
66 #ifdef CONFIG_DEBUG_RWSEMS
67 # define DEBUG_RWSEMS_WARN_ON(c, sem)   do {                    \
68         if (!debug_locks_silent &&                              \
69             WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
70                 #c, atomic_long_read(&(sem)->count),            \
71                 (unsigned long) sem->magic,                     \
72                 atomic_long_read(&(sem)->owner), (long)current, \
73                 list_empty(&(sem)->wait_list) ? "" : "not "))   \
74                         debug_locks_off();                      \
75         } while (0)
76 #else
77 # define DEBUG_RWSEMS_WARN_ON(c, sem)
78 #endif
79
80 /*
81  * On 64-bit architectures, the bit definitions of the count are:
82  *
83  * Bit  0    - writer locked bit
84  * Bit  1    - waiters present bit
85  * Bit  2    - lock handoff bit
86  * Bits 3-7  - reserved
87  * Bits 8-62 - 55-bit reader count
88  * Bit  63   - read fail bit
89  *
90  * On 32-bit architectures, the bit definitions of the count are:
91  *
92  * Bit  0    - writer locked bit
93  * Bit  1    - waiters present bit
94  * Bit  2    - lock handoff bit
95  * Bits 3-7  - reserved
96  * Bits 8-30 - 23-bit reader count
97  * Bit  31   - read fail bit
98  *
99  * It is not likely that the most significant bit (read fail bit) will ever
100  * be set. This guard bit is still checked anyway in the down_read() fastpath
101  * just in case we need to use up more of the reader bits for other purposes
102  * in the future.
103  *
104  * atomic_long_fetch_add() is used to obtain the reader lock, whereas
105  * atomic_long_cmpxchg() will be used to obtain the writer lock.
106  *
107  * There are three places where the lock handoff bit may be set or cleared.
108  * 1) rwsem_mark_wake() for readers             -- set, clear
109  * 2) rwsem_try_write_lock() for writers        -- set, clear
110  * 3) rwsem_del_waiter()                        -- clear
111  *
112  * For all the above cases, wait_lock will be held. A writer must also
113  * be the first one in the wait_list to be eligible for setting the handoff
114  * bit. So concurrent setting/clearing of the handoff bit is not possible.
115  */
116 #define RWSEM_WRITER_LOCKED     (1UL << 0)
117 #define RWSEM_FLAG_WAITERS      (1UL << 1)
118 #define RWSEM_FLAG_HANDOFF      (1UL << 2)
119 #define RWSEM_FLAG_READFAIL     (1UL << (BITS_PER_LONG - 1))
120
121 #define RWSEM_READER_SHIFT      8
122 #define RWSEM_READER_BIAS       (1UL << RWSEM_READER_SHIFT)
123 #define RWSEM_READER_MASK       (~(RWSEM_READER_BIAS - 1))
124 #define RWSEM_WRITER_MASK       RWSEM_WRITER_LOCKED
125 #define RWSEM_LOCK_MASK         (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
126 #define RWSEM_READ_FAILED_MASK  (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
127                                  RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
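
/*
 * Illustrative decomposition (not part of the original file): on a 64-bit
 * system a count of 0x0000000000000302 means three readers hold the lock
 * (0x300 >> RWSEM_READER_SHIFT == 3), waiters are present (RWSEM_FLAG_WAITERS
 * set) and no writer holds it (RWSEM_WRITER_LOCKED clear).
 */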
128
129 /*
130  * All writes to owner are protected by WRITE_ONCE() to make sure that
131  * store tearing can't happen as optimistic spinners may read and use
132  * the owner value concurrently without the lock. Reads from owner, however,
133  * may not need READ_ONCE() as long as the pointer value is only used
134  * for comparison and isn't being dereferenced.
135  */
136 static inline void rwsem_set_owner(struct rw_semaphore *sem)
137 {
138         atomic_long_set(&sem->owner, (long)current);
139 }
140
141 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
142 {
143         atomic_long_set(&sem->owner, 0);
144 }
145
146 /*
147  * Test the flags in the owner field.
148  */
149 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
150 {
151         return atomic_long_read(&sem->owner) & flags;
152 }
153
154 /*
155  * The task_struct pointer of the last owning reader will be left in
156  * the owner field.
157  *
158  * Note that the owner value just indicates that the task has owned the rwsem
159  * previously; it may not be the real owner or one of the real owners
160  * anymore when that field is examined, so take it with a grain of salt.
161  *
162  * The reader non-spinnable bit is preserved.
163  */
164 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
165                                             struct task_struct *owner)
166 {
167         unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
168                 (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);
169
170         atomic_long_set(&sem->owner, val);
171 }
172
173 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
174 {
175         __rwsem_set_reader_owned(sem, current);
176 }
177
178 /*
179  * Return true if the rwsem is owned by a reader.
180  */
181 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
182 {
183 #ifdef CONFIG_DEBUG_RWSEMS
184         /*
185          * Check the count to see if it is write-locked.
186          */
187         long count = atomic_long_read(&sem->count);
188
189         if (count & RWSEM_WRITER_MASK)
190                 return false;
191 #endif
192         return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
193 }
194
195 #ifdef CONFIG_DEBUG_RWSEMS
196 /*
197  * With CONFIG_DEBUG_RWSEMS configured, this makes sure that any task
198  * pointer stored in the owner field of a reader-owned rwsem is the
199  * real owner or one of the real owners. The only exception is when the
200  * unlock is done by up_read_non_owner().
201  */
202 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
203 {
204         unsigned long val = atomic_long_read(&sem->owner);
205
206         while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
207                 if (atomic_long_try_cmpxchg(&sem->owner, &val,
208                                             val & RWSEM_OWNER_FLAGS_MASK))
209                         return;
210         }
211 }
212 #else
213 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
214 {
215 }
216 #endif
217
218 /*
219  * Set the RWSEM_NONSPINNABLE bit if the RWSEM_READER_OWNED flag
220  * remains set. Otherwise, the operation will be aborted.
221  */
222 static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
223 {
224         unsigned long owner = atomic_long_read(&sem->owner);
225
226         do {
227                 if (!(owner & RWSEM_READER_OWNED))
228                         break;
229                 if (owner & RWSEM_NONSPINNABLE)
230                         break;
231         } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
232                                           owner | RWSEM_NONSPINNABLE));
233 }
234
235 static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
236 {
237         *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
238
239         if (WARN_ON_ONCE(*cntp < 0))
240                 rwsem_set_nonspinnable(sem);
241
242         if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
243                 rwsem_set_reader_owned(sem);
244                 return true;
245         }
246
247         return false;
248 }
249
250 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
251 {
252         long tmp = RWSEM_UNLOCKED_VALUE;
253
254         if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
255                 rwsem_set_owner(sem);
256                 return true;
257         }
258
259         return false;
260 }
261
262 /*
263  * Return just the real task structure pointer of the owner
264  */
265 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
266 {
267         return (struct task_struct *)
268                 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
269 }
270
271 /*
272  * Return the real task structure pointer of the owner and the embedded
273  * flags in the owner. pflags must be non-NULL.
274  */
275 static inline struct task_struct *
276 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
277 {
278         unsigned long owner = atomic_long_read(&sem->owner);
279
280         *pflags = owner & RWSEM_OWNER_FLAGS_MASK;
281         return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
282 }
283
284 /*
285  * Guide to the rw_semaphore's count field.
286  *
287  * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
288  * by a writer.
289  *
290  * The lock is owned by readers when
291  * (1) the RWSEM_WRITER_LOCKED isn't set in count,
292  * (2) some of the reader bits are set in count, and
293  * (3) the owner field has the RWSEM_READER_OWNED bit set.
294  *
295  * Having some reader bits set is not enough to guarantee a reader-owned
296  * lock as the readers may be in the process of backing out from the count
297  * and a writer has just released the lock. So another writer may steal
298  * the lock immediately after that.
299  */
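
/*
 * Illustrative example (not part of the original file): a count of
 * 0x0000000000000100 (a single reader bias, no writer, no waiters) only
 * denotes a reader-owned lock if the owner field also has the
 * RWSEM_READER_OWNED bit set; otherwise it may just be a reader in the
 * middle of backing out after a failed acquisition attempt.
 */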
300
301 /*
302  * Initialize an rwsem:
303  */
304 void __init_rwsem(struct rw_semaphore *sem, const char *name,
305                   struct lock_class_key *key)
306 {
307 #ifdef CONFIG_DEBUG_LOCK_ALLOC
308         /*
309          * Make sure we are not reinitializing a held semaphore:
310          */
311         debug_check_no_locks_freed((void *)sem, sizeof(*sem));
312         lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
313 #endif
314 #ifdef CONFIG_DEBUG_RWSEMS
315         sem->magic = sem;
316 #endif
317         atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
318         raw_spin_lock_init(&sem->wait_lock);
319         INIT_LIST_HEAD(&sem->wait_list);
320         atomic_long_set(&sem->owner, 0L);
321 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
322         osq_lock_init(&sem->osq);
323 #endif
324 }
325 EXPORT_SYMBOL(__init_rwsem);
326
327 enum rwsem_waiter_type {
328         RWSEM_WAITING_FOR_WRITE,
329         RWSEM_WAITING_FOR_READ
330 };
331
332 struct rwsem_waiter {
333         struct list_head list;
334         struct task_struct *task;
335         enum rwsem_waiter_type type;
336         unsigned long timeout;
337
338         /* Writer only, not initialized in reader */
339         bool handoff_set;
340 };
341 #define rwsem_first_waiter(sem) \
342         list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
343
344 enum rwsem_wake_type {
345         RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
346         RWSEM_WAKE_READERS,     /* Wake readers only */
347         RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
348 };
349
350 /*
351  * The typical HZ value is either 250 or 1000. So set the minimum waiting
352  * time in the wait queue to 4ms, or 1 jiffy if that is longer than 4ms,
353  * before initiating the handoff protocol.
354  */
355 #define RWSEM_WAIT_TIMEOUT      DIV_ROUND_UP(HZ, 250)
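
/*
 * Illustrative values (not part of the original file): with HZ=1000 the
 * timeout is DIV_ROUND_UP(1000, 250) = 4 jiffies (4ms); with HZ=250 it is
 * 1 jiffy (4ms); with HZ=100 it rounds up to 1 jiffy (10ms).
 */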
356
357 /*
358  * Magic number to batch-wakeup waiting readers, even when writers are
359  * also present in the queue. This both limits the amount of work the
360  * waking thread must do and also prevents any potential counter overflow,
361  * however unlikely.
362  */
363 #define MAX_READERS_WAKEUP      0x100
364
365 static inline void
366 rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
367 {
368         lockdep_assert_held(&sem->wait_lock);
369         list_add_tail(&waiter->list, &sem->wait_list);
370         /* caller will set RWSEM_FLAG_WAITERS */
371 }
372
373 /*
374  * Remove a waiter from the wait_list and clear flags.
375  *
376  * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
377  * this function. Modify with care.
378  */
379 static inline void
380 rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
381 {
382         lockdep_assert_held(&sem->wait_lock);
383         list_del(&waiter->list);
384         if (likely(!list_empty(&sem->wait_list)))
385                 return;
386
387         atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
388 }
389
390 /*
391  * handle the lock release when processes blocked on it can now run
392  * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
393  *   have been set.
394  * - there must be someone on the queue
395  * - the wait_lock must be held by the caller
396  * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
397  *   to actually wake up the blocked task(s) and drop the reference count,
398  *   preferably when the wait_lock is released
399  * - woken waiter blocks are removed from the list after having their task zeroed
400  * - writers are only marked woken if downgrading is false
401  *
402  * Implies rwsem_del_waiter() for all woken readers.
403  */
404 static void rwsem_mark_wake(struct rw_semaphore *sem,
405                             enum rwsem_wake_type wake_type,
406                             struct wake_q_head *wake_q)
407 {
408         struct rwsem_waiter *waiter, *tmp;
409         long oldcount, woken = 0, adjustment = 0;
410         struct list_head wlist;
411
412         lockdep_assert_held(&sem->wait_lock);
413
414         /*
415          * Take a peek at the queue head waiter such that we can determine
416          * the wakeup(s) to perform.
417          */
418         waiter = rwsem_first_waiter(sem);
419
420         if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
421                 if (wake_type == RWSEM_WAKE_ANY) {
422                         /*
423                          * Mark writer at the front of the queue for wakeup.
424                          * Until the task is actually awoken later by
425                          * the caller, other writers are able to steal it.
426                          * Readers, on the other hand, will block as they
427                          * will notice the queued writer.
428                          */
429                         wake_q_add(wake_q, waiter->task);
430                         lockevent_inc(rwsem_wake_writer);
431                 }
432
433                 return;
434         }
435
436         /*
437          * No reader wakeup if there are too many of them already.
438          */
439         if (unlikely(atomic_long_read(&sem->count) < 0))
440                 return;
441
442         /*
443          * Writers might steal the lock before we grant it to the next reader.
444          * We prefer to do the first reader grant before counting readers
445          * so we can bail out early if a writer stole the lock.
446          */
447         if (wake_type != RWSEM_WAKE_READ_OWNED) {
448                 struct task_struct *owner;
449
450                 adjustment = RWSEM_READER_BIAS;
451                 oldcount = atomic_long_fetch_add(adjustment, &sem->count);
452                 if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
453                         /*
454                          * When we've been waiting "too" long (for writers
455                          * to give up the lock), request a HANDOFF to
456                          * force the issue.
457                          */
458                         if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
459                             time_after(jiffies, waiter->timeout)) {
460                                 adjustment -= RWSEM_FLAG_HANDOFF;
461                                 lockevent_inc(rwsem_rlock_handoff);
462                         }
463
464                         atomic_long_add(-adjustment, &sem->count);
465                         return;
466                 }
467                 /*
468                  * Set it to reader-owned to give spinners an early
469                  * indication that readers now have the lock.
470                  * The reader nonspinnable bit seen at slowpath entry of
471                  * the reader is copied over.
472                  */
473                 owner = waiter->task;
474                 __rwsem_set_reader_owned(sem, owner);
475         }
476
477         /*
478          * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
479          * queue. We know that at least one reader will be woken, as we
480          * accounted for it above. Note we increment the 'active part' of the
481          * count by the number of readers before waking any processes up.
482          *
483          * This is an adaptation of the phase-fair R/W locks where at the
484          * reader phase (first waiter is a reader), all readers are eligible
485          * to acquire the lock at the same time irrespective of their order
486          * in the queue. The writers acquire the lock according to their
487          * order in the queue.
488          *
489          * We have to do wakeup in 2 passes to prevent the possibility that
490          * the reader count may be decremented before it is incremented. It
491          * is because the to-be-woken waiter may not have slept yet. So it
492          * may see waiter->task got cleared, finish its critical section and
493          * do an unlock before the reader count increment.
494          *
495          * 1) Collect the read-waiters in a separate list, count them and
496          *    fully increment the reader count in rwsem.
497          * 2) For each waiter in the new list, clear waiter->task and
498          *    put it into wake_q to be woken up later.
499          */
500         INIT_LIST_HEAD(&wlist);
501         list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
502                 if (waiter->type == RWSEM_WAITING_FOR_WRITE)
503                         continue;
504
505                 woken++;
506                 list_move_tail(&waiter->list, &wlist);
507
508                 /*
509                  * Limit # of readers that can be woken up per wakeup call.
510                  */
511                 if (unlikely(woken >= MAX_READERS_WAKEUP))
512                         break;
513         }
514
515         adjustment = woken * RWSEM_READER_BIAS - adjustment;
516         lockevent_cond_inc(rwsem_wake_reader, woken);
517
518         oldcount = atomic_long_read(&sem->count);
519         if (list_empty(&sem->wait_list)) {
520                 /*
521                  * Combined with list_move_tail() above, this implies
522                  * rwsem_del_waiter().
523                  */
524                 adjustment -= RWSEM_FLAG_WAITERS;
525                 if (oldcount & RWSEM_FLAG_HANDOFF)
526                         adjustment -= RWSEM_FLAG_HANDOFF;
527         } else if (woken) {
528                 /*
529                  * When we've woken a reader, we no longer need to force
530                  * writers to give up the lock and we can clear HANDOFF.
531                  */
532                 if (oldcount & RWSEM_FLAG_HANDOFF)
533                         adjustment -= RWSEM_FLAG_HANDOFF;
534         }
535
536         if (adjustment)
537                 atomic_long_add(adjustment, &sem->count);
538
539         /* 2nd pass */
540         list_for_each_entry_safe(waiter, tmp, &wlist, list) {
541                 struct task_struct *tsk;
542
543                 tsk = waiter->task;
544                 get_task_struct(tsk);
545
546                 /*
547                  * Ensure calling get_task_struct() before setting the reader
548                  * waiter to nil such that rwsem_down_read_slowpath() cannot
549                  * race with do_exit() by always holding a reference count
550                  * to the task to wakeup.
551                  */
552                 smp_store_release(&waiter->task, NULL);
553                 /*
554                  * Ensure issuing the wakeup (either by us or someone else)
555                  * after setting the reader waiter to nil.
556                  */
557                 wake_q_add_safe(wake_q, tsk);
558         }
559 }
560
561 /*
562  * This function must be called with the sem->wait_lock held to prevent
563  * race conditions between checking the rwsem wait list and setting the
564  * sem->count accordingly.
565  *
566  * Implies rwsem_del_waiter() on success.
567  */
568 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
569                                         struct rwsem_waiter *waiter)
570 {
571         bool first = rwsem_first_waiter(sem) == waiter;
572         long count, new;
573
574         lockdep_assert_held(&sem->wait_lock);
575
576         count = atomic_long_read(&sem->count);
577         do {
578                 bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
579
580                 if (has_handoff) {
581                         if (!first)
582                                 return false;
583
584                         /* First waiter inherits a previously set handoff bit */
585                         waiter->handoff_set = true;
586                 }
587
588                 new = count;
589
590                 if (count & RWSEM_LOCK_MASK) {
591                         if (has_handoff || (!rt_task(waiter->task) &&
592                                             !time_after(jiffies, waiter->timeout)))
593                                 return false;
594
595                         new |= RWSEM_FLAG_HANDOFF;
596                 } else {
597                         new |= RWSEM_WRITER_LOCKED;
598                         new &= ~RWSEM_FLAG_HANDOFF;
599
600                         if (list_is_singular(&sem->wait_list))
601                                 new &= ~RWSEM_FLAG_WAITERS;
602                 }
603         } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
604
605         /*
606          * We have either acquired the lock with handoff bit cleared or
607          * set the handoff bit.
608          */
609         if (new & RWSEM_FLAG_HANDOFF) {
610                 waiter->handoff_set = true;
611                 lockevent_inc(rwsem_wlock_handoff);
612                 return false;
613         }
614
615         /*
616          * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
617          * success.
618          */
619         list_del(&waiter->list);
620         rwsem_set_owner(sem);
621         return true;
622 }
623
624 /*
625  * The rwsem_spin_on_owner() function returns the following 4 values
626  * depending on the lock owner state.
627  *   OWNER_NULL  : owner is currently NULL
628  *   OWNER_WRITER: when owner changes and is a writer
629  *   OWNER_READER: when owner changes and the new owner may be a reader.
630  *   OWNER_NONSPINNABLE:
631  *                 when optimistic spinning has to stop because either the
632  *                 owner stops running, is unknown, or its timeslice has
633  *                 been used up.
634  */
635 enum owner_state {
636         OWNER_NULL              = 1 << 0,
637         OWNER_WRITER            = 1 << 1,
638         OWNER_READER            = 1 << 2,
639         OWNER_NONSPINNABLE      = 1 << 3,
640 };
641
642 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
643 /*
644  * Try to acquire write lock before the writer has been put on wait queue.
645  */
646 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
647 {
648         long count = atomic_long_read(&sem->count);
649
650         while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
651                 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
652                                         count | RWSEM_WRITER_LOCKED)) {
653                         rwsem_set_owner(sem);
654                         lockevent_inc(rwsem_opt_lock);
655                         return true;
656                 }
657         }
658         return false;
659 }
660
661 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
662 {
663         struct task_struct *owner;
664         unsigned long flags;
665         bool ret = true;
666
667         if (need_resched()) {
668                 lockevent_inc(rwsem_opt_fail);
669                 return false;
670         }
671
672         preempt_disable();
673         /*
674          * Disabling preemption is equivalent to an RCU read-side critical
675          * section, thus the task_struct structure won't go away.
676          */
677         owner = rwsem_owner_flags(sem, &flags);
678         /*
679          * Don't check the read-owner as the entry may be stale.
680          */
681         if ((flags & RWSEM_NONSPINNABLE) ||
682             (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
683                 ret = false;
684         preempt_enable();
685
686         lockevent_cond_inc(rwsem_opt_fail, !ret);
687         return ret;
688 }
689
690 #define OWNER_SPINNABLE         (OWNER_NULL | OWNER_WRITER | OWNER_READER)
691
692 static inline enum owner_state
693 rwsem_owner_state(struct task_struct *owner, unsigned long flags)
694 {
695         if (flags & RWSEM_NONSPINNABLE)
696                 return OWNER_NONSPINNABLE;
697
698         if (flags & RWSEM_READER_OWNED)
699                 return OWNER_READER;
700
701         return owner ? OWNER_WRITER : OWNER_NULL;
702 }
703
704 static noinline enum owner_state
705 rwsem_spin_on_owner(struct rw_semaphore *sem)
706 {
707         struct task_struct *new, *owner;
708         unsigned long flags, new_flags;
709         enum owner_state state;
710
711         lockdep_assert_preemption_disabled();
712
713         owner = rwsem_owner_flags(sem, &flags);
714         state = rwsem_owner_state(owner, flags);
715         if (state != OWNER_WRITER)
716                 return state;
717
718         for (;;) {
719                 /*
720                  * When a waiting writer sets the handoff flag, it may spin
721                  * on the owner as well. Once that writer acquires the lock,
722                  * we can spin on it. So we don't need to quit even when the
723                  * handoff bit is set.
724                  */
725                 new = rwsem_owner_flags(sem, &new_flags);
726                 if ((new != owner) || (new_flags != flags)) {
727                         state = rwsem_owner_state(new, new_flags);
728                         break;
729                 }
730
731                 /*
732                  * Ensure we emit the owner->on_cpu dereference _after_
733                  * checking that sem->owner still matches owner. If that fails,
734                  * owner might point to free()d memory. If it still matches,
735                  * our spinning context has already disabled preemption, which
736                  * is equivalent to an RCU read-side critical section and
737                  * ensures the memory stays valid.
738                  */
739                 barrier();
740
741                 if (need_resched() || !owner_on_cpu(owner)) {
742                         state = OWNER_NONSPINNABLE;
743                         break;
744                 }
745
746                 cpu_relax();
747         }
748
749         return state;
750 }
751
752 /*
753  * Calculate reader-owned rwsem spinning threshold for writer
754  *
755  * The more readers own the rwsem, the longer it will take for them to
756  * wind down and free the rwsem. So the empirical formula used to
757  * determine the actual spinning time limit here is:
758  *
759  *   Spinning threshold = (10 + nr_readers/2)us
760  *
761  * The limit is capped to a maximum of 25us (30 readers). This is just
762  * a heuristic and is subject to change in the future.
763  */
764 static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
765 {
766         long count = atomic_long_read(&sem->count);
767         int readers = count >> RWSEM_READER_SHIFT;
768         u64 delta;
769
770         if (readers > 30)
771                 readers = 30;
772         delta = (20 + readers) * NSEC_PER_USEC / 2;
773
774         return sched_clock() + delta;
775 }
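
/*
 * Illustrative computation (not part of the original file): with 10 readers
 * holding the lock, delta = (20 + 10) * NSEC_PER_USEC / 2 = 15000ns, i.e.
 * the 10us base plus 0.5us per reader, so spinning stops roughly 15us from
 * now; with 30 or more readers the 25us cap applies.
 */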
776
777 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
778 {
779         bool taken = false;
780         int prev_owner_state = OWNER_NULL;
781         int loop = 0;
782         u64 rspin_threshold = 0;
783
784         preempt_disable();
785
786         /* sem->wait_lock should not be held when doing optimistic spinning */
787         if (!osq_lock(&sem->osq))
788                 goto done;
789
790         /*
791          * Optimistically spin on the owner field and attempt to acquire the
792          * lock whenever the owner changes. Spinning will be stopped when:
793          *  1) the owning writer isn't running; or
794          *  2) readers own the lock and spinning time has exceeded limit.
795          */
796         for (;;) {
797                 enum owner_state owner_state;
798
799                 owner_state = rwsem_spin_on_owner(sem);
800                 if (!(owner_state & OWNER_SPINNABLE))
801                         break;
802
803                 /*
804                  * Try to acquire the lock
805                  */
806                 taken = rwsem_try_write_lock_unqueued(sem);
807
808                 if (taken)
809                         break;
810
811                 /*
812                  * Time-based reader-owned rwsem optimistic spinning
813                  */
814                 if (owner_state == OWNER_READER) {
815                         /*
816                          * Re-initialize rspin_threshold every time
817                          * the owner state changes from non-reader to reader.
818                          * This allows a writer to steal the lock in between
819                          * 2 reader phases and have the threshold reset at
820                          * the beginning of the 2nd reader phase.
821                          */
822                         if (prev_owner_state != OWNER_READER) {
823                                 if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
824                                         break;
825                                 rspin_threshold = rwsem_rspin_threshold(sem);
826                                 loop = 0;
827                         }
828
829                         /*
830                          * Check time threshold once every 16 iterations to
831                          * avoid calling sched_clock() too frequently so
832                          * as to reduce the average latency between the times
833                          * when the lock becomes free and when the spinner
834                          * is ready to do a trylock.
835                          */
836                         else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
837                                 rwsem_set_nonspinnable(sem);
838                                 lockevent_inc(rwsem_opt_nospin);
839                                 break;
840                         }
841                 }
842
843                 /*
844                  * An RT task cannot do optimistic spinning if it cannot
845                  * be sure the lock holder is running or live-lock may
846                  * happen if the current task and the lock holder happen
847                  * to run on the same CPU. However, aborting optimistic
848                  * spinning while a NULL owner is detected may miss some
849                  * opportunities where spinning can continue without causing
850                  * problems.
851                  *
852                  * There are 2 possible cases where an RT task may be able
853                  * to continue spinning.
854                  *
855                  * 1) The lock owner is in the process of releasing the
856                  *    lock, sem->owner is cleared but the lock has not
857                  *    been released yet.
858                  * 2) The lock was free and owner cleared, but another
859                  *    task just comes in and acquires the lock before
860                  *    we try to get it. The new owner may be a spinnable
861                  *    writer.
862                  *
863                  * To take advantage of the two scenarios listed above, the RT
864                  * task is made to retry one more time to see if it can
865                  * acquire the lock or continue spinning on the new owning
866                  * writer. Of course, if the time lag is long enough or the
867                  * new owner is not a writer or spinnable, the RT task will
868                  * quit spinning.
869                  *
870                  * If the owner is a writer, the need_resched() check is
871                  * done inside rwsem_spin_on_owner(). If the owner is not
872                  * a writer, the need_resched() check needs to be done here.
873                  */
874                 if (owner_state != OWNER_WRITER) {
875                         if (need_resched())
876                                 break;
877                         if (rt_task(current) &&
878                            (prev_owner_state != OWNER_WRITER))
879                                 break;
880                 }
881                 prev_owner_state = owner_state;
882
883                 /*
884                  * The cpu_relax() call is a compiler barrier which forces
885                  * everything in this loop to be re-loaded. We don't need
886                  * memory barriers as we'll eventually observe the right
887                  * values at the cost of a few extra spins.
888                  */
889                 cpu_relax();
890         }
891         osq_unlock(&sem->osq);
892 done:
893         preempt_enable();
894         lockevent_cond_inc(rwsem_opt_fail, !taken);
895         return taken;
896 }
897
898 /*
899  * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
900  * only be called when the reader count reaches 0.
901  */
902 static inline void clear_nonspinnable(struct rw_semaphore *sem)
903 {
904         if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
905                 atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
906 }
907
908 #else
909 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
910 {
911         return false;
912 }
913
914 static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
915 {
916         return false;
917 }
918
919 static inline void clear_nonspinnable(struct rw_semaphore *sem) { }
920
921 static inline enum owner_state
922 rwsem_spin_on_owner(struct rw_semaphore *sem)
923 {
924         return OWNER_NONSPINNABLE;
925 }
926 #endif
927
928 /*
929  * Wait for the read lock to be granted
930  */
931 static struct rw_semaphore __sched *
932 rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
933 {
934         long adjustment = -RWSEM_READER_BIAS;
935         long rcnt = (count >> RWSEM_READER_SHIFT);
936         struct rwsem_waiter waiter;
937         DEFINE_WAKE_Q(wake_q);
938         bool wake = false;
939
940         /*
941          * To prevent a constant stream of readers from starving a sleeping
942          * waiter, don't attempt optimistic lock stealing if the lock is
943          * currently owned by readers.
944          */
945         if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
946             (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
947                 goto queue;
948
949         /*
950          * Reader optimistic lock stealing.
951          */
952         if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
953                 rwsem_set_reader_owned(sem);
954                 lockevent_inc(rwsem_rlock_steal);
955
956                 /*
957                  * Wake up other readers in the wait queue if this is
958                  * the first reader.
959                  */
960                 if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
961                         raw_spin_lock_irq(&sem->wait_lock);
962                         if (!list_empty(&sem->wait_list))
963                                 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
964                                                 &wake_q);
965                         raw_spin_unlock_irq(&sem->wait_lock);
966                         wake_up_q(&wake_q);
967                 }
968                 return sem;
969         }
970
971 queue:
972         waiter.task = current;
973         waiter.type = RWSEM_WAITING_FOR_READ;
974         waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
975
976         raw_spin_lock_irq(&sem->wait_lock);
977         if (list_empty(&sem->wait_list)) {
978                 /*
979                  * In case the wait queue is empty and the lock is neither owned
980                  * by a writer nor has the handoff bit set, this reader can
981                  * exit the slowpath and return immediately as its
982                  * RWSEM_READER_BIAS has already been set in the count.
983                  */
984                 if (!(atomic_long_read(&sem->count) &
985                      (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
986                         /* Provide lock ACQUIRE */
987                         smp_acquire__after_ctrl_dep();
988                         raw_spin_unlock_irq(&sem->wait_lock);
989                         rwsem_set_reader_owned(sem);
990                         lockevent_inc(rwsem_rlock_fast);
991                         return sem;
992                 }
993                 adjustment += RWSEM_FLAG_WAITERS;
994         }
995         rwsem_add_waiter(sem, &waiter);
996
997         /* we're now waiting on the lock, but no longer actively locking */
998         count = atomic_long_add_return(adjustment, &sem->count);
999
1000         /*
1001          * If there are no active locks, wake the front queued process(es).
1002          *
1003          * If there are no writers and we are first in the queue,
1004          * wake our own waiter to join the existing active readers !
1005          */
1006         if (!(count & RWSEM_LOCK_MASK)) {
1007                 clear_nonspinnable(sem);
1008                 wake = true;
1009         }
1010         if (wake || (!(count & RWSEM_WRITER_MASK) &&
1011                     (adjustment & RWSEM_FLAG_WAITERS)))
1012                 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1013
1014         raw_spin_unlock_irq(&sem->wait_lock);
1015         wake_up_q(&wake_q);
1016
1017         /* wait to be given the lock */
1018         for (;;) {
1019                 set_current_state(state);
1020                 if (!smp_load_acquire(&waiter.task)) {
1021                         /* Matches rwsem_mark_wake()'s smp_store_release(). */
1022                         break;
1023                 }
1024                 if (signal_pending_state(state, current)) {
1025                         raw_spin_lock_irq(&sem->wait_lock);
1026                         if (waiter.task)
1027                                 goto out_nolock;
1028                         raw_spin_unlock_irq(&sem->wait_lock);
1029                         /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
1030                         break;
1031                 }
1032                 schedule();
1033                 lockevent_inc(rwsem_sleep_reader);
1034         }
1035
1036         __set_current_state(TASK_RUNNING);
1037         lockevent_inc(rwsem_rlock);
1038         return sem;
1039
1040 out_nolock:
1041         rwsem_del_waiter(sem, &waiter);
1042         raw_spin_unlock_irq(&sem->wait_lock);
1043         __set_current_state(TASK_RUNNING);
1044         lockevent_inc(rwsem_rlock_fail);
1045         return ERR_PTR(-EINTR);
1046 }
1047
1048 /*
1049  * Wait until we successfully acquire the write lock
1050  */
1051 static struct rw_semaphore __sched *
1052 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1053 {
1054         long count;
1055         struct rwsem_waiter waiter;
1056         DEFINE_WAKE_Q(wake_q);
1057
1058         /* do optimistic spinning and steal lock if possible */
1059         if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
1060                 /* rwsem_optimistic_spin() implies ACQUIRE on success */
1061                 return sem;
1062         }
1063
1064         /*
1065          * Optimistic spinning failed, proceed to the slowpath
1066          * and block until we can acquire the sem.
1067          */
1068         waiter.task = current;
1069         waiter.type = RWSEM_WAITING_FOR_WRITE;
1070         waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1071         waiter.handoff_set = false;
1072
1073         raw_spin_lock_irq(&sem->wait_lock);
1074         rwsem_add_waiter(sem, &waiter);
1075
1076         /* we're now waiting on the lock */
1077         if (rwsem_first_waiter(sem) != &waiter) {
1078                 count = atomic_long_read(&sem->count);
1079
1080                 /*
1081                  * If there were already threads queued before us and:
1082                  *  1) there are no active locks, wake the front
1083                  *     queued process(es) as the handoff bit might be set.
1084                  *  2) there are no active writers and some readers, the lock
1085                  *     must be read-owned; so we try to wake any read lock
1086                  *     waiters that were queued ahead of us.
1087                  */
1088                 if (count & RWSEM_WRITER_MASK)
1089                         goto wait;
1090
1091                 rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
1092                                         ? RWSEM_WAKE_READERS
1093                                         : RWSEM_WAKE_ANY, &wake_q);
1094
1095                 if (!wake_q_empty(&wake_q)) {
1096                         /*
1097                          * We want to minimize wait_lock hold time especially
1098                          * when a large number of readers are to be woken up.
1099                          */
1100                         raw_spin_unlock_irq(&sem->wait_lock);
1101                         wake_up_q(&wake_q);
1102                         wake_q_init(&wake_q);   /* Used again, reinit */
1103                         raw_spin_lock_irq(&sem->wait_lock);
1104                 }
1105         } else {
1106                 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1107         }
1108
1109 wait:
1110         /* wait until we successfully acquire the lock */
1111         set_current_state(state);
1112         for (;;) {
1113                 if (rwsem_try_write_lock(sem, &waiter)) {
1114                         /* rwsem_try_write_lock() implies ACQUIRE on success */
1115                         break;
1116                 }
1117
1118                 raw_spin_unlock_irq(&sem->wait_lock);
1119
1120                 if (signal_pending_state(state, current))
1121                         goto out_nolock;
1122
1123                 /*
1124                  * After setting the handoff bit and failing to acquire
1125                  * the lock, attempt to spin on owner to accelerate lock
1126                  * transfer. If the previous owner is an on-cpu writer and it
1127                  * has just released the lock, OWNER_NULL will be returned.
1128                  * In this case, we attempt to acquire the lock again
1129                  * without sleeping.
1130                  */
1131                 if (waiter.handoff_set) {
1132                         enum owner_state owner_state;
1133
1134                         preempt_disable();
1135                         owner_state = rwsem_spin_on_owner(sem);
1136                         preempt_enable();
1137
1138                         if (owner_state == OWNER_NULL)
1139                                 goto trylock_again;
1140                 }
1141
1142                 schedule();
1143                 lockevent_inc(rwsem_sleep_writer);
1144                 set_current_state(state);
1145 trylock_again:
1146                 raw_spin_lock_irq(&sem->wait_lock);
1147         }
1148         __set_current_state(TASK_RUNNING);
1149         raw_spin_unlock_irq(&sem->wait_lock);
1150         lockevent_inc(rwsem_wlock);
1151         return sem;
1152
1153 out_nolock:
1154         __set_current_state(TASK_RUNNING);
1155         raw_spin_lock_irq(&sem->wait_lock);
1156         rwsem_del_waiter(sem, &waiter);
1157         if (!list_empty(&sem->wait_list))
1158                 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1159         raw_spin_unlock_irq(&sem->wait_lock);
1160         wake_up_q(&wake_q);
1161         lockevent_inc(rwsem_wlock_fail);
1162         return ERR_PTR(-EINTR);
1163 }
1164
1165 /*
1166  * handle waking up a waiter on the semaphore
1167  * - up_read/up_write has decremented the active part of count if we come here
1168  */
1169 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
1170 {
1171         unsigned long flags;
1172         DEFINE_WAKE_Q(wake_q);
1173
1174         raw_spin_lock_irqsave(&sem->wait_lock, flags);
1175
1176         if (!list_empty(&sem->wait_list))
1177                 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1178
1179         raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1180         wake_up_q(&wake_q);
1181
1182         return sem;
1183 }
1184
1185 /*
1186  * downgrade a write lock into a read lock
1187  * - caller incremented waiting part of count and discovered it still negative
1188  * - just wake up any readers at the front of the queue
1189  */
1190 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1191 {
1192         unsigned long flags;
1193         DEFINE_WAKE_Q(wake_q);
1194
1195         raw_spin_lock_irqsave(&sem->wait_lock, flags);
1196
1197         if (!list_empty(&sem->wait_list))
1198                 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1199
1200         raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1201         wake_up_q(&wake_q);
1202
1203         return sem;
1204 }
1205
1206 /*
1207  * lock for reading
1208  */
1209 static inline int __down_read_common(struct rw_semaphore *sem, int state)
1210 {
1211         long count;
1212
1213         if (!rwsem_read_trylock(sem, &count)) {
1214                 if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
1215                         return -EINTR;
1216                 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1217         }
1218         return 0;
1219 }
1220
1221 static inline void __down_read(struct rw_semaphore *sem)
1222 {
1223         __down_read_common(sem, TASK_UNINTERRUPTIBLE);
1224 }
1225
1226 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1227 {
1228         return __down_read_common(sem, TASK_INTERRUPTIBLE);
1229 }
1230
1231 static inline int __down_read_killable(struct rw_semaphore *sem)
1232 {
1233         return __down_read_common(sem, TASK_KILLABLE);
1234 }
1235
1236 static inline int __down_read_trylock(struct rw_semaphore *sem)
1237 {
1238         long tmp;
1239
1240         DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1241
1242         tmp = atomic_long_read(&sem->count);
1243         while (!(tmp & RWSEM_READ_FAILED_MASK)) {
1244                 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1245                                                     tmp + RWSEM_READER_BIAS)) {
1246                         rwsem_set_reader_owned(sem);
1247                         return 1;
1248                 }
1249         }
1250         return 0;
1251 }
1252
1253 /*
1254  * lock for writing
1255  */
1256 static inline int __down_write_common(struct rw_semaphore *sem, int state)
1257 {
1258         if (unlikely(!rwsem_write_trylock(sem))) {
1259                 if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
1260                         return -EINTR;
1261         }
1262
1263         return 0;
1264 }
1265
1266 static inline void __down_write(struct rw_semaphore *sem)
1267 {
1268         __down_write_common(sem, TASK_UNINTERRUPTIBLE);
1269 }
1270
1271 static inline int __down_write_killable(struct rw_semaphore *sem)
1272 {
1273         return __down_write_common(sem, TASK_KILLABLE);
1274 }
1275
1276 static inline int __down_write_trylock(struct rw_semaphore *sem)
1277 {
1278         DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1279         return rwsem_write_trylock(sem);
1280 }
1281
1282 /*
1283  * unlock after reading
1284  */
1285 static inline void __up_read(struct rw_semaphore *sem)
1286 {
1287         long tmp;
1288
1289         DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1290         DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1291
1292         rwsem_clear_reader_owned(sem);
1293         tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1294         DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
1295         if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
1296                       RWSEM_FLAG_WAITERS)) {
1297                 clear_nonspinnable(sem);
1298                 rwsem_wake(sem);
1299         }
1300 }
1301
1302 /*
1303  * unlock after writing
1304  */
1305 static inline void __up_write(struct rw_semaphore *sem)
1306 {
1307         long tmp;
1308
1309         DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1310         /*
1311          * sem->owner may differ from current if the ownership is transferred
1312          * to an anonymous writer by setting the RWSEM_NONSPINNABLE bit.
1313          */
1314         DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1315                             !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1316
1317         rwsem_clear_owner(sem);
1318         tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1319         if (unlikely(tmp & RWSEM_FLAG_WAITERS))
1320                 rwsem_wake(sem);
1321 }
1322
1323 /*
1324  * downgrade write lock to read lock
1325  */
1326 static inline void __downgrade_write(struct rw_semaphore *sem)
1327 {
1328         long tmp;
1329
1330         /*
1331          * When downgrading from exclusive to shared ownership,
1332          * anything inside the write-locked region cannot leak
1333          * into the read side. In contrast, anything in the
1334          * read-locked region is ok to be re-ordered into the
1335          * write side. As such, rely on RELEASE semantics.
1336          */
1337         DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1338         tmp = atomic_long_fetch_add_release(
1339                 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1340         rwsem_set_reader_owned(sem);
1341         if (tmp & RWSEM_FLAG_WAITERS)
1342                 rwsem_downgrade_wake(sem);
1343 }
1344
1345 #else /* !CONFIG_PREEMPT_RT */
1346
1347 #define RT_MUTEX_BUILD_MUTEX
1348 #include "rtmutex.c"
1349
1350 #define rwbase_set_and_save_current_state(state)        \
1351         set_current_state(state)
1352
1353 #define rwbase_restore_current_state()                  \
1354         __set_current_state(TASK_RUNNING)
1355
1356 #define rwbase_rtmutex_lock_state(rtm, state)           \
1357         __rt_mutex_lock(rtm, state)
1358
1359 #define rwbase_rtmutex_slowlock_locked(rtm, state)      \
1360         __rt_mutex_slowlock_locked(rtm, NULL, state)
1361
1362 #define rwbase_rtmutex_unlock(rtm)                      \
1363         __rt_mutex_unlock(rtm)
1364
1365 #define rwbase_rtmutex_trylock(rtm)                     \
1366         __rt_mutex_trylock(rtm)
1367
1368 #define rwbase_signal_pending_state(state, current)     \
1369         signal_pending_state(state, current)
1370
1371 #define rwbase_schedule()                               \
1372         schedule()
1373
1374 #include "rwbase_rt.c"
1375
1376 void __init_rwsem(struct rw_semaphore *sem, const char *name,
1377                   struct lock_class_key *key)
1378 {
1379         init_rwbase_rt(&(sem)->rwbase);
1380
1381 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1382         debug_check_no_locks_freed((void *)sem, sizeof(*sem));
1383         lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
1384 #endif
1385 }
1386 EXPORT_SYMBOL(__init_rwsem);
1387
1388 static inline void __down_read(struct rw_semaphore *sem)
1389 {
1390         rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1391 }
1392
1393 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1394 {
1395         return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
1396 }
1397
1398 static inline int __down_read_killable(struct rw_semaphore *sem)
1399 {
1400         return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
1401 }
1402
1403 static inline int __down_read_trylock(struct rw_semaphore *sem)
1404 {
1405         return rwbase_read_trylock(&sem->rwbase);
1406 }
1407
1408 static inline void __up_read(struct rw_semaphore *sem)
1409 {
1410         rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
1411 }
1412
1413 static inline void __sched __down_write(struct rw_semaphore *sem)
1414 {
1415         rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1416 }
1417
1418 static inline int __sched __down_write_killable(struct rw_semaphore *sem)
1419 {
1420         return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
1421 }
1422
1423 static inline int __down_write_trylock(struct rw_semaphore *sem)
1424 {
1425         return rwbase_write_trylock(&sem->rwbase);
1426 }
1427
1428 static inline void __up_write(struct rw_semaphore *sem)
1429 {
1430         rwbase_write_unlock(&sem->rwbase);
1431 }
1432
1433 static inline void __downgrade_write(struct rw_semaphore *sem)
1434 {
1435         rwbase_write_downgrade(&sem->rwbase);
1436 }
1437
1438 /* Debug stubs for the common API */
1439 #define DEBUG_RWSEMS_WARN_ON(c, sem)
1440
1441 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
1442                                             struct task_struct *owner)
1443 {
1444 }
1445
1446 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
1447 {
1448         int count = atomic_read(&sem->rwbase.readers);
1449
1450         return count < 0 && count != READER_BIAS;
1451 }
1452
1453 #endif /* CONFIG_PREEMPT_RT */
1454
1455 /*
1456  * lock for reading
1457  */
1458 void __sched down_read(struct rw_semaphore *sem)
1459 {
1460         might_sleep();
1461         rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1462
1463         LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1464 }
1465 EXPORT_SYMBOL(down_read);
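
/*
 * A minimal usage sketch (illustrative only, not part of this file;
 * my_rwsem is a hypothetical semaphore):
 *
 *	static DECLARE_RWSEM(my_rwsem);
 *
 *	down_read(&my_rwsem);
 *	... read-side critical section, other readers may hold the lock too ...
 *	up_read(&my_rwsem);
 */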
1466
1467 int __sched down_read_interruptible(struct rw_semaphore *sem)
1468 {
1469         might_sleep();
1470         rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1471
1472         if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
1473                 rwsem_release(&sem->dep_map, _RET_IP_);
1474                 return -EINTR;
1475         }
1476
1477         return 0;
1478 }
1479 EXPORT_SYMBOL(down_read_interruptible);
1480
1481 int __sched down_read_killable(struct rw_semaphore *sem)
1482 {
1483         might_sleep();
1484         rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1485
1486         if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1487                 rwsem_release(&sem->dep_map, _RET_IP_);
1488                 return -EINTR;
1489         }
1490
1491         return 0;
1492 }
1493 EXPORT_SYMBOL(down_read_killable);
1494
1495 /*
1496  * trylock for reading -- returns 1 if successful, 0 if contention
1497  */
1498 int down_read_trylock(struct rw_semaphore *sem)
1499 {
1500         int ret = __down_read_trylock(sem);
1501
1502         if (ret == 1)
1503                 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1504         return ret;
1505 }
1506 EXPORT_SYMBOL(down_read_trylock);
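
/*
 * Illustrative use of the trylock return convention (sketch only; my_rwsem
 * is hypothetical):
 *
 *	if (down_read_trylock(&my_rwsem)) {
 *		... read-side work ...
 *		up_read(&my_rwsem);
 *	} else {
 *		... lock contended, fall back or take the sleeping path ...
 *	}
 */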
1507
1508 /*
1509  * lock for writing
1510  */
1511 void __sched down_write(struct rw_semaphore *sem)
1512 {
1513         might_sleep();
1514         rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1515         LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1516 }
1517 EXPORT_SYMBOL(down_write);
1518
1519 /*
1520  * lock for writing
1521  */
1522 int __sched down_write_killable(struct rw_semaphore *sem)
1523 {
1524         might_sleep();
1525         rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1526
1527         if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1528                                   __down_write_killable)) {
1529                 rwsem_release(&sem->dep_map, _RET_IP_);
1530                 return -EINTR;
1531         }
1532
1533         return 0;
1534 }
1535 EXPORT_SYMBOL(down_write_killable);
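
/*
 * Illustrative error handling for the killable variant (sketch only;
 * my_rwsem is hypothetical):
 *
 *	if (down_write_killable(&my_rwsem))
 *		return -EINTR;		... a fatal signal arrived while waiting ...
 *	... write-side critical section ...
 *	up_write(&my_rwsem);
 */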
1536
1537 /*
1538  * trylock for writing -- returns 1 if successful, 0 if contention
1539  */
1540 int down_write_trylock(struct rw_semaphore *sem)
1541 {
1542         int ret = __down_write_trylock(sem);
1543
1544         if (ret == 1)
1545                 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1546
1547         return ret;
1548 }
1549 EXPORT_SYMBOL(down_write_trylock);
1550
1551 /*
1552  * release a read lock
1553  */
1554 void up_read(struct rw_semaphore *sem)
1555 {
1556         rwsem_release(&sem->dep_map, _RET_IP_);
1557         __up_read(sem);
1558 }
1559 EXPORT_SYMBOL(up_read);
1560
1561 /*
1562  * release a write lock
1563  */
1564 void up_write(struct rw_semaphore *sem)
1565 {
1566         rwsem_release(&sem->dep_map, _RET_IP_);
1567         __up_write(sem);
1568 }
1569 EXPORT_SYMBOL(up_write);
1570
1571 /*
1572  * downgrade write lock to read lock
1573  */
1574 void downgrade_write(struct rw_semaphore *sem)
1575 {
1576         lock_downgrade(&sem->dep_map, _RET_IP_);
1577         __downgrade_write(sem);
1578 }
1579 EXPORT_SYMBOL(downgrade_write);
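
/*
 * Illustrative downgrade pattern (sketch only; my_rwsem is hypothetical):
 *
 *	down_write(&my_rwsem);
 *	... publish or update the protected data ...
 *	downgrade_write(&my_rwsem);	... readers may now enter ...
 *	... keep reading the data just written, with no writer window ...
 *	up_read(&my_rwsem);
 */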
1580
1581 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1582
1583 void down_read_nested(struct rw_semaphore *sem, int subclass)
1584 {
1585         might_sleep();
1586         rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1587         LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1588 }
1589 EXPORT_SYMBOL(down_read_nested);
1590
1591 int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
1592 {
1593         might_sleep();
1594         rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1595
1596         if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1597                 rwsem_release(&sem->dep_map, _RET_IP_);
1598                 return -EINTR;
1599         }
1600
1601         return 0;
1602 }
1603 EXPORT_SYMBOL(down_read_killable_nested);
1604
1605 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1606 {
1607         might_sleep();
1608         rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1609         LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1610 }
1611 EXPORT_SYMBOL(_down_write_nest_lock);
1612
1613 void down_read_non_owner(struct rw_semaphore *sem)
1614 {
1615         might_sleep();
1616         __down_read(sem);
1617         __rwsem_set_reader_owned(sem, NULL);
1618 }
1619 EXPORT_SYMBOL(down_read_non_owner);
1620
1621 void down_write_nested(struct rw_semaphore *sem, int subclass)
1622 {
1623         might_sleep();
1624         rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1625         LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1626 }
1627 EXPORT_SYMBOL(down_write_nested);
1628
1629 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1630 {
1631         might_sleep();
1632         rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1633
1634         if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1635                                   __down_write_killable)) {
1636                 rwsem_release(&sem->dep_map, _RET_IP_);
1637                 return -EINTR;
1638         }
1639
1640         return 0;
1641 }
1642 EXPORT_SYMBOL(down_write_killable_nested);
1643
1644 void up_read_non_owner(struct rw_semaphore *sem)
1645 {
1646         DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1647         __up_read(sem);
1648 }
1649 EXPORT_SYMBOL(up_read_non_owner);
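
/*
 * Illustrative non-owner pattern (sketch only; my_rwsem is hypothetical):
 * the read lock is taken in one context and released in another, so the
 * owner field is not expected to match current at unlock time.
 *
 *	down_read_non_owner(&my_rwsem);		... e.g. before handing work off ...
 *	...
 *	up_read_non_owner(&my_rwsem);		... in the other context ...
 */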
1650
1651 #endif