// SPDX-License-Identifier: GPL-2.0-only

/*
 * RT-specific reader/writer semaphores and reader/writer locks
 *
 * down_write/write_lock()
 *  1) Lock rtmutex
 *  2) Remove the reader BIAS to force readers into the slow path
 *  3) Wait until all readers have left the critical section
 *  4) Mark it write locked
 *
 * up_write/write_unlock()
 *  1) Remove the write locked marker
 *  2) Set the reader BIAS, so readers can use the fast path again
 *  3) Unlock rtmutex, to release blocked readers
 *
 * down_read/read_lock()
 *  1) Try fast path acquisition (reader BIAS is set)
 *  2) Take rtmutex::wait_lock, which protects the writelocked flag
 *  3) If !writelocked, acquire it for read
 *  4) If writelocked, block on the rtmutex
 *  5) unlock rtmutex, goto 1)
 *
 * up_read/read_unlock()
 *  1) Try fast path release (reader count != 1)
 *  2) Wake the writer waiting in down_write()/write_lock() #3
 *
 * down_read/read_lock() #3 has the consequence that rw semaphores and rw
 * locks on RT are not writer fair. Writers, which should be avoided in RT
 * tasks (think mmap_sem), are however subject to the rtmutex priority/DL
 * inheritance mechanism.
 *
 * It's possible to make the rw primitives writer fair by keeping a list of
 * active readers. A blocked writer would force all newly incoming readers
 * to block on the rtmutex, but the rtmutex would have to be proxy locked
 * for one reader after the other. We can't use multi-reader inheritance
 * because there is no way to support that with SCHED_DEADLINE.
 * Implementing the one-by-one reader boosting/handover mechanism is major
 * surgery for very dubious value.
 *
 * The risk of writer starvation is there, but the pathological use cases
 * which trigger it are not necessarily the typical RT workloads.
 *
 * See the worked example below for how rwb->readers encodes these states.
 *
 * Common code shared between RT rw_semaphore and rwlock
 */
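/*
 * A worked example of how rwb->readers encodes the states described above
 * (a sketch; it assumes the READER_BIAS/WRITER_BIAS definitions from the
 * rwbase_rt header and that the counter idles at READER_BIAS):
 *
 *	READER_BIAS		unlocked, reader fast path open
 *	READER_BIAS + n		n readers hold the lock for read
 *	n			a writer removed the bias; n readers are
 *				still inside the critical section
 *	0			all readers left, the writer may proceed
 *	WRITER_BIAS		write locked
 */
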
static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
	int r;

	/*
	 * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is
	 * set.
	 */
	for (r = atomic_read(&rwb->readers); r < 0;) {
		if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
			return 1;
	}
	return 0;
}

static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
				      unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	int ret;

	raw_spin_lock_irq(&rtm->wait_lock);
	/*
	 * Allow readers, as long as the writer has not completely
	 * acquired the semaphore for write.
	 */
	if (atomic_read(&rwb->readers) != WRITER_BIAS) {
		atomic_inc(&rwb->readers);
		raw_spin_unlock_irq(&rtm->wait_lock);
		return 0;
	}

	/*
	 * Call into the slow lock path with the rtmutex->wait_lock
	 * held, so this can't result in the following race:
	 *
	 * Without holding m->wait_lock across the check and the blocking
	 * on the rtmutex, a writer could slip into the window, complete a
	 * full write side acquisition and release (sem->writelocked=true,
	 * unlock(m->wait_lock), later sem->writelocked=false) and get back
	 * onto the rtmutex ahead of Reader1 while Reader2 holds the lock
	 * for read.
	 *
	 * That would put Reader1 behind the writer waiting on
	 * Reader2 to call up_read(), which might be unbound.
	 */

	/*
	 * For rwlocks this returns 0 unconditionally, so the below
	 * !ret conditionals are optimized out.
	 */
	ret = rwbase_rtmutex_slowlock_locked(rtm, state);

	/*
	 * On success the rtmutex is held, so there can't be a writer
	 * active. Increment the reader count and immediately drop the
	 * rtmutex again.
	 *
	 * rtmutex->wait_lock has to be unlocked in any case of course.
	 */
	if (!ret)
		atomic_inc(&rwb->readers);
	raw_spin_unlock_irq(&rtm->wait_lock);
	if (!ret)
		rwbase_rtmutex_unlock(rtm);
	return ret;
}

static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
					    unsigned int state)
{
	if (rwbase_read_trylock(rwb))
		return 0;

	return __rwbase_read_lock(rwb, state);
}
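
/*
 * Illustrative only: a minimal sketch of how read side entry points of an
 * rw_semaphore style API could sit on top of rwbase_read_lock(). The
 * wrapper names and the sem->rwbase member are assumptions made for this
 * example, not definitions provided by this file.
 */
#if 0
static inline void example_down_read(struct rw_semaphore *sem)
{
	/* TASK_UNINTERRUPTIBLE waits cannot fail, the result is 0 */
	rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int example_down_read_interruptible(struct rw_semaphore *sem)
{
	/* Propagates a negative error code if a signal interrupts the wait */
	return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
}
#endif
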
static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
					 unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	struct task_struct *owner;

	raw_spin_lock_irq(&rtm->wait_lock);
	/*
	 * Wake the writer, i.e. the rtmutex owner. It might release the
	 * rtmutex concurrently in the fast path (due to a signal), but to
	 * clean up rwb->readers it needs to acquire rtm->wait_lock. The
	 * worst case which can happen is a spurious wakeup.
	 */
	owner = rt_mutex_owner(rtm);
	if (owner)
		wake_up_state(owner, state);

	raw_spin_unlock_irq(&rtm->wait_lock);
}

static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
					       unsigned int state)
{
	/*
	 * rwb->readers can only hit 0 when a writer is waiting for the
	 * active readers to leave the critical section.
	 */
	if (unlikely(atomic_dec_and_test(&rwb->readers)))
		__rwbase_read_unlock(rwb, state);
}
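
/*
 * Worked example for the fast path check above (a sketch, assuming the
 * counter idles at READER_BIAS as set up by the rwbase_rt header):
 *
 *	no writer waiting:  READER_BIAS + n  --dec-->  READER_BIAS + n - 1,
 *			    never 0, so the unlock stays in the fast path
 *	writer waiting:	    n  --dec-->  n - 1; the last reader reaches 0
 *			    and takes the slow path to wake the writer
 */
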
static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
					 unsigned long flags)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;

	/* Called with rtm->wait_lock held: adjust readers, drop both locks */
	atomic_add(READER_BIAS - bias, &rwb->readers);
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	rwbase_rtmutex_unlock(rtm);
}

static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	__rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}

static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	/* Release it and account current as reader */
	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}
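
/*
 * Worked example for the downgrade accounting above (a sketch, using the
 * same assumed bias encoding): a write locked rwb has readers == WRITER_BIAS
 * and __rwbase_write_unlock() adds READER_BIAS - (WRITER_BIAS - 1), so
 * readers ends up at READER_BIAS + 1: reader biased again, with the
 * downgrading task accounted as the single active reader until it drops
 * the lock via the read unlock path.
 */
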
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
				     unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	/* Take the rtmutex as a first step */
	if (rwbase_rtmutex_lock_state(rtm, state))
		return -EINTR;

	/* Force readers into slow path */
	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	/*
	 * set_current_state() for rw_semaphore
	 * current_save_and_set_rtlock_wait_state() for rwlock
	 */
	rwbase_set_and_save_current_state(state);

	/* Block until all readers have left the critical section. */
	for (; atomic_read(&rwb->readers);) {
		/* Optimized out for rwlocks */
		if (rwbase_signal_pending_state(state, current)) {
			__set_current_state(TASK_RUNNING);
			__rwbase_write_unlock(rwb, 0, flags);
			return -EINTR;
		}
		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);

		/*
		 * Schedule and wait for the readers to leave the critical
		 * section. The last reader leaving it wakes the waiter.
		 */
		if (atomic_read(&rwb->readers) != 0)
			rwbase_schedule();
		set_current_state(state);
		raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	}

	atomic_set(&rwb->readers, WRITER_BIAS);
	rwbase_restore_current_state();
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	return 0;
}
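
/*
 * Illustrative only: a minimal sketch of write side wrappers on top of
 * rwbase_write_lock()/rwbase_write_unlock(). As above, the wrapper names
 * and the sem->rwbase member are assumptions made for this example.
 */
#if 0
static inline void example_down_write(struct rw_semaphore *sem)
{
	/* TASK_UNINTERRUPTIBLE waits cannot fail, the result is 0 */
	rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int example_down_write_killable(struct rw_semaphore *sem)
{
	/* Returns -EINTR when a fatal signal interrupts the wait */
	return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
}

static inline void example_up_write(struct rw_semaphore *sem)
{
	rwbase_write_unlock(&sem->rwbase);
}
#endif
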
static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	if (!rwbase_rtmutex_trylock(rtm))
		return 0;

	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (!atomic_read(&rwb->readers)) {
		atomic_set(&rwb->readers, WRITER_BIAS);
		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		return 1;
	}

	__rwbase_write_unlock(rwb, 0, flags);
	return 0;
}