/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        unsigned int flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
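/* For reference: the semaphore count packs an 'active' part and a 'waiting'
 * part into one word. On 32-bit i386, <asm/rwsem.h> defines the biases as
 * follows (shown for illustration; other architectures may differ):
 *
 *      #define RWSEM_UNLOCKED_VALUE    0x00000000
 *      #define RWSEM_ACTIVE_BIAS       0x00000001
 *      #define RWSEM_ACTIVE_MASK       0x0000ffff
 *      #define RWSEM_WAITING_BIAS      (-0x00010000)
 *      #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * Each active reader or writer contributes RWSEM_ACTIVE_BIAS; each queued
 * waiter contributes RWSEM_WAITING_BIAS, which is why a negative count
 * signals contention.
 */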
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        signed long oldcount, woken, loop;

        if (downgrading)
                goto dont_wake_writers;

        /* if we came through an up_xxxx() call, we only wake someone up
         * if we can transition the active part of the count from 0 -> 1
         */
 try_again:
        oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
                                                - RWSEM_ACTIVE_BIAS;
        if (oldcount & RWSEM_ACTIVE_MASK)
                goto undo;
        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        /* try to grant a single write lock if there's a writer at the front
         * of the queue - note we leave the 'active part' of the count
         * incremented by 1 and the waiting part incremented by 0x00010000
         */
        if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
                goto readers_only;

        /* We must be careful not to touch 'waiter' after we set ->task = NULL.
         * It is allocated on the waiter's stack and may become invalid at
         * any time after that point (due to a wakeup from another source).
         */
        list_del(&waiter->list);
        tsk = waiter->task;
        smp_mb();
        waiter->task = NULL;
        wake_up_process(tsk);
        put_task_struct(tsk);
        goto out;
        /* don't want to wake any writers */
 dont_wake_writers:
        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
                goto out;

        /* grant an infinite number of read locks to the readers at the front
         * of the queue
         * - note we increment the 'active part' of the count by the number of
         *   readers before waking any processes up
         */
 readers_only:
        woken = 0;
        do {
                woken++;
                if (waiter->list.next == &sem->wait_list)
                        break;
                waiter = list_entry(waiter->list.next,
                                        struct rwsem_waiter, list);
        } while (waiter->flags & RWSEM_WAITING_FOR_READ);

        loop = woken;
        woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
        if (!downgrading)
                /* we'd already done one increment earlier */
                woken -= RWSEM_ACTIVE_BIAS;

        rwsem_atomic_add(woken, sem);
        next = sem->wait_list.next;
        for (; loop > 0; loop--) {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
        }

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;
 out:
        return sem;

        /* undo the change to count, but check for a transition 1->0 */
 undo:
        if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
                goto out;
        goto try_again;
}
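/* Worked example (using the illustrative i386 biases noted above): if
 * up_read() finds two readers queued, __rwsem_do_wake() first adds
 * RWSEM_ACTIVE_BIAS at try_again, then adds
 * 2 * (RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS) - RWSEM_ACTIVE_BIAS
 * = 2 * 0x00010001 - 1 = 0x00020001, leaving both readers counted as
 * active and no longer waiting before either task is actually woken.
 */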
/*
 * wait for a lock to be granted
 */
static inline struct rw_semaphore *
rwsem_down_failed_common(struct rw_semaphore *sem,
                        struct rwsem_waiter *waiter, signed long adjustment)
{
        struct task_struct *tsk = current;
        signed long count;

        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        spin_lock_irq(&sem->wait_lock);
        waiter->task = tsk;
        get_task_struct(tsk);
        list_add_tail(&waiter->list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively read-locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* if there are no active locks, wake the front queued process(es) up */
        if (!(count & RWSEM_ACTIVE_MASK))
                sem = __rwsem_do_wake(sem, 0);

        spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter->task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;
        return sem;
}
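/* Note on the handshake above: the sleep loop spins on waiter->task rather
 * than on a waitqueue flag. __rwsem_do_wake() reads ->task, issues smp_mb()
 * and only then zeroes ->task, so a waiter that observes NULL can safely let
 * its on-stack rwsem_waiter go out of scope; the get_task_struct() /
 * put_task_struct() pair keeps the task pinned across the wakeup.
 */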
/*
 * wait for the read lock to be granted
 */
struct rw_semaphore fastcall __sched *
rwsem_down_read_failed(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;

        waiter.flags = RWSEM_WAITING_FOR_READ;
        rwsem_down_failed_common(sem, &waiter,
                                RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
        return sem;
}
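/* For illustration only: a plain-C sketch of the kind of per-arch fast path
 * that lands here on contention (the real i386 version is inline assembly;
 * this function is not part of this file):
 *
 *      static inline void __down_read(struct rw_semaphore *sem)
 *      {
 *              if (rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem) <= 0)
 *                      rwsem_down_read_failed(sem);
 *      }
 *
 * A non-positive result means a writer holds the lock or waiters are queued,
 * so the caller must take the slow path.
 */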
/*
 * wait for the write lock to be granted
 */
struct rw_semaphore fastcall __sched *
rwsem_down_write_failed(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;

        waiter.flags = RWSEM_WAITING_FOR_WRITE;
        rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
        return sem;
}
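/* Again purely illustrative, under the same assumptions as the __down_read
 * sketch above:
 *
 *      static inline void __down_write(struct rw_semaphore *sem)
 *      {
 *              signed long count;
 *
 *              count = rwsem_atomic_update(RWSEM_ACTIVE_WRITE_BIAS, sem);
 *              if (count != RWSEM_ACTIVE_WRITE_BIAS)
 *                      rwsem_down_write_failed(sem);
 *      }
 *
 * The fast path adds both biases at once, which is why the slow path above
 * only has to subtract RWSEM_ACTIVE_BIAS to become a mere waiter.
 */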
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        spin_unlock_irqrestore(&sem->wait_lock, flags);
        return sem;
}
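/* Sketch of a matching release fast path (illustrative, not this file's API):
 *
 *      static inline void __up_read(struct rw_semaphore *sem)
 *      {
 *              signed long count;
 *
 *              count = rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem);
 *              if (count < -1 && !(count & RWSEM_ACTIVE_MASK))
 *                      rwsem_wake(sem);
 *      }
 *
 * i.e. only the releaser that drops the active part to zero while waiters
 * are still queued (count still negative) needs to enter rwsem_wake().
 */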
/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        spin_unlock_irqrestore(&sem->wait_lock, flags);
        return sem;
}
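/* The corresponding caller sketch (illustrative): downgrading converts the
 * writer's RWSEM_ACTIVE_WRITE_BIAS into a reader's RWSEM_ACTIVE_BIAS by
 * subtracting RWSEM_WAITING_BIAS, i.e. incrementing the waiting part:
 *
 *      static inline void __downgrade_write(struct rw_semaphore *sem)
 *      {
 *              signed long count;
 *
 *              count = rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem);
 *              if (count < 0)
 *                      rwsem_downgrade_wake(sem);
 *      }
 */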
EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);