locking/rwsem: Implement a new locking scheme
kernel/locking/rwsem-xadd.c
index 6b3ee99..7d537b5 100644
@@ -9,6 +9,8 @@
  *
  * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
  * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
+ *
+ * Rwsem count bit fields re-definition by Waiman Long <longman@redhat.com>.
  */
 #include <linux/rwsem.h>
 #include <linux/init.h>
 #include "rwsem.h"
 
 /*
- * Guide to the rw_semaphore's count field for common values.
- * (32-bit case illustrated, similar for 64-bit)
- *
- * 0x0000000X  (1) X readers active or attempting lock, no writer waiting
- *                 X = #active_readers + #readers attempting to lock
- *                 (X*ACTIVE_BIAS)
- *
- * 0x00000000  rwsem is unlocked, and no one is waiting for the lock or
- *             attempting to read lock or write lock.
- *
- * 0xffff000X  (1) X readers active or attempting lock, with waiters for lock
- *                 X = #active readers + # readers attempting lock
- *                 (X*ACTIVE_BIAS + WAITING_BIAS)
- *             (2) 1 writer attempting lock, no waiters for lock
- *                 X-1 = #active readers + #readers attempting lock
- *                 ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
- *             (3) 1 writer active, no waiters for lock
- *                 X-1 = #active readers + #readers attempting lock
- *                 ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
- *
- * 0xffff0001  (1) 1 reader active or attempting lock, waiters for lock
- *                 (WAITING_BIAS + ACTIVE_BIAS)
- *             (2) 1 writer active or attempting lock, no waiters for lock
- *                 (ACTIVE_WRITE_BIAS)
+ * Guide to the rw_semaphore's count field.
  *
- * 0xffff0000  (1) There are writers or readers queued but none active
- *                 or in the process of attempting lock.
- *                 (WAITING_BIAS)
- *             Note: writer can attempt to steal lock for this count by adding
- *             ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
+ * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
+ * by a writer.
  *
- * 0xfffe0001  (1) 1 writer active, or attempting lock. Waiters on queue.
- *                 (ACTIVE_WRITE_BIAS + WAITING_BIAS)
- *
- * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
- *      the count becomes more than 0 for successful lock acquisition,
- *      i.e. the case where there are only readers or nobody has lock.
- *      (1st and 2nd case above).
- *
- *      Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
- *      checking the count becomes ACTIVE_WRITE_BIAS for successful lock
- *      acquisition (i.e. nobody else has lock or attempts lock).  If
- *      unsuccessful, in rwsem_down_write_failed, we'll check to see if there
- *      are only waiters but none active (5th case above), and attempt to
- *      steal the lock.
+ * The lock is owned by readers when
+ * (1) the RWSEM_WRITER_LOCKED bit isn't set in count,
+ * (2) some of the reader bits are set in count, and
+ * (3) the owner field has the RWSEM_READER_OWNED bit set.
  *
+ * Having some reader bits set is not enough to guarantee a reader-owned
+ * lock, as the readers may be in the process of backing out from the
+ * count and a writer may have just released the lock. So another writer
+ * may steal the lock immediately after that.
  */
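
For reference, the count layout described above comes from the companion
rwsem.h change, which is not shown in this diff. A minimal sketch of the
assumed bit-field definitions, consistent with the masks used below (the
exact reader shift is an assumption):

	/* Bit 0: writer lock; bit 1: waiters present; bits 8+: reader count */
	#define RWSEM_WRITER_LOCKED	(1UL << 0)
	#define RWSEM_FLAG_WAITERS	(1UL << 1)
	#define RWSEM_READER_SHIFT	8
	#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
	#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
	#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
	#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
	/* reader fast path must fail if a writer holds the lock or waiters exist */
	#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS)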
 
 /*
@@ -86,8 +56,8 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
        atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        sem->owner = NULL;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        osq_lock_init(&sem->osq);
 #endif
 }
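
Note that sem->owner is now initialized unconditionally: under the scheme
described above, a reader-owned lock is recognized by a bit in the owner
field, so the field must exist even without CONFIG_RWSEM_SPIN_ON_OWNER.
A minimal sketch of the assumed helper, where RWSEM_READER_OWNED is taken
to be a low tag bit in the owner pointer:

	static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
	{
		/* Tag the owner field so others see a reader-owned lock. */
		unsigned long val = (unsigned long)current | RWSEM_READER_OWNED;

		WRITE_ONCE(sem->owner, (struct task_struct *)val);
	}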
@@ -113,9 +83,8 @@ enum rwsem_wake_type {
 
 /*
  * handle the lock release when processes blocked on it can now run
- * - if we come here from up_xxxx(), then:
- *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
- *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
+ * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
+ *   have been set.
  * - there must be someone on the queue
  * - the wait_lock must be held by the caller
  * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
@@ -130,6 +99,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 {
        struct rwsem_waiter *waiter, *tmp;
        long oldcount, woken = 0, adjustment = 0;
+       struct list_head wlist;
 
        /*
         * Take a peek at the queue head waiter such that we can determine
@@ -159,22 +129,11 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
         * so we can bail out early if a writer stole the lock.
         */
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
-               adjustment = RWSEM_ACTIVE_READ_BIAS;
- try_reader_grant:
+               adjustment = RWSEM_READER_BIAS;
                oldcount = atomic_long_fetch_add(adjustment, &sem->count);
-               if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
-                       /*
-                        * If the count is still less than RWSEM_WAITING_BIAS
-                        * after removing the adjustment, it is assumed that
-                        * a writer has stolen the lock. We have to undo our
-                        * reader grant.
-                        */
-                       if (atomic_long_add_return(-adjustment, &sem->count) <
-                           RWSEM_WAITING_BIAS)
-                               return;
-
-                       /* Last active locker left. Retry waking readers. */
-                       goto try_reader_grant;
+               if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
+                       atomic_long_sub(adjustment, &sem->count);
+                       return;
                }
                /*
                 * Set it to reader-owned to give spinners an early
@@ -188,18 +147,43 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
         * of the queue. We know that woken will be at least 1 as we accounted
         * for above. Note we increment the 'active part' of the count by the
         * number of readers before waking any processes up.
+        *
+        * We have to do the wakeup in 2 passes to prevent the possibility
+        * that the reader count may be decremented before it is incremented.
+        * This is because the to-be-woken waiter may not have slept yet,
+        * so it may see waiter->task cleared, finish its critical section
+        * and do an unlock before the reader count increment. Hence:
+        *
+        * 1) Collect the read-waiters in a separate list, count them and
+        *    fully increment the reader count in rwsem.
+        * 2) For each waiter in the new list, clear waiter->task and
+        *    put it into wake_q to be woken up later.
         */
-       list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
-               struct task_struct *tsk;
-
+       list_for_each_entry(waiter, &sem->wait_list, list) {
                if (waiter->type == RWSEM_WAITING_FOR_WRITE)
                        break;
 
                woken++;
-               tsk = waiter->task;
+       }
+       list_cut_before(&wlist, &sem->wait_list, &waiter->list);
+
+       adjustment = woken * RWSEM_READER_BIAS - adjustment;
+       lockevent_cond_inc(rwsem_wake_reader, woken);
+       if (list_empty(&sem->wait_list)) {
+               /* hit end of list above */
+               adjustment -= RWSEM_FLAG_WAITERS;
+       }
+
+       if (adjustment)
+               atomic_long_add(adjustment, &sem->count);
+
+       /* 2nd pass */
+       list_for_each_entry_safe(waiter, tmp, &wlist, list) {
+               struct task_struct *tsk;
 
+               tsk = waiter->task;
                get_task_struct(tsk);
-               list_del(&waiter->list);
+
                /*
                 * Ensure calling get_task_struct() before setting the reader
                 * waiter to nil such that rwsem_down_read_failed() cannot
@@ -213,16 +197,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
                 */
                wake_q_add_safe(wake_q, tsk);
        }
-
-       adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
-       lockevent_cond_inc(rwsem_wake_reader, woken);
-       if (list_empty(&sem->wait_list)) {
-               /* hit end of list above */
-               adjustment -= RWSEM_WAITING_BIAS;
-       }
-
-       if (adjustment)
-               atomic_long_add(adjustment, &sem->count);
 }
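
To make the two-pass reasoning above concrete, this is the interleaving a
single-pass wakeup would allow (a hypothetical trace, with waker W waking
reader R that has not yet gone to sleep):

	/*
	 *   W (waker)                         R (to-be-woken reader)
	 *   ---------                         ----------------------
	 *   smp_store_release(&waiter->task, NULL);
	 *                                     sees waiter->task == NULL,
	 *                                     runs its critical section,
	 *                                     then calls up_read():
	 *                                       count -= RWSEM_READER_BIAS;
	 *   count += woken * RWSEM_READER_BIAS;
	 *
	 * R's decrement lands before W's increment, so the reader count in
	 * count transiently underflows. Incrementing first (pass 1), then
	 * clearing waiter->task (pass 2), closes the window.
	 */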
 
 /*
@@ -232,22 +206,15 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
  */
 static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
 {
-       /*
-        * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
-        */
-       if (count != RWSEM_WAITING_BIAS)
+       long new;
+
+       if (count & RWSEM_LOCK_MASK)
                return false;
 
-       /*
-        * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
-        * are other tasks on the wait list, we need to add on WAITING_BIAS.
-        */
-       count = list_is_singular(&sem->wait_list) ?
-                       RWSEM_ACTIVE_WRITE_BIAS :
-                       RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
+       new = count + RWSEM_WRITER_LOCKED -
+            (list_is_singular(&sem->wait_list) ? RWSEM_FLAG_WAITERS : 0);
 
-       if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
-                                                       == RWSEM_WAITING_BIAS) {
+       if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, new)) {
                rwsem_set_owner(sem);
                return true;
        }
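
Unlike the old atomic_long_cmpxchg_acquire() call, the try_cmpxchg variant
returns a boolean and, on failure, writes the value it observed back into
'count'. A roughly equivalent open-coded form of the attempt above, for
comparison:

	long old = atomic_long_cmpxchg_acquire(&sem->count, count, new);

	if (old == count) {			/* exchange took place */
		rwsem_set_owner(sem);
		return true;
	}
	count = old;	/* try_cmpxchg also refreshes 'count' on failure */
	return false;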
@@ -263,9 +230,9 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
        long count = atomic_long_read(&sem->count);
 
-       while (!count || count == RWSEM_WAITING_BIAS) {
+       while (!(count & RWSEM_LOCK_MASK)) {
                if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
-                                       count + RWSEM_ACTIVE_WRITE_BIAS)) {
+                                       count + RWSEM_WRITER_LOCKED)) {
                        rwsem_set_owner(sem);
                        lockevent_inc(rwsem_opt_wlock);
                        return true;
@@ -395,25 +362,11 @@ done:
        lockevent_cond_inc(rwsem_opt_fail, !taken);
        return taken;
 }
-
-/*
- * Return true if the rwsem has active spinner
- */
-static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
-{
-       return osq_is_locked(&sem->osq);
-}
-
 #else
 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 {
        return false;
 }
-
-static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
-{
-       return false;
-}
 #endif
 
 /*
@@ -422,7 +375,7 @@ static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
 static inline struct rw_semaphore __sched *
 __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 {
-       long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
+       long count, adjustment = -RWSEM_READER_BIAS;
        struct rwsem_waiter waiter;
        DEFINE_WAKE_Q(wake_q);
 
@@ -434,16 +387,16 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
                /*
                 * In case the wait queue is empty and the lock isn't owned
                 * by a writer, this reader can exit the slowpath and return
-                * immediately as its RWSEM_ACTIVE_READ_BIAS has already
-                * been set in the count.
+                * immediately as its RWSEM_READER_BIAS has already been
+                * set in the count.
                 */
-               if (atomic_long_read(&sem->count) >= 0) {
+               if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
                        raw_spin_unlock_irq(&sem->wait_lock);
                        rwsem_set_reader_owned(sem);
                        lockevent_inc(rwsem_rlock_fast);
                        return sem;
                }
-               adjustment += RWSEM_WAITING_BIAS;
+               adjustment += RWSEM_FLAG_WAITERS;
        }
        list_add_tail(&waiter.list, &sem->wait_list);
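
The "RWSEM_READER_BIAS has already been set" remark above holds because the
reader fast path increments count before ever entering this slowpath. A
sketch of the assumed __down_read() fast path from the companion rwsem.h:

	static inline void __down_read(struct rw_semaphore *sem)
	{
		/* Take the slowpath if a writer holds the lock or waiters exist. */
		if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
				&sem->count) & RWSEM_READ_FAILED_MASK))
			rwsem_down_read_failed(sem);
		else
			rwsem_set_reader_owned(sem);
	}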
 
@@ -456,9 +409,8 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
         * If there are no writers and we are first in the queue,
         * wake our own waiter to join the existing active readers!
         */
-       if (count == RWSEM_WAITING_BIAS ||
-           (count > RWSEM_WAITING_BIAS &&
-            adjustment != -RWSEM_ACTIVE_READ_BIAS))
+       if (!(count & RWSEM_LOCK_MASK) ||
+          (!(count & RWSEM_WRITER_MASK) && (adjustment & RWSEM_FLAG_WAITERS)))
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
        raw_spin_unlock_irq(&sem->wait_lock);
@@ -486,7 +438,7 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 out_nolock:
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list))
-               atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
+               atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
        raw_spin_unlock_irq(&sem->wait_lock);
        __set_current_state(TASK_RUNNING);
        lockevent_inc(rwsem_rlock_fail);
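
Because RWSEM_FLAG_WAITERS is now a single bit rather than an additive bias,
an atomic andnot suffices here where the old code had to subtract
RWSEM_WAITING_BIAS. A worked example with illustrative values:

	/* 3 readers hold the lock and the waiters bit is set: */
	long count = (3 << RWSEM_READER_SHIFT) | RWSEM_FLAG_WAITERS;

	count &= ~RWSEM_FLAG_WAITERS;	/* clears bit 1 only */
	/* count == 3 << RWSEM_READER_SHIFT: reader bits are untouched */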
@@ -519,9 +471,6 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
        struct rw_semaphore *ret = sem;
        DEFINE_WAKE_Q(wake_q);
 
-       /* undo write bias from down_write operation, stop active locking */
-       count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);
-
        /* do optimistic spinning and steal lock if possible */
        if (rwsem_optimistic_spin(sem))
                return sem;
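
The writer slowpath no longer begins by undoing a write bias because the new
writer fast path does not speculatively modify count: it only acquires via a
cmpxchg from the unlocked value, so a failed attempt leaves count untouched.
A sketch of the assumed __down_write() fast path, taking RWSEM_UNLOCKED_VALUE
to be 0 as in mainline:

	static inline void __down_write(struct rw_semaphore *sem)
	{
		/*
		 * cmpxchg returns the old count; any nonzero value means the
		 * lock was not free, so fall into the slowpath.
		 */
		if (unlikely(atomic_long_cmpxchg_acquire(&sem->count,
				RWSEM_UNLOCKED_VALUE, RWSEM_WRITER_LOCKED)))
			rwsem_down_write_failed(sem);
		rwsem_set_owner(sem);
	}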
@@ -541,16 +490,18 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 
        list_add_tail(&waiter.list, &sem->wait_list);
 
-       /* we're now waiting on the lock, but no longer actively locking */
+       /* we're now waiting on the lock */
        if (waiting) {
                count = atomic_long_read(&sem->count);
 
                /*
                 * If there were already threads queued before us and there are
-                * no active writers, the lock must be read owned; so we try to
-                * wake any read locks that were queued ahead of us.
+                * no active writers and some readers, the lock must be read
+                * owned; so we try to wake any read locks that were queued
+                * ahead of us.
                 */
-               if (count > RWSEM_WAITING_BIAS) {
+               if (!(count & RWSEM_WRITER_MASK) &&
+                    (count & RWSEM_READER_MASK)) {
                        __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
                        /*
                         * The wakeup is normally called _after_ the wait_lock
@@ -567,8 +518,9 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
                        wake_q_init(&wake_q);
                }
 
-       } else
-               count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);
+       } else {
+               count = atomic_long_add_return(RWSEM_FLAG_WAITERS, &sem->count);
+       }
 
        /* wait until we successfully acquire the lock */
        set_current_state(state);
@@ -585,7 +537,8 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
                        schedule();
                        lockevent_inc(rwsem_sleep_writer);
                        set_current_state(state);
-               } while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);
+                       count = atomic_long_read(&sem->count);
+               } while (count & RWSEM_LOCK_MASK);
 
                raw_spin_lock_irq(&sem->wait_lock);
        }
@@ -601,7 +554,7 @@ out_nolock:
        raw_spin_lock_irq(&sem->wait_lock);
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list))
-               atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
+               atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
        else
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);
@@ -635,65 +588,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);
 
-       /*
-       * __rwsem_down_write_failed_common(sem)
-       *   rwsem_optimistic_spin(sem)
-       *     osq_unlock(sem->osq)
-       *   ...
-       *   atomic_long_add_return(&sem->count)
-       *
-       *      - VS -
-       *
-       *              __up_write()
-       *                if (atomic_long_sub_return_release(&sem->count) < 0)
-       *                  rwsem_wake(sem)
-       *                    osq_is_locked(&sem->osq)
-       *
-       * And __up_write() must observe !osq_is_locked() when it observes the
-       * atomic_long_add_return() in order to not miss a wakeup.
-       *
-       * This boils down to:
-       *
-       * [S.rel] X = 1                [RmW] r0 = (Y += 0)
-       *         MB                         RMB
-       * [RmW]   Y += 1               [L]   r1 = X
-       *
-       * exists (r0=1 /\ r1=0)
-       */
-       smp_rmb();
-
-       /*
-        * If a spinner is present, it is not necessary to do the wakeup.
-        * Try to do wakeup only if the trylock succeeds to minimize
-        * spinlock contention which may introduce too much delay in the
-        * unlock operation.
-        *
-        *    spinning writer           up_write/up_read caller
-        *    ---------------           -----------------------
-        * [S]   osq_unlock()           [L]   osq
-        *       MB                           RMB
-        * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
-        *
-        * Here, it is important to make sure that there won't be a missed
-        * wakeup while the rwsem is free and the only spinning writer goes
-        * to sleep without taking the rwsem. Even when the spinning writer
-        * is just going to break out of the waiting loop, it will still do
-        * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
-        * rwsem_has_spinner() is true, it will guarantee at least one
-        * trylock attempt on the rwsem later on.
-        */
-       if (rwsem_has_spinner(sem)) {
-               /*
-                * The smp_rmb() here is to make sure that the spinner
-                * state is consulted before reading the wait_lock.
-                */
-               smp_rmb();
-               if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
-                       return sem;
-               goto locked;
-       }
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
-locked:
 
        if (!list_empty(&sem->wait_list))
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);