locking/spinlocks: Remove an instruction from spin and write locks
author Matthew Wilcox <willy@infradead.org>
Mon, 20 Aug 2018 14:19:14 +0000 (10:19 -0400)
committer Ingo Molnar <mingo@kernel.org>
Tue, 2 Oct 2018 07:49:42 +0000 (09:49 +0200)
Both spin locks and write locks currently do:

 f0 0f b1 17             lock cmpxchg %edx,(%rdi)
 85 c0                   test   %eax,%eax
 75 05                   jne    [slowpath]

This 'test' insn is superfluous; the cmpxchg insn sets the Z flag
appropriately.  Peter pointed out that using atomic_try_cmpxchg_acquire()
will let the compiler know this is true.  Comparing before/after
disassemblies shows that the only effect is to remove this insn.
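
With the change, the fast path is expected to reduce to just the
following (a sketch; the exact byte encodings and branch offset will
of course differ):

 lock cmpxchg %edx,(%rdi)
 jne    [slowpath]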

Take this opportunity to make the spin & write lock code resemble each
other more closely and have similar likely() hints.
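
For reference, the generic fallback for the try_cmpxchg() family behaves
roughly as sketched below (simplified; the real helpers are type-generic
macros, and on x86 the arch implementation can feed the cmpxchg's Z flag
straight into the branch, which is what lets the 'test' disappear):

 static inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
 {
         int r, o = *old;

         r = atomic_cmpxchg_acquire(v, o, new);
         if (unlikely(r != o))
                 *old = r;       /* hand the observed value back to the caller */
         return r == o;
 }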

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Link: http://lkml.kernel.org/r/20180820162639.GC25153@bombadil.infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/asm-generic/qrwlock.h
include/asm-generic/qspinlock.h

diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 0f7062b..36254d2 100644
@@ -71,8 +71,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
        if (unlikely(cnts))
                return 0;
 
-       return likely(atomic_cmpxchg_acquire(&lock->cnts,
-                                            cnts, cnts | _QW_LOCKED) == cnts);
+       return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
+                               _QW_LOCKED));
 }
 /**
  * queued_read_lock - acquire read lock of a queue rwlock
@@ -96,8 +96,9 @@ static inline void queued_read_lock(struct qrwlock *lock)
  */
 static inline void queued_write_lock(struct qrwlock *lock)
 {
+       u32 cnts = 0;
        /* Optimize for the unfair lock case where the fair flag is 0. */
-       if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
+       if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
                return;
 
        queued_write_lock_slowpath(lock);
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 9cc4575..7541fa7 100644
@@ -66,10 +66,12 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
  */
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
-       if (!atomic_read(&lock->val) &&
-          (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
-               return 1;
-       return 0;
+       u32 val = atomic_read(&lock->val);
+
+       if (unlikely(val))
+               return 0;
+
+       return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
 }
 
 extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
@@ -80,11 +82,11 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-       u32 val;
+       u32 val = 0;
 
-       val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
-       if (likely(val == 0))
+       if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
                return;
+
        queued_spin_lock_slowpath(lock, val);
 }
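
Note (not part of the patch): when the try_cmpxchg fails it writes the
value it observed back through &val, so the val handed to
queued_spin_lock_slowpath() is still the freshly read lock word, exactly
what the return value of the old atomic_cmpxchg_acquire() provided.
A minimal sketch of that contract:

 u32 val = 0;

 if (!atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))
         /* val now holds whatever value the cmpxchg observed */
         queued_spin_lock_slowpath(lock, val);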