powerpc/spinlock: Define smp_mb__after_spinlock only once
author    Davidlohr Bueso <dave@stgolabs.net>
          Tue, 9 Mar 2021 01:59:48 +0000 (17:59 -0800)
committer Michael Ellerman <mpe@ellerman.id.au>
          Fri, 26 Mar 2021 12:19:43 +0000 (23:19 +1100)
Instead of both queued and simple spinlocks doing it, move the
definition into the arch's spinlock.h.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210309015950.27688-2-dave@stgolabs.net
arch/powerpc/include/asm/qspinlock.h
arch/powerpc/include/asm/simple_spinlock.h
arch/powerpc/include/asm/spinlock.h
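
For context: on powerpc, spin_lock() is only an ACQUIRE, so callers that
need fully ordered accesses after taking a lock issue
smp_mb__after_spinlock(), which this arch maps to smp_mb(). Below is a
minimal sketch of the classic use case, modelled on try_to_wake_up() in
kernel/sched/core.c; the helper task_in_state() is illustrative, not
kernel code:

    #include <linux/sched.h>
    #include <linux/spinlock.h>

    /* Hypothetical helper: sample a task's state under its pi_lock.
     * Taking the lock is only an ACQUIRE, so the explicit barrier
     * keeps the ->state load from being reordered with accesses made
     * before the lock; on powerpc it expands to smp_mb(). */
    static int task_in_state(struct task_struct *p, unsigned int state)
    {
            unsigned long flags;
            int ret;

            raw_spin_lock_irqsave(&p->pi_lock, flags);
            smp_mb__after_spinlock();       /* full barrier, not just ACQUIRE */
            ret = !!(p->state & state);
            raw_spin_unlock_irqrestore(&p->pi_lock, flags);
            return ret;
    }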

diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index b752d34..3ce1a0b 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -44,8 +44,6 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 }
 #define queued_spin_lock queued_spin_lock
 
-#define smp_mb__after_spinlock()   smp_mb()
-
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
        /*
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 5b862de..da5d40c 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -282,7 +282,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)  rw_yield(lock)
 #define arch_write_relax(lock) rw_yield(lock)
 
-/* See include/linux/spinlock.h */
-#define smp_mb__after_spinlock()   smp_mb()
-
 #endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 6ec7228..bd75872 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -10,6 +10,9 @@
 #include <asm/simple_spinlock.h>
 #endif
 
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock()       smp_mb()
+
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 static inline void pv_spinlocks_init(void) { }
 #endif