ARC: atomic_cmpxchg/atomic_xchg: implement relaxed variants
author	Vineet Gupta <vgupta@kernel.org>
	Mon, 11 May 2020 20:27:23 +0000 (13:27 -0700)
committer	Vineet Gupta <vgupta@kernel.org>
	Tue, 24 Aug 2021 21:25:47 +0000 (14:25 -0700)
And move them out of cmpxchg.h to the canonical atomic.h

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
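
A note on the naming: a fully ordered cmpxchg()/xchg() also acts as a full
memory barrier, while the _relaxed form only guarantees atomicity. Below is a
minimal, self-contained userspace sketch of that distinction, using the
GCC/Clang __atomic builtins and a stand-in my_atomic_t type; it is an
illustration only, not the ARC kernel code.

/*
 * Illustration only (not kernel code): fully ordered vs. relaxed cmpxchg.
 * Both are atomic; only the memory ordering guarantees differ.
 */
#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* Fully ordered: the operation also acts as a full memory barrier. */
static inline int my_atomic_cmpxchg(my_atomic_t *v, int old, int new)
{
	__atomic_compare_exchange_n(&v->counter, &old, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;	/* value observed in v->counter, as cmpxchg() returns */
}

/* Relaxed: same atomic update, but no ordering against other accesses. */
static inline int my_atomic_cmpxchg_relaxed(my_atomic_t *v, int old, int new)
{
	__atomic_compare_exchange_n(&v->counter, &old, new, 0,
				    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	return old;
}

int main(void)
{
	my_atomic_t v = { .counter = 1 };

	printf("%d\n", my_atomic_cmpxchg(&v, 1, 2));		/* succeeds, prints 1 */
	printf("%d\n", my_atomic_cmpxchg_relaxed(&v, 5, 6));	/* fails, prints 2 */
	return 0;
}

The #ifdef guards in the atomic.h hunk below define the _relaxed wrappers only
when the underlying arch_cmpxchg_relaxed()/arch_xchg_relaxed() primitives exist.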
arch/arc/include/asm/atomic.h
arch/arc/include/asm/cmpxchg.h

arch/arc/include/asm/atomic.h
index ee88e1d..52ee51e 100644
 #include <asm/atomic-spinlock.h>
 #endif
 
+#define arch_atomic_cmpxchg(v, o, n)                                   \
+({                                                                     \
+       arch_cmpxchg(&((v)->counter), (o), (n));                        \
+})
+
+#ifdef arch_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_relaxed(v, o, n)                           \
+({                                                                     \
+       arch_cmpxchg_relaxed(&((v)->counter), (o), (n));                \
+})
+#endif
+
+#define arch_atomic_xchg(v, n)                                         \
+({                                                                     \
+       arch_xchg(&((v)->counter), (n));                                \
+})
+
+#ifdef arch_xchg_relaxed
+#define arch_atomic_xchg_relaxed(v, n)                                 \
+({                                                                     \
+       arch_xchg_relaxed(&((v)->counter), (n));                        \
+})
+#endif
+
+/*
+ * 64-bit atomics
+ */
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
 #else
arch/arc/include/asm/cmpxchg.h
index e2ae0eb..c5b544a 100644
 
 #endif
 
-/*
- * atomic_cmpxchg is same as cmpxchg
- *   LLSC: only different in data-type, semantics are exactly same
- *  !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
- *         semantics, and this lock also happens to be used by atomic_*()
- */
-#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
-
 /*
  * xchg
  */
 
 #endif
 
-/*
- * "atomic" variant of xchg()
- * REQ: It needs to follow the same serialization rules as other atomic_xxx()
- * Since xchg() doesn't always do that, it would seem that following definition
- * is incorrect. But here's the rationale:
- *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
- *   LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
- *         is natively "SMP safe", no serialization required).
- *   UP  : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
- *         could clobber them. atomic_xchg() itself would be 1 insn, so it
- *         can't be clobbered by others. Thus no serialization required when
- *         atomic_xchg is involved.
- */
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
 #endif
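
For context on why _relaxed variants are worth providing: when only a relaxed
primitive exists, more strongly ordered forms can be composed from it by
bracketing it with fences, which is the general shape of the kernel's generated
atomic fallbacks (produced by scripts/atomic/). A simplified, self-contained
sketch of that composition follows, with stand-in names and userspace builtins
rather than the real kernel macros.

/*
 * Illustration only: composing a fully ordered xchg from a relaxed one by
 * bracketing it with full fences.  Stand-in names; the kernel's generated
 * fallbacks follow this shape but differ in detail.
 */
#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* Relaxed exchange: atomic swap, no ordering guarantees. */
static inline int my_atomic_xchg_relaxed(my_atomic_t *v, int new)
{
	return __atomic_exchange_n(&v->counter, new, __ATOMIC_RELAXED);
}

/* Fully ordered exchange derived from the relaxed one plus fences. */
static inline int my_atomic_xchg(my_atomic_t *v, int new)
{
	int ret;

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* pre-op full barrier */
	ret = my_atomic_xchg_relaxed(v, new);
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* post-op full barrier */
	return ret;
}

int main(void)
{
	my_atomic_t v = { .counter = 10 };

	printf("%d\n", my_atomic_xchg(&v, 20));	/* prints 10 */
	printf("%d\n", v.counter);		/* prints 20 */
	return 0;
}

With this patch ARC supplies both the fully ordered wrappers and, where the
primitives exist, the relaxed ones; the remaining acquire/release forms can
then be filled in generically.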