1 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */
6 #ifndef __ASM_ARC_CMPXCHG_H
7 #define __ASM_ARC_CMPXCHG_H
9 #include <linux/build_bug.h>
10 #include <linux/types.h>
12 #include <asm/barrier.h>
15 #ifdef CONFIG_ARC_HAS_LLSC
/*
 * if (*ptr == @old)
 *      *ptr = @new
 * Returns the pre-existing value of *ptr either way; the store only
 * happens on a match, and the llock/scond pair retries if the reservation
 * is lost to another writer.
 */
#define __cmpxchg(ptr, old, new)					\
({									\
	__typeof__(*(ptr)) _prev;					\
									\
	__asm__ __volatile__(						\
	"1:	llock  %0, [%1]	\n"					\
	"	brne   %0, %2, 2f	\n"				\
	"	scond  %3, [%1]	\n"					\
	"	bnz     1b		\n"				\
	"2:				\n"				\
	: "=&r"(_prev)	/* Early clobber prevent reg reuse */		\
	: "r"(ptr),	/* Not "m": llock only supports reg */		\
	  "ir"(old),							\
	  "r"(new)	/* Not "ir": scond can't take LIMM */		\
	: "cc",								\
	  "memory");	/* gcc knows memory is clobbered */		\
									\
	_prev;								\
})
/*
 * Relaxed (no ordering) cmpxchg: dispatches to the LLSC __cmpxchg().
 * Only 32-bit operands are supported; any other size is a build error.
 */
#define arch_cmpxchg_relaxed(ptr, old, new)				\
({									\
	__typeof__(ptr) _p_ = (ptr);					\
	__typeof__(*(ptr)) _o_ = (old);					\
	__typeof__(*(ptr)) _n_ = (new);					\
	__typeof__(*(ptr)) _prev_;					\
									\
	switch(sizeof((_p_))) {						\
	case 4:								\
		_prev_ = __cmpxchg(_p_, _o_, _n_);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	_prev_;								\
})
60 #define arch_cmpxchg(ptr, old, new) \
62 volatile __typeof__(ptr) _p_ = (ptr); \
63 __typeof__(*(ptr)) _o_ = (old); \
64 __typeof__(*(ptr)) _n_ = (new); \
65 __typeof__(*(ptr)) _prev_; \
66 unsigned long __flags; \
68 BUILD_BUG_ON(sizeof(_p_) != 4); \
71 * spin lock/unlock provide the needed smp_mb() before/after \
73 atomic_ops_lock(__flags); \
77 atomic_ops_unlock(__flags); \
/*
 * atomic_cmpxchg is same as cmpxchg
 *   LLSC: only different in data-type, semantics are exactly same
 *  !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
 *         semantics, and this lock also happens to be used by atomic_*()
 */
#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
94 #ifdef CONFIG_ARC_HAS_LLSC
/*
 * xchg: unconditionally swap @val with *@ptr using the atomic EX insn.
 * NOTE: result is taken from _val_, the caller's temporary — this macro
 * is only meant to be invoked from arch_xchg_relaxed() below, which
 * passes its local _val_ as @val.
 */
#define __xchg(ptr, val)						\
({									\
	__asm__ __volatile__(						\
	"	ex  %0, [%1]	\n"	/* set new value */		\
	: "+r"(val)							\
	: "r"(ptr)							\
	: "memory");							\
	_val_;		/* get old value */				\
})
/*
 * Relaxed (no ordering) xchg: dispatches to __xchg().
 * Only 32-bit operands are supported; any other size is a build error.
 */
#define arch_xchg_relaxed(ptr, val)					\
({									\
	__typeof__(ptr) _p_ = (ptr);					\
	__typeof__(*(ptr)) _val_ = (val);				\
									\
	switch(sizeof(*(_p_))) {					\
	case 4:								\
		_val_ = __xchg(_p_, _val_);				\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	_val_;								\
})
121 #else /* !CONFIG_ARC_HAS_LLSC */
124 * EX instructions is baseline and present in !LLSC too. But in this
125 * regime it still needs use @atomic_ops_lock spinlock to allow interop
126 * with cmpxchg() which uses spinlock in !LLSC
127 * (llist.h use xchg and cmpxchg on sama data)
130 #define arch_xchg(ptr, val) \
132 __typeof__(ptr) _p_ = (ptr); \
133 __typeof__(*(ptr)) _val_ = (val); \
135 unsigned long __flags; \
137 atomic_ops_lock(__flags); \
139 __asm__ __volatile__( \
145 atomic_ops_unlock(__flags); \
/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that following definition
 * is incorrect. But here's the rationale:
 *  SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *  LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
 *        is natively "SMP safe", no serialization required).
 *  UP  : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
 *        could clobber them. atomic_xchg() itself would be 1 insn, so it
 *        can't be clobbered by others. Thus no serialization required when
 *        atomic_xchg is involved.
 */
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))