/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */
/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_mb() is unconditionally inserted into the
 * _relaxed variants, which are used to build the barriered versions.
 * Avoid redundant back-to-back fences in the _acquire and _fence
 * versions.
 */
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()
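/*
 * Illustrative sketch (not part of this header): the generic fallback
 * layer (include/linux/atomic/atomic-arch-fallback.h in recent kernels)
 * composes the barriered variants from the _relaxed ones roughly as:
 *
 *	static __always_inline int
 *	arch_atomic_add_return_acquire(int i, atomic_t *v)
 *	{
 *		int ret = arch_atomic_add_return_relaxed(i, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *	}
 *
 * Because the _relaxed variants below already end in smp_mb(), defining
 * __atomic_acquire_fence() and __atomic_post_full_fence() as empty
 * avoids emitting a redundant back-to-back fence.
 */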
#define ATOMIC64_INIT(i)	{ (i) }

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic64_read(v)	READ_ONCE((v)->counter)

#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))
#define arch_atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
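/*
 * Sketch of the resulting code shape (illustrative only, using "add"):
 *
 *	1:	ldl_l	%0,%1		# load-locked v->counter
 *		addl	%0,%2,%0	# apply the operation
 *		stl_c	%0,%1		# store-conditional; %0 == 0 on failure
 *		beq	%0,2f		# rarely-taken forward branch...
 *	.subsection 2
 *	2:	br	1b		# ...to an out-of-line retry, branch back
 *	.previous
 */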
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void arch_atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}
#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}
#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v)	\
{									\
	s64 temp;							\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}
#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ s64							\
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)		\
{									\
	s64 temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}
#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ s64							\
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)		\
{									\
	s64 temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}
#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#define arch_atomic_andnot			arch_atomic_andnot
#define arch_atomic64_andnot			arch_atomic64_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define arch_atomic64_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))

#define arch_atomic_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
/**
 * arch_atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
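/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * generic layer builds helpers such as atomic_inc_not_zero() on top of
 * this, e.g. a refcount-style "get" that refuses to resurrect zero:
 *
 *	static bool obj_tryget(atomic_t *refcnt)
 *	{
 *		return arch_atomic_fetch_add_unless(refcnt, 1, 0) != 0;
 *	}
 */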
/**
 * arch_atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addq	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return old;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/*
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
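/*
 * Usage sketch (hypothetical, not part of this header): since the
 * return value is the old value minus one, a caller can test and
 * consume a positive count in one step; sem_count here is some
 * atomic64_t counter:
 *
 *	if (arch_atomic64_dec_if_positive(&sem_count) >= 0) {
 *		// a token was available and has been taken
 *	}
 */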
#endif /* _ALPHA_ATOMIC_H */