/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter),	/* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r"	(orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence (e.g. one implemented with a spinlock).
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

#endif
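/*
 * Illustrative interleaving (a sketch, not code in this file): why the plain
 * store above must take the same lock as the emulated RMW ops. Assuming CPU1
 * runs the spinlock based atomic_add() defined below while CPU0 stores
 * without taking the lock:
 *
 *	CPU0				CPU1
 *	----				----
 *					atomic_ops_lock(flags);
 *					tmp = v->counter;	// reads old value
 *	WRITE_ONCE(v->counter, i);	// store not under the lock
 *					v->counter = tmp + 1;	// overwrites i
 *					atomic_ops_unlock(flags);
 *
 * The unlocked store is silently lost, which is why atomic_set() takes
 * atomic_ops_lock() even though it compiles down to a single store.
 */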
/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot		atomic_andnot
#define atomic_fetch_andnot	atomic_fetch_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
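/*
 * Illustrative sketch (not additional definitions): the expansions above
 * generate, among others:
 *
 *	void atomic_add(int i, atomic_t *v);
 *	int  atomic_add_return(int i, atomic_t *v);	// returns new value
 *	int  atomic_fetch_add(int i, atomic_t *v);	// returns old value
 *	void atomic_and(int i, atomic_t *v);
 *	int  atomic_fetch_andnot(int i, atomic_t *v);
 *
 * Note the bitwise ops (and/andnot/or/xor) deliberately get no *_return
 * flavour, only the fetch_* one. A typical (hypothetical) caller:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	if (atomic_add_return(1, &nr_users) == 1)
 *		first_user_init();	// hypothetical helper
 */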
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD; STD suffices
 */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }
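/*
 * Illustrative declaration (hypothetical): the __aligned(8) on the counter
 * is what satisfies the 64-bit alignment requirement of LLOCKD/SCONDD noted
 * above.
 *
 *	static atomic64_t total_bytes = ATOMIC64_INIT(0);
 */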
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, s64 a)
{
	/*
	 * This could have been a simple assignment in "C" but would need an
	 * explicit volatile. Otherwise gcc optimizers could elide the store,
	 * which borked the atomic64 self-test.
	 * In the inline asm version, the memory clobber is needed for the
	 * exact same reason: to tell gcc about the store.
	 *
	 * This however is not needed for the sibling atomic64_add() etc since
	 * both load and store are explicitly done in inline asm. As long as
	 * the API is used for each access, gcc has no way to optimize away
	 * any load/store.
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}
#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	s64 val;							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}
#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	s64 val;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}
#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)		\
{									\
	s64 val, orig;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd  %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot		atomic64_andnot
#define atomic64_fetch_andnot	atomic64_fetch_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)
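/*
 * As with the 32-bit ops, each ATOMIC64_OPS() expansion above generates the
 * plain, *_return and fetch_* flavours, e.g. (illustrative):
 *
 *	atomic64_add(a, &v);			// no return value
 *	s64 new = atomic64_add_return(a, &v);	// returns updated value
 *	s64 old = atomic64_fetch_or(a, &v);	// returns prior value
 */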
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64
atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}
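/*
 * Minimal usage sketch of the cmpxchg primitive above, using the usual
 * compare-and-swap retry idiom (hypothetical helper, not part of the API):
 *
 *	static inline s64 atomic64_add_clamped(atomic64_t *v, s64 a, s64 max)
 *	{
 *		s64 old, new;
 *
 *		do {
 *			old = atomic64_read(v);
 *			new = old + a;
 *			if (new > max)
 *				new = max;
 *		} while (atomic64_cmpxchg(v, old, new) != old);
 *
 *		return old;
 *	}
 */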
static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}
/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
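/*
 * Usage sketch (hypothetical): a down-counting resource pool that must not
 * go negative. A negative return value means nothing was claimed, since the
 * old value minus 1 is returned even when no decrement happened:
 *
 *	if (atomic64_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;
 */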
/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if it was not @u.
 * Returns the old value of @v.
 */
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 old, temp;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"2:				\n"
	"	add.f   %L1, %L0, %L3	\n"
	"	adc     %H1, %H0, %H3	\n"
	"	scondd  %1, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(old), "=&r" (temp)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
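/*
 * Usage sketch (hypothetical): the classic "increment unless zero" refcount
 * pattern is built directly on this primitive:
 *
 *	static inline bool my_ref_get_unless_zero(atomic64_t *ref)
 *	{
 *		// an old value of 0 means the object is already going away
 *		return atomic64_fetch_add_unless(ref, 1, 0) != 0;
 *	}
 */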
#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */