/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
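
/*
 * These fences are what the generic linux/atomic.h layer pairs with the
 * _relaxed operations below to build the _acquire/_release forms; roughly
 * (illustrative sketch of the generic fallback, not code from this file):
 *
 *	#define __atomic_op_acquire(op, args...)			\
 *	({								\
 *		typeof(op##_relaxed(args)) __ret = op##_relaxed(args);	\
 *		__atomic_acquire_fence();				\
 *		__ret;							\
 *	})
 */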

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
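
/*
 * Note: the "%U1%X1" and "%U0%X0" operand modifiers let the compiler emit
 * the update ("u") and indexed ("x") forms of lwz/stw when it picks such
 * an addressing mode for the "m" operand.
 */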

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
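
/*
 * sub is instantiated with "subf" (subtract from): subf rD,rA,rB
 * computes rB - rA, so "subf %0,%2,%0" in the templates above yields
 * counter - a, as atomic_sub() and friends require.
 */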

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc
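
/*
 * addic (add immediate carrying) updates the carry bit in the XER, so
 * the inc/dec routines clobber "xer" in addition to "cc".
 */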

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
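
/*
 * Typical cmpxchg-loop usage (hypothetical helper, illustrative sketch
 * only): a saturating increment that never wraps past INT_MAX.
 *
 *	static inline void atomic_inc_saturated(atomic_t *v)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old != INT_MAX) {
 *			int tmp = atomic_cmpxchg(v, old, old + 1);
 *
 *			if (tmp == old)
 *				break;
 *			old = tmp;
 *		}
 *	}
 */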

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
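
/*
 * C-level equivalent of the loop above (illustrative sketch only):
 *
 *	int old = atomic_read(v);
 *
 *	while (old != u) {
 *		int tmp = atomic_cmpxchg(v, old, old + a);
 *
 *		if (tmp == old)
 *			break;
 *		old = tmp;
 *	}
 *	return old;
 */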

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
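
/*
 * atomic_inc_not_zero() is the building block of the common "take a
 * reference only while the object is still live" pattern, e.g.
 * (hypothetical helper, illustrative sketch only):
 *
 *	static inline struct foo *foo_get(struct foo *p)
 *	{
 *		return atomic_inc_not_zero(&p->refcnt) ? p : NULL;
 *	}
 */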

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
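
/*
 * atomic_dec_if_positive() suits counting-semaphore style "try"
 * operations, e.g. (hypothetical helper, illustrative sketch only):
 *
 *	static inline bool foo_trydown(atomic_t *count)
 *	{
 *		return atomic_dec_if_positive(count) >= 0;
 *	}
 *
 * A negative return value means the count was not positive and was
 * left unchanged.
 */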

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 atomic64_read(const atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
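
/*
 * Aligned doubleword loads and stores are single-copy atomic on 64-bit
 * PowerPC, so plain ld/std suffice for atomic64_read()/atomic64_set();
 * only the read-modify-write operations below need ldarx/stdcx.
 */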

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
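
/*
 * cmpxchg()/xchg() dispatch on sizeof(*ptr) in asm/cmpxchg.h, so the
 * atomic64_* wrappers above resolve to the 8-byte ldarx/stdcx. based
 * implementations.
 */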

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	/* Compare rather than truncate: a non-zero s64 whose low 32 bits
	 * are zero must still report success through the int return. */
	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */