/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>
/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence() \
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence() \
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
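/*
 * Illustrative sketch, not part of this header: the generic fallback layer
 * in <linux/atomic.h> is expected to build the acquire/release forms from
 * the _relaxed primitives below plus the two fences above, roughly like
 * this (simplified for illustration):
 *
 *	static __always_inline int
 *	arch_atomic_add_return_acquire(int i, atomic_t *v)
 *	{
 *		int ret = arch_atomic_add_return_relaxed(i, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *	}
 *
 *	static __always_inline int
 *	arch_atomic_add_return_release(int i, atomic_t *v)
 *	{
 *		__atomic_release_fence();
 *		return arch_atomic_add_return_relaxed(i, v);
 *	}
 */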
static __inline__ int arch_atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));

	return t;
}

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}
#define ATOMIC_OP(op, asm_op) \
static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "\n" \
#asm_op " %0,%2,%0\n" \
" stwcx. %0,0,%3 \n" \
: "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
#asm_op " %0,%2,%0\n" \
: "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
__asm__ __volatile__( \
"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
#asm_op " %1,%3,%0\n" \
: "=&r" (res), "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \

#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP(op, asm_op) \
ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
ATOMIC_FETCH_OP_RELAXED(op, asm_op)
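/*
 * Illustrative sketch, not part of this header: ATOMIC_OPS(add, add)
 * expands to arch_atomic_add(), arch_atomic_add_return_relaxed() and
 * arch_atomic_fetch_add_relaxed().  Ignoring memory ordering, each body
 * is an lwarx/stwcx. retry loop whose effect is roughly the C below;
 * example_fetch_add_relaxed() and try_store_conditional() are
 * hypothetical names used only for this sketch:
 *
 *	int example_fetch_add_relaxed(int a, atomic_t *v)
 *	{
 *		int old;
 *
 *		do {
 *			old = v->counter;		// lwarx: load with reservation
 *		} while (!try_store_conditional(v, old + a));	// stwcx.: store
 *							// only if reservation held
 *		return old;
 *	}
 */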
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed

#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP(op, asm_op) \
ATOMIC_FETCH_OP_RELAXED(op, asm_op)

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
static __inline__ void arch_atomic_inc(atomic_t *v)
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc\n\
: "=&r" (t), "+m" (v->counter)
#define arch_atomic_inc arch_atomic_inc

static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
: "=&r" (t), "+m" (v->counter)

static __inline__ void arch_atomic_dec(atomic_t *v)
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec\n\
: "=&r" (t), "+m" (v->counter)
#define arch_atomic_dec arch_atomic_dec

static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
: "=&r" (t), "+m" (v->counter)

#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
#define arch_atomic_cmpxchg(v, o, n) \
	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_cmpxchg_relaxed(v, o, n) \
	arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
#define arch_atomic_cmpxchg_acquire(v, o, n) \
	arch_cmpxchg_acquire(&((v)->counter), (o), (n))

#define arch_atomic_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
#define arch_atomic_xchg_relaxed(v, new) \
	arch_xchg_relaxed(&((v)->counter), (new))
/*
 * Don't want to override the generic atomic_try_cmpxchg_acquire, because
 * we add a lock hint to the lwarx, which may not be wanted for the
 * _acquire case (and is not used by the other _acquire variants so it
 * would be a surprise).
 */
static __always_inline bool
arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
__asm__ __volatile__ (
"1:\t" PPC_LWARX(%0,0,%2,1) " # atomic_try_cmpxchg_acquire \n"
"\t" PPC_ACQUIRE_BARRIER " \n"
: "=&r" (r), "+m" (v->counter)
: "r" (&v->counter), "r" (o), "r" (new)
if (unlikely(r != o))
	*old = r;
return likely(r == o);
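/*
 * Illustrative sketch, not part of this header: ignoring the lock hint
 * and the acquire barrier, the operation above behaves like an ordinary
 * try_cmpxchg; example_try_cmpxchg() is a hypothetical name used only
 * for this sketch:
 *
 *	static inline bool example_try_cmpxchg(atomic_t *v, int *old, int new)
 *	{
 *		int prev = arch_cmpxchg_acquire(&v->counter, *old, new);
 *
 *		if (prev != *old) {
 *			*old = prev;	// report the value actually observed
 *			return false;
 *		}
 *		return true;
 *	}
 */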
/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
PPC_ATOMIC_EXIT_BARRIER
: "r" (&v->counter), "r" (a), "r" (u)

#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
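/*
 * Illustrative usage sketch, not part of this header: a typical caller
 * bumps a counter only while it is non-zero, e.g. taking a reference on
 * an object that may already be going away (obj->refs is a hypothetical
 * atomic_t used only for this sketch):
 *
 *	if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// was already zero, no reference taken
 */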
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t1), "=&r" (t2)
: "cc", "xer", "memory");

#define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
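/*
 * Illustrative sketch, not part of this header: semantically this is the
 * a == 1, u == 0 special case of fetch_add_unless(), i.e. roughly:
 *
 *	static inline bool example_inc_not_zero(atomic_t *v)
 *	{
 *		return arch_atomic_fetch_add_unless(v, 1, 0) != 0;
 *	}
 *
 * the asm version above simply folds that zero check into the
 * lwarx/stwcx. loop; example_inc_not_zero() is a hypothetical name used
 * only for this sketch.
 */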
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable v was not decremented.
 */
static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
__asm__ __volatile__(
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
PPC_ATOMIC_EXIT_BARRIER

#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
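/*
 * Illustrative usage sketch, not part of this header: dec_if_positive()
 * suits "take one token if any are left" patterns (pool->available is a
 * hypothetical atomic_t used only for this sketch):
 *
 *	if (atomic_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;	// counter was already <= 0, nothing taken
 */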
#ifdef __powerpc64__

#define ATOMIC64_INIT(i) { (i) }

static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));

	return t;
}

static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \
static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
__asm__ __volatile__( \
"1: ldarx %0,0,%3 # atomic64_" #op "\n" \
#asm_op " %0,%2,%0\n" \
" stdcx. %0,0,%3 \n" \
: "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
__asm__ __volatile__( \
"1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
#asm_op " %0,%2,%0\n" \
" stdcx. %0,0,%3\n" \
: "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
__asm__ __volatile__( \
"1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
#asm_op " %1,%3,%0\n" \
" stdcx. %1,0,%4\n" \
: "=&r" (res), "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \

#define ATOMIC64_OPS(op, asm_op) \
ATOMIC64_OP(op, asm_op) \
ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed

#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#define ATOMIC64_OPS(op, asm_op) \
ATOMIC64_OP(op, asm_op) \
ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(xor, xor)

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
static __inline__ void arch_atomic64_inc(atomic64_t *v)
__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_inc\n\
: "=&r" (t), "+m" (v->counter)
#define arch_atomic64_inc arch_atomic64_inc

static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
: "=&r" (t), "+m" (v->counter)

static __inline__ void arch_atomic64_dec(atomic64_t *v)
__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_dec\n\
: "=&r" (t), "+m" (v->counter)
#define arch_atomic64_dec arch_atomic64_dec

static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
__asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
: "=&r" (t), "+m" (v->counter)

#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
__asm__ __volatile__(
PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
PPC_ATOMIC_EXIT_BARRIER
: "cc", "xer", "memory");

#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#define arch_atomic64_cmpxchg(v, o, n) \
	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
	arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
#define arch_atomic64_cmpxchg_acquire(v, o, n) \
	arch_cmpxchg_acquire(&((v)->counter), (o), (n))

#define arch_atomic64_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
#define arch_atomic64_xchg_relaxed(v, new) \
	arch_xchg_relaxed(&((v)->counter), (new))
/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\
PPC_ATOMIC_EXIT_BARRIER
: "r" (&v->counter), "r" (a), "r" (u)

#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t1), "=&r" (t2)
: "cc", "xer", "memory");

#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */