/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */
#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H
#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}
#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline s64 atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic64_set(atomic64_t *v, s64 i)
{
	WRITE_ONCE(v->counter, i);
}
#endif
/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.  (An illustrative expansion sketch follows the
 * ATOMIC_OPS() invocations below.)
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
static __always_inline							\
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
{									\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " zero, %1, %0"	\
		: "+A" (v->counter)					\
		: "r" (I)						\
		: "memory");						\
}
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )				\
	ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif
ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
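/*
 * Illustrative expansion sketch, not part of the original header: for the
 * 32-bit case, ATOMIC_OPS(add, add, i) above generates roughly the following
 * non-returning helper (a mechanical expansion of ATOMIC_OP(); the exact
 * generated text may differ slightly):
 *
 *	static __always_inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			"	amoadd.w zero, %1, %0"
 *			: "+A" (v->counter)
 *			: "r" (i)
 *			: "memory");
 *	}
 */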
/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.  (An
 * expansion sketch follows the ATOMIC_OPS() invocations below.)
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,			\
					     atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}									\
static __always_inline							\
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type ".aqrl  %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_##op##_return_relaxed(c_type i,		\
					      atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
}									\
static __always_inline							\
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##op(i, v) c_op I;		\
}
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )		\
	ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif
ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)
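/*
 * Illustrative expansion sketch, not part of the original header: for the
 * 32-bit case, ATOMIC_OPS(add, add, +, i) above generates the relaxed and
 * fully ordered fetch variants plus return variants layered on top of them,
 * roughly:
 *
 *	static __always_inline int atomic_fetch_add(int i, atomic_t *v)
 *	{
 *		register int ret;
 *		__asm__ __volatile__ (
 *			"	amoadd.w.aqrl  %1, %2, %0"
 *			: "+A" (v->counter), "=r" (ret)
 *			: "r" (i)
 *			: "memory");
 *		return ret;
 *	}
 *
 *	static __always_inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		return atomic_fetch_add(i, v) + i;
 *	}
 *
 * atomic_fetch_add() returns the value observed before the add,
 * atomic_add_return() the value after it.
 */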
#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_add_return		atomic_add_return
#define atomic_sub_return		atomic_sub_return

#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
#define atomic_fetch_add		atomic_fetch_add
#define atomic_fetch_sub		atomic_fetch_sub
#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_add_return		atomic64_add_return
#define atomic64_sub_return		atomic64_sub_return

#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
#define atomic64_fetch_add		atomic64_fetch_add
#define atomic64_fetch_sub		atomic64_fetch_sub
#endif

#undef ATOMIC_OPS
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )			\
	ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif
ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)
#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
#define atomic_fetch_and		atomic_fetch_and
#define atomic_fetch_or			atomic_fetch_or
#define atomic_fetch_xor		atomic_fetch_xor
#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#define atomic64_fetch_and		atomic64_fetch_and
#define atomic64_fetch_or		atomic64_fetch_or
#define atomic64_fetch_xor		atomic64_fetch_xor
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
/* This is required to provide a full barrier on success. */
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
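/*
 * Illustrative usage sketch, not part of the original header: the LR/SC loop
 * above only performs the add when the counter does not already hold 'u', and
 * the trailing fence makes the successful case fully ordered.  A typical
 * caller, in the style of kref_get_unless_zero(), takes a reference only
 * while the count is nonzero (my_obj_get() is a hypothetical name):
 *
 *	static inline bool my_obj_get(atomic_t *refcount)
 *	{
 *		return atomic_fetch_add_unless(refcount, 1, 0) != 0;
 *	}
 */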
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif
/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics
 * as {cmp,}xchg and the operations that return, so they need a full barrier.
 */
#define ATOMIC_OP(c_t, prefix, size)					\
static __always_inline							\
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_relaxed(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_acquire(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_release(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)			\
{									\
	return __xchg(&(v->counter), n, size);				\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_release(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)	\
{									\
	return __cmpxchg(&(v->counter), o, n, size);			\
}
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS()							\
	ATOMIC_OP(int,   , 4)
#else
#define ATOMIC_OPS()							\
	ATOMIC_OP(int,   , 4)						\
	ATOMIC_OP(s64, 64, 8)
#endif

ATOMIC_OPS()
#define atomic_xchg_relaxed atomic_xchg_relaxed
#define atomic_xchg_acquire atomic_xchg_acquire
#define atomic_xchg_release atomic_xchg_release
#define atomic_xchg atomic_xchg
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
#define atomic_cmpxchg_release atomic_cmpxchg_release
#define atomic_cmpxchg atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP
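/*
 * Illustrative usage sketch, not part of the original header: the fully
 * ordered atomic_cmpxchg() defined above is what generic compare-and-swap
 * retry loops rely on.  A hypothetical "increment unless a limit is reached"
 * helper might look like (my_inc_below() is not a real kernel API):
 *
 *	static inline bool my_inc_below(atomic_t *v, int limit)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < limit) {
 *			int tmp = atomic_cmpxchg(v, old, old + 1);
 *
 *			if (tmp == old)
 *				return true;
 *			old = tmp;
 *		}
 *		return false;
 *	}
 */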
static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
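/*
 * Illustrative usage sketch, not part of the original header: the LR/SC loop
 * above only stores when the result of the subtraction stays non-negative,
 * and returns the new value on success or a negative value if the counter
 * was already too small.  That makes atomic_dec_if_positive() usable as a
 * "take one token if any are left" primitive (my_take_token() is a
 * hypothetical name):
 *
 *	static inline bool my_take_token(atomic_t *tokens)
 *	{
 *		return atomic_dec_if_positive(tokens) >= 0;
 *	}
 */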
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */