/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>

#define __atomic_acquire_fence()                                        \
        __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()                                        \
        __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")

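/*
 * Note: these hooks are consumed by the generic atomic fallback layer
 * (include/linux/atomic/), which is expected to build the _acquire/_release
 * forms from the _relaxed ones defined below, roughly:
 *
 *      ret = arch_atomic_fetch_add_relaxed(i, v);
 *      __atomic_acquire_fence();
 *      return ret;
 */
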
static __always_inline int arch_atomic_read(const atomic_t *v)
{
        return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
        WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
        return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
        WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about (see the expansion sketch after the ATOMIC_OPS()
 * invocations below).
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)              \
static __always_inline                                                  \
void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)        \
{                                                                       \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type " zero, %1, %0"      \
                : "+A" (v->counter)                                     \
                : "r" (I)                                               \
                : "memory");                                            \
}                                                                       \

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_OP (op, asm_op, I, w, int,   )                           \
        ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

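/*
 * Expansion sketch (for reference only, not generated here): for the 32-bit
 * case, ATOMIC_OPS(add, add, i) above should expand to roughly
 *
 *      static __always_inline void arch_atomic_add(int i, atomic_t *v)
 *      {
 *              __asm__ __volatile__ (
 *                      "       amoadd.w zero, %1, %0"
 *                      : "+A" (v->counter)
 *                      : "r" (i)
 *                      : "memory");
 *      }
 *
 * i.e. a single AMO whose old value is discarded (written to the zero
 * register).  Note that sub is implemented as amoadd of the negated operand.
 */
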
#undef ATOMIC_OP
#undef ATOMIC_OPS

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)        \
static __always_inline                                                  \
c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i,             \
                                             atomic##prefix##_t *v)     \
{                                                                       \
        register c_type ret;                                            \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type " %1, %2, %0"        \
                : "+A" (v->counter), "=r" (ret)                         \
                : "r" (I)                                               \
                : "memory");                                            \
        return ret;                                                     \
}                                                                       \
static __always_inline                                                  \
c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)        \
{                                                                       \
        register c_type ret;                                            \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type ".aqrl  %1, %2, %0"  \
                : "+A" (v->counter), "=r" (ret)                         \
                : "r" (I)                                               \
                : "memory");                                            \
        return ret;                                                     \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline                                                  \
c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i,            \
                                              atomic##prefix##_t *v)    \
{                                                                       \
        return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
}                                                                       \
static __always_inline                                                  \
c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)     \
{                                                                       \
        return arch_atomic##prefix##_fetch_##op(i, v) c_op I;           \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)                                 \
        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )               \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)                                 \
        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )               \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )               \
        ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)               \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)

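/*
 * Expansion sketch (for reference only): for the 32-bit case,
 * ATOMIC_OPS(add, add, +, i) above should produce roughly
 *
 *      int arch_atomic_fetch_add_relaxed(int i, atomic_t *v);  // amoadd.w
 *      int arch_atomic_fetch_add(int i, atomic_t *v);          // amoadd.w.aqrl
 *      int arch_atomic_add_return_relaxed(int i, atomic_t *v)
 *      {
 *              return arch_atomic_fetch_add_relaxed(i, v) + i;
 *      }
 *      int arch_atomic_add_return(int i, atomic_t *v)
 *      {
 *              return arch_atomic_fetch_add(i, v) + i;
 *      }
 *
 * The fully ordered forms use a single .aqrl AMO; the _acquire/_release
 * forms are expected to come from the generic fallbacks via the fences
 * defined above.  As with the non-returning ops, sub reuses amoadd with a
 * negated operand, hence ATOMIC_OPS(sub, add, +, -i).
 */
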
#define arch_atomic_add_return_relaxed  arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed  arch_atomic_sub_return_relaxed
#define arch_atomic_add_return          arch_atomic_add_return
#define arch_atomic_sub_return          arch_atomic_sub_return

#define arch_atomic_fetch_add_relaxed   arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed   arch_atomic_fetch_sub_relaxed
#define arch_atomic_fetch_add           arch_atomic_fetch_add
#define arch_atomic_fetch_sub           arch_atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_add_return_relaxed        arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed        arch_atomic64_sub_return_relaxed
#define arch_atomic64_add_return                arch_atomic64_add_return
#define arch_atomic64_sub_return                arch_atomic64_sub_return

#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#define arch_atomic64_fetch_add         arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub         arch_atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )                      \
        ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define arch_atomic_fetch_and_relaxed   arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed    arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed   arch_atomic_fetch_xor_relaxed
#define arch_atomic_fetch_and           arch_atomic_fetch_and
#define arch_atomic_fetch_or            arch_atomic_fetch_or
#define arch_atomic_fetch_xor           arch_atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed  arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#define arch_atomic64_fetch_and         arch_atomic64_fetch_and
#define arch_atomic64_fetch_or          arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor         arch_atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/*
 * This is required to provide a full barrier on success: the trailing fence
 * sits only on the store-conditional success path, while the early-exit
 * branch skips past it.
 */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w     %[p],  %[c]\n"
                "       beq      %[p],  %[u], 1f\n"
                "       add      %[rc], %[p], %[a]\n"
                "       sc.w.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                RISCV_FULL_BARRIER
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

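/*
 * Usage sketch (illustrative): the generic layer builds atomic_add_unless()
 * and atomic_inc_not_zero() on top of this primitive, roughly
 *
 *      bool atomic_add_unless(atomic_t *v, int a, int u)
 *      {
 *              return arch_atomic_fetch_add_unless(v, a, u) != u;
 *      }
 *
 * with atomic_inc_not_zero(v) being atomic_add_unless(v, 1, 0), so the full
 * barrier above is only needed when the add actually takes place.
 */
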
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        s64 prev;
        long rc;

        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
                "       beq      %[p],  %[u], 1f\n"
                "       add      %[rc], %[p], %[a]\n"
                "       sc.d.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                RISCV_FULL_BARRIER
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif

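/*
 * The conditional helpers below all follow the same LR/SC pattern:
 * load-reserve the counter, bail out if the condition already fails,
 * otherwise attempt a release-ordered store-conditional and retry on
 * contention, with a full fence on the success path.  Semantically (not how
 * it is implemented here) arch_atomic_inc_unless_negative() behaves like
 *
 *      int old = atomic_read(v);
 *      do {
 *              if (old < 0)
 *                      return false;
 *      } while (!atomic_try_cmpxchg(v, &old, old + 1));
 *      return true;
 */
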
static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w      %[p],  %[c]\n"
                "       bltz      %[p],  1f\n"
                "       addi      %[rc], %[p], 1\n"
                "       sc.w.rl   %[rc], %[rc], %[c]\n"
                "       bnez      %[rc], 0b\n"
                RISCV_FULL_BARRIER
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return !(prev < 0);
}

#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative

static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w      %[p],  %[c]\n"
                "       bgtz      %[p],  1f\n"
                "       addi      %[rc], %[p], -1\n"
                "       sc.w.rl   %[rc], %[rc], %[c]\n"
                "       bnez      %[rc], 0b\n"
                RISCV_FULL_BARRIER
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return !(prev > 0);
}

#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w     %[p],  %[c]\n"
                "       addi     %[rc], %[p], -1\n"
                "       bltz     %[rc], 1f\n"
                "       sc.w.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                RISCV_FULL_BARRIER
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return prev - 1;
}

#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
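
/*
 * Usage sketch (illustrative): arch_atomic_dec_if_positive() returns the
 * decremented value, which is negative when no decrement took place, so a
 * hypothetical caller managing a resource count might do
 *
 *      if (atomic_dec_if_positive(&res->count) < 0)
 *              return -EBUSY;  // count was already zero
 */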

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
{
        s64 prev;
        long rc;

        __asm__ __volatile__ (
                "0:     lr.d      %[p],  %[c]\n"
                "       bltz      %[p],  1f\n"
                "       addi      %[rc], %[p], 1\n"
                "       sc.d.rl   %[rc], %[rc], %[c]\n"
                "       bnez      %[rc], 0b\n"
                RISCV_FULL_BARRIER
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return !(prev < 0);
}

#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative

static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
{
        s64 prev;
        long rc;

        __asm__ __volatile__ (
                "0:     lr.d      %[p],  %[c]\n"
                "       bgtz      %[p],  1f\n"
                "       addi      %[rc], %[p], -1\n"
                "       sc.d.rl   %[rc], %[rc], %[c]\n"
                "       bnez      %[rc], 0b\n"
                RISCV_FULL_BARRIER
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return !(prev > 0);
}

#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
        s64 prev;
        long rc;

        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
                "       addi     %[rc], %[p], -1\n"
                "       bltz     %[rc], 1f\n"
                "       sc.d.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                RISCV_FULL_BARRIER
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return prev - 1;
}

#define arch_atomic64_dec_if_positive   arch_atomic64_dec_if_positive
#endif

#endif /* _ASM_RISCV_ATOMIC_H */