/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

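/*
 * Implementations of the arm64 atomic_t/atomic64_t and cmpxchg
 * operations in terms of the ARMv8.1 LSE instructions (LD<op>, ST<op>,
 * CAS and CASP). The arch_atomic_*() wrappers select these over the
 * LL/SC fallbacks when the CPU supports the LSE atomics.
 */
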
#define ATOMIC_OP(op, asm_op) \
static __always_inline void \
__lse_atomic_##op(int i, atomic_t *v) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op " %w[i], %[v]\n" \
	: [v] "+Q" (v->counter) \
	: [i] "r" (i)); \
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

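/*
 * As an illustration, ATOMIC_OP(add, stadd) expands to roughly:
 *
 *	static __always_inline void __lse_atomic_add(int i, atomic_t *v)
 *	{
 *		asm volatile(__LSE_PREAMBLE
 *		" stadd %w[i], %[v]\n"
 *		: [v] "+Q" (v->counter)
 *		: [i] "r" (i));
 *	}
 *
 * i.e. a single store-op instruction with no required ordering and no
 * return value.
 */
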
static __always_inline void __lse_atomic_sub(int i, atomic_t *v)
{
	__lse_atomic_add(-i, v);
}

#undef ATOMIC_OP

#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static __always_inline int \
__lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
	int old; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op #mb " %w[i], %w[old], %[v]" \
	: [v] "+Q" (v->counter), \
	  [old] "=r" (old) \
	: [i] "r" (i) \
	: cl); \
\
	return old; \
}

#define ATOMIC_FETCH_OPS(op, asm_op) \
	ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \
	ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \
	ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \
	ATOMIC_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

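/*
 * Each ATOMIC_FETCH_OPS() invocation generates the four ordering
 * variants by suffixing the instruction mnemonic: for "add", the
 * relaxed form uses LDADD, _acquire LDADDA, _release LDADDL and the
 * fully-ordered form LDADDAL. All but the relaxed variant also clobber
 * "memory" to stop the compiler reordering accesses around them.
 */
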
#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

#define ATOMIC_FETCH_OP_SUB(name) \
static __always_inline int \
__lse_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
	return __lse_atomic_fetch_add##name(-i, v); \
}

ATOMIC_FETCH_OP_SUB(_relaxed)
ATOMIC_FETCH_OP_SUB(_acquire)
ATOMIC_FETCH_OP_SUB(_release)
ATOMIC_FETCH_OP_SUB( )

#undef ATOMIC_FETCH_OP_SUB

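/*
 * LSE provides no atomic subtract instruction, so the sub ops are the
 * corresponding add ops applied to the negated operand.
 */
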
#define ATOMIC_OP_ADD_SUB_RETURN(name) \
static __always_inline int \
__lse_atomic_add_return##name(int i, atomic_t *v) \
{ \
	return __lse_atomic_fetch_add##name(i, v) + i; \
} \
\
static __always_inline int \
__lse_atomic_sub_return##name(int i, atomic_t *v) \
{ \
	return __lse_atomic_fetch_sub##name(i, v) - i; \
}

ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC_OP_ADD_SUB_RETURN(_acquire)
ATOMIC_OP_ADD_SUB_RETURN(_release)
ATOMIC_OP_ADD_SUB_RETURN( )

#undef ATOMIC_OP_ADD_SUB_RETURN

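/*
 * The *_return forms are derived from the fetch forms: the LD<op>
 * instruction already returns the old value, so applying the operation
 * once more in a register yields the new value without a second atomic
 * access.
 */
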
static __always_inline void __lse_atomic_and(int i, atomic_t *v)
{
	return __lse_atomic_andnot(~i, v);
}

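/*
 * There is no native atomic AND: STCLR/LDCLR clear the bits that are
 * set in the operand (andnot semantics), so AND is implemented as
 * andnot of the complemented mask. The fetch_and variants below use
 * the same trick.
 */
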
#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static __always_inline int \
__lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
	return __lse_atomic_fetch_andnot##name(~i, v); \
}

ATOMIC_FETCH_OP_AND(_relaxed, )
ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC_FETCH_OP_AND(_release, l, "memory")
ATOMIC_FETCH_OP_AND( , al, "memory")

#undef ATOMIC_FETCH_OP_AND

#define ATOMIC64_OP(op, asm_op) \
static __always_inline void \
__lse_atomic64_##op(s64 i, atomic64_t *v) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op " %[i], %[v]\n" \
	: [v] "+Q" (v->counter) \
	: [i] "r" (i)); \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

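/*
 * The 64-bit ops mirror the 32-bit ones above; the only difference is
 * that the operands use the full x registers (%[i] rather than %w[i]).
 */
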
static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	__lse_atomic64_add(-i, v);
}

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static __always_inline long \
__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \
	s64 old; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op #mb " %[i], %[old], %[v]" \
	: [v] "+Q" (v->counter), \
	  [old] "=r" (old) \
	: [i] "r" (i) \
	: cl); \
\
	return old; \
}

#define ATOMIC64_FETCH_OPS(op, asm_op) \
	ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \
	ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \
	ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \
	ATOMIC64_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_FETCH_OP_SUB(name) \
static __always_inline long \
__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
{ \
	return __lse_atomic64_fetch_add##name(-i, v); \
}

ATOMIC64_FETCH_OP_SUB(_relaxed)
ATOMIC64_FETCH_OP_SUB(_acquire)
ATOMIC64_FETCH_OP_SUB(_release)
ATOMIC64_FETCH_OP_SUB( )

#undef ATOMIC64_FETCH_OP_SUB

#define ATOMIC64_OP_ADD_SUB_RETURN(name) \
static __always_inline long \
__lse_atomic64_add_return##name(s64 i, atomic64_t *v) \
{ \
	return __lse_atomic64_fetch_add##name(i, v) + i; \
} \
\
static __always_inline long \
__lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
{ \
	return __lse_atomic64_fetch_sub##name(i, v) - i; \
}

ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
ATOMIC64_OP_ADD_SUB_RETURN(_release)
ATOMIC64_OP_ADD_SUB_RETURN( )

#undef ATOMIC64_OP_ADD_SUB_RETURN

static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	return __lse_atomic64_andnot(~i, v);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static __always_inline long \
__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \
	return __lse_atomic64_fetch_andnot##name(~i, v); \
}

ATOMIC64_FETCH_OP_AND(_relaxed, )
ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC64_FETCH_OP_AND(_release, l, "memory")
ATOMIC64_FETCH_OP_AND( , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

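/*
 * Decrement v->counter unless the result would be negative, returning
 * the decremented value either way; a negative return value therefore
 * means no store was performed. LSE has no single instruction for
 * this, so it is built as a CASAL retry loop below. Note the trick of
 * reusing the pointer 'v' as the [ret] operand: on exit its register
 * holds the result, which is returned as (long)v.
 */
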
static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	__LSE_PREAMBLE
	"1: ldr %x[tmp], %[v]\n"
	" subs %[ret], %x[tmp], #1\n"
	" b.lt 2f\n"
	" casal %x[tmp], %[ret], %[v]\n"
	" sub %x[tmp], %x[tmp], #1\n"
	" sub %x[tmp], %x[tmp], %[ret]\n"
	" cbnz %x[tmp], 1b\n"
	"2:"
	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	:
	: "cc", "memory");

	return (long)v;
}

#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
static __always_inline u##sz \
__lse__cmpxchg_case_##name##sz(volatile void *ptr, \
			       u##sz old, \
			       u##sz new) \
{ \
	register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
	register u##sz x1 asm ("x1") = old; \
	register u##sz x2 asm ("x2") = new; \
	unsigned long tmp; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" mov %" #w "[tmp], %" #w "[old]\n" \
	" cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
	" mov %" #w "[ret], %" #w "[tmp]" \
	: [ret] "+r" (x0), [v] "+Q" (*(u##sz *)ptr), \
	  [tmp] "=&r" (tmp) \
	: [old] "r" (x1), [new] "r" (x2) \
	: cl); \
\
	return x0; \
}

__CMPXCHG_CASE(w, b, , 8, )
__CMPXCHG_CASE(w, h, , 16, )
__CMPXCHG_CASE(w, , , 32, )
__CMPXCHG_CASE(x, , , 64, )
__CMPXCHG_CASE(w, b, acq_, 8, a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16, a, "memory")
__CMPXCHG_CASE(w, , acq_, 32, a, "memory")
__CMPXCHG_CASE(x, , acq_, 64, a, "memory")
__CMPXCHG_CASE(w, b, rel_, 8, l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16, l, "memory")
__CMPXCHG_CASE(w, , rel_, 32, l, "memory")
__CMPXCHG_CASE(x, , rel_, 64, l, "memory")
__CMPXCHG_CASE(w, b, mb_, 8, al, "memory")
__CMPXCHG_CASE(w, h, mb_, 16, al, "memory")
__CMPXCHG_CASE(w, , mb_, 32, al, "memory")
__CMPXCHG_CASE(x, , mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

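/*
 * As an example of the above, __CMPXCHG_CASE(w, , acq_, 32, a, "memory")
 * generates __lse__cmpxchg_case_acq_32(), whose core is a single
 * "casa" instruction on w registers. Every variant returns the value
 * observed in memory, which equals 'old' iff the exchange took place.
 */
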
#define __CMPXCHG_DBL(name, mb, cl...) \
static __always_inline long \
__lse__cmpxchg_double##name(unsigned long old1, \
			    unsigned long old2, \
			    unsigned long new1, \
			    unsigned long new2, \
			    volatile void *ptr) \
{ \
	unsigned long oldval1 = old1; \
	unsigned long oldval2 = old2; \
	register unsigned long x0 asm ("x0") = old1; \
	register unsigned long x1 asm ("x1") = old2; \
	register unsigned long x2 asm ("x2") = new1; \
	register unsigned long x3 asm ("x3") = new2; \
	register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n" \
	" eor %[old1], %[old1], %[oldval1]\n" \
	" eor %[old2], %[old2], %[oldval2]\n" \
	" orr %[old1], %[old1], %[old2]" \
	: [old1] "+&r" (x0), [old2] "+&r" (x1), \
	  [v] "+Q" (*(unsigned long *)ptr) \
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
	: cl); \
\
	return x0; \
}

__CMPXCHG_DBL( , )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif /* __ASM_ATOMIC_LSE_H */