linux-2.6-microblaze.git: arch/arm64/include/asm/atomic_lse.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

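/*
 * ATOMIC_OP() generates the "store" forms of the LSE atomics (STCLR,
 * STSET, STEOR, STADD), which update memory without returning a value
 * and therefore need no ordering suffix or clobber list.
 */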
#define ATOMIC_OP(op, asm_op)                                           \
static inline void __lse_atomic_##op(int i, atomic_t *v)                \
{                                                                       \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        "       " #asm_op "     %w[i], %[v]\n"                          \
        : [v] "+Q" (v->counter)                                         \
        : [i] "r" (i));                                                 \
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

static inline void __lse_atomic_sub(int i, atomic_t *v)
{
        __lse_atomic_add(-i, v);
}

#undef ATOMIC_OP

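/*
 * ATOMIC_FETCH_OP() generates the value-returning LD{CLR,SET,EOR,ADD}
 * forms. "mb" selects the instruction's ordering suffix (none, a, l or
 * al) and "cl" the matching clobber list for the ordered variants.
 */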
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)                    \
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)     \
{                                                                       \
        int old;                                                        \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        "       " #asm_op #mb " %w[i], %w[old], %[v]"                   \
        : [v] "+Q" (v->counter),                                        \
          [old] "=r" (old)                                              \
        : [i] "r" (i)                                                   \
        : cl);                                                          \
                                                                        \
        return old;                                                     \
}

#define ATOMIC_FETCH_OPS(op, asm_op)                                    \
        ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)                       \
        ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")             \
        ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")             \
        ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

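/* LSE has no atomic subtract instruction: add the negated value instead. */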
#define ATOMIC_FETCH_OP_SUB(name)                                       \
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)      \
{                                                                       \
        return __lse_atomic_fetch_add##name(-i, v);                     \
}

ATOMIC_FETCH_OP_SUB(_relaxed)
ATOMIC_FETCH_OP_SUB(_acquire)
ATOMIC_FETCH_OP_SUB(_release)
ATOMIC_FETCH_OP_SUB(        )

#undef ATOMIC_FETCH_OP_SUB

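/*
 * The *_return forms are built from the fetch forms by re-applying the
 * operation to the old value that the fetch returned.
 */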
#define ATOMIC_OP_ADD_SUB_RETURN(name)                                  \
static inline int __lse_atomic_add_return##name(int i, atomic_t *v)     \
{                                                                       \
        return __lse_atomic_fetch_add##name(i, v) + i;                  \
}                                                                       \
                                                                        \
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)     \
{                                                                       \
        return __lse_atomic_fetch_sub##name(i, v) - i;                  \
}

ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC_OP_ADD_SUB_RETURN(_acquire)
ATOMIC_OP_ADD_SUB_RETURN(_release)
ATOMIC_OP_ADD_SUB_RETURN(        )

#undef ATOMIC_OP_ADD_SUB_RETURN

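/*
 * There is likewise no atomic AND: LDCLR/STCLR clear the bits set in the
 * source register, so AND with a mask is andnot with the inverted mask.
 */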
static inline void __lse_atomic_and(int i, atomic_t *v)
{
        __lse_atomic_andnot(~i, v);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)                            \
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)      \
{                                                                       \
        return __lse_atomic_fetch_andnot##name(~i, v);                  \
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

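/*
 * The atomic64_t operations below mirror the 32-bit ones above, using
 * X registers (no %w modifier) and 64-bit operands.
 */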
#define ATOMIC64_OP(op, asm_op)                                         \
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)            \
{                                                                       \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        "       " #asm_op "     %[i], %[v]\n"                           \
        : [v] "+Q" (v->counter)                                         \
        : [i] "r" (i));                                                 \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
        __lse_atomic64_add(-i, v);
}

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)                  \
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{                                                                       \
        s64 old;                                                        \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        "       " #asm_op #mb " %[i], %[old], %[v]"                     \
        : [v] "+Q" (v->counter),                                        \
          [old] "=r" (old)                                              \
        : [i] "r" (i)                                                   \
        : cl);                                                          \
                                                                        \
        return old;                                                     \
}

#define ATOMIC64_FETCH_OPS(op, asm_op)                                  \
        ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)                     \
        ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")           \
        ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")           \
        ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_FETCH_OP_SUB(name)                                     \
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
{                                                                       \
        return __lse_atomic64_fetch_add##name(-i, v);                   \
}

ATOMIC64_FETCH_OP_SUB(_relaxed)
ATOMIC64_FETCH_OP_SUB(_acquire)
ATOMIC64_FETCH_OP_SUB(_release)
ATOMIC64_FETCH_OP_SUB(        )

#undef ATOMIC64_FETCH_OP_SUB

#define ATOMIC64_OP_ADD_SUB_RETURN(name)                                \
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{                                                                       \
        return __lse_atomic64_fetch_add##name(i, v) + i;                \
}                                                                       \
                                                                        \
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
{                                                                       \
        return __lse_atomic64_fetch_sub##name(i, v) - i;                \
}

ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
ATOMIC64_OP_ADD_SUB_RETURN(_release)
ATOMIC64_OP_ADD_SUB_RETURN(        )

#undef ATOMIC64_OP_ADD_SUB_RETURN

static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
        __lse_atomic64_andnot(~i, v);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)                          \
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{                                                                       \
        return __lse_atomic64_fetch_andnot##name(~i, v);                \
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

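/*
 * There is no LSE instruction for a conditional decrement, so this is a
 * CASAL loop: retry until the compare-and-swap succeeds, or bail out as
 * soon as the observed value would go negative.
 */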
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
        unsigned long tmp;

        asm volatile(
        __LSE_PREAMBLE
        "1:     ldr     %x[tmp], %[v]\n"
        "       subs    %[ret], %x[tmp], #1\n"
        "       b.lt    2f\n"
        "       casal   %x[tmp], %[ret], %[v]\n"
        "       sub     %x[tmp], %x[tmp], #1\n"
        "       sub     %x[tmp], %x[tmp], %[ret]\n"
        "       cbnz    %x[tmp], 1b\n"
        "2:"
        : [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
        :
        : "cc", "memory");

        return (long)v;
}

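/*
 * cmpxchg maps directly onto CAS{B,H,,}: "w" selects the W/X operand
 * width, "sfx" the 8/16/32/64-bit size suffix and "mb" the relaxed,
 * acquire, release or fully-ordered form.
 */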
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)                     \
static __always_inline u##sz                                            \
__lse__cmpxchg_case_##name##sz(volatile void *ptr,                      \
                                              u##sz old,                \
                                              u##sz new)                \
{                                                                       \
        register unsigned long x0 asm ("x0") = (unsigned long)ptr;      \
        register u##sz x1 asm ("x1") = old;                             \
        register u##sz x2 asm ("x2") = new;                             \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        "       mov     %" #w "[tmp], %" #w "[old]\n"                   \
        "       cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"    \
        "       mov     %" #w "[ret], %" #w "[tmp]"                     \
        : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr),            \
          [tmp] "=&r" (tmp)                                             \
        : [old] "r" (x1), [new] "r" (x2)                                \
        : cl);                                                          \
                                                                        \
        return x0;                                                      \
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

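/*
 * cmpxchg_double uses CASP, which needs its operands in consecutive
 * even/odd register pairs (x0/x1 and x2/x3). The result is non-zero if
 * either loaded word differed from the expected value.
 */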
#define __CMPXCHG_DBL(name, mb, cl...)                                  \
static __always_inline long                                             \
__lse__cmpxchg_double##name(unsigned long old1,                         \
                                         unsigned long old2,            \
                                         unsigned long new1,            \
                                         unsigned long new2,            \
                                         volatile void *ptr)            \
{                                                                       \
        unsigned long oldval1 = old1;                                   \
        unsigned long oldval2 = old2;                                   \
        register unsigned long x0 asm ("x0") = old1;                    \
        register unsigned long x1 asm ("x1") = old2;                    \
        register unsigned long x2 asm ("x2") = new1;                    \
        register unsigned long x3 asm ("x3") = new2;                    \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr;      \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        "       casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
        "       eor     %[old1], %[old1], %[oldval1]\n"                 \
        "       eor     %[old2], %[old2], %[oldval2]\n"                 \
        "       orr     %[old1], %[old1], %[old2]"                      \
        : [old1] "+&r" (x0), [old2] "+&r" (x1),                         \
          [v] "+Q" (*(unsigned long *)ptr)                              \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),             \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)              \
        : cl);                                                          \
                                                                        \
        return x0;                                                      \
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif  /* __ASM_ATOMIC_LSE_H */