arch/arm64/include/asm/cmpxchg.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
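/*
 * __XCHG_CASE() generates one exchange helper per access size and memory
 * ordering. The LL/SC variant spins on a load-exclusive/store-exclusive
 * pair; the LSE variant is a single SWP instruction padded with NOPs so
 * that both alternatives are the same length for the runtime patching
 * done by ARM64_LSE_ATOMIC_INSN().
 */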
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)      \
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)        \
{                                                                              \
        u##sz ret;                                                             \
        unsigned long tmp;                                                     \
                                                                               \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                                    \
        /* LL/SC */                                                            \
        "       prfm    pstl1strm, %2\n"                                       \
        "1:     ld" #acq "xr" #sfx "\t%" #w "0, %2\n"                          \
        "       st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"                     \
        "       cbnz    %w1, 1b\n"                                             \
        "       " #mb,                                                         \
        /* LSE atomics */                                                      \
        "       swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"           \
                __nops(3)                                                      \
        "       " #nop_lse)                                                    \
        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)                       \
        : "r" (x)                                                              \
        : cl);                                                                 \
                                                                               \
        return ret;                                                            \
}

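/*
 * Instantiate the exchange helpers for 8-, 16-, 32- and 64-bit accesses,
 * each in relaxed, acquire (acq_), release (rel_) and fully ordered (mb_)
 * flavours. Only the ordered variants clobber "memory".
 */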
__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE

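/*
 * __XCHG_GEN() emits a size-dispatching wrapper that picks the right
 * fixed-size helper from sizeof(*ptr); an unsupported size is rejected
 * at compile time via BUILD_BUG().
 */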
#define __XCHG_GEN(sfx)                                                 \
static __always_inline  unsigned long __xchg##sfx(unsigned long x,      \
                                        volatile void *ptr,             \
                                        int size)                       \
{                                                                       \
        switch (size) {                                                 \
        case 1:                                                         \
                return __xchg_case##sfx##_8(x, ptr);                    \
        case 2:                                                         \
                return __xchg_case##sfx##_16(x, ptr);                   \
        case 4:                                                         \
                return __xchg_case##sfx##_32(x, ptr);                   \
        case 8:                                                         \
                return __xchg_case##sfx##_64(x, ptr);                   \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
        unreachable();                                                  \
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)                                     \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        __ret = (__typeof__(*(ptr)))                                    \
                __xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
        __ret;                                                          \
})

/* xchg */
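/*
 * arch_xchg*() atomically store a new value into *ptr and return the value
 * that was there before; the suffix selects the memory ordering. Minimal
 * usage sketch (hypothetical flag word):
 *
 *      unsigned long flag = 0;
 *      unsigned long old = arch_xchg(&flag, 1UL);  // old == 0, flag == 1
 *
 * Generic code normally reaches these through the common xchg() wrappers
 * rather than the arch_* names.
 */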
#define arch_xchg_relaxed(...)  __xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)  __xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)  __xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)          __xchg_wrapper( _mb, __VA_ARGS__)

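/*
 * The cmpxchg helpers are not open-coded here: __lse_ll_sc_body() (from
 * <asm/lse.h>) dispatches at runtime between the LL/SC implementation
 * (__ll_sc_*) and the LSE one (__lse_*), depending on whether the CPUs
 * advertise the LSE atomics.
 */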
#define __CMPXCHG_CASE(name, sz)                                        \
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr,       \
                                              u##sz old,                \
                                              u##sz new)                \
{                                                                       \
        return __lse_ll_sc_body(_cmpxchg_case_##name##sz,               \
                                ptr, old, new);                         \
}

__CMPXCHG_CASE(    ,  8)
__CMPXCHG_CASE(    , 16)
__CMPXCHG_CASE(    , 32)
__CMPXCHG_CASE(    , 64)
__CMPXCHG_CASE(acq_,  8)
__CMPXCHG_CASE(acq_, 16)
__CMPXCHG_CASE(acq_, 32)
__CMPXCHG_CASE(acq_, 64)
__CMPXCHG_CASE(rel_,  8)
__CMPXCHG_CASE(rel_, 16)
__CMPXCHG_CASE(rel_, 32)
__CMPXCHG_CASE(rel_, 64)
__CMPXCHG_CASE(mb_,  8)
__CMPXCHG_CASE(mb_, 16)
__CMPXCHG_CASE(mb_, 32)
__CMPXCHG_CASE(mb_, 64)

#undef __CMPXCHG_CASE

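/*
 * __cmpxchg_double() compares and, on a match, exchanges a pair of
 * adjacent 64-bit words as a single atomic operation (CASP with LSE,
 * paired exclusives otherwise). It returns zero on success and non-zero
 * if either word did not match its expected value.
 */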
#define __CMPXCHG_DBL(name)                                             \
static inline long __cmpxchg_double##name(unsigned long old1,           \
                                         unsigned long old2,            \
                                         unsigned long new1,            \
                                         unsigned long new2,            \
                                         volatile void *ptr)            \
{                                                                       \
        return __lse_ll_sc_body(_cmpxchg_double##name,                  \
                                old1, old2, new1, new2, ptr);           \
}

__CMPXCHG_DBL(   )
__CMPXCHG_DBL(_mb)

#undef __CMPXCHG_DBL

#define __CMPXCHG_GEN(sfx)                                              \
static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
                                           unsigned long old,           \
                                           unsigned long new,           \
                                           int size)                    \
{                                                                       \
        switch (size) {                                                 \
        case 1:                                                         \
                return __cmpxchg_case##sfx##_8(ptr, old, new);          \
        case 2:                                                         \
                return __cmpxchg_case##sfx##_16(ptr, old, new);         \
        case 4:                                                         \
                return __cmpxchg_case##sfx##_32(ptr, old, new);         \
        case 8:                                                         \
                return __cmpxchg_case##sfx##_64(ptr, old, new);         \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
        unreachable();                                                  \
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)                               \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        __ret = (__typeof__(*(ptr)))                                    \
                __cmpxchg##sfx((ptr), (unsigned long)(o),               \
                                (unsigned long)(n), sizeof(*(ptr)));    \
        __ret;                                                          \
})

/* cmpxchg */
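/*
 * arch_cmpxchg*() atomically replace *ptr with "new" only if it currently
 * holds "old", and always return the value that was found; success is
 * detected by comparing the return value with "old". Minimal sketch
 * (hypothetical lock word):
 *
 *      unsigned long lock = 0;
 *      unsigned long seen = arch_cmpxchg(&lock, 0UL, 1UL);
 *      // seen == 0: the swap happened and lock is now 1;
 *      // anything else: another path got there first.
 *
 * As with xchg, callers normally use the generic cmpxchg() family rather
 * than the arch_* names directly.
 */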
#define arch_cmpxchg_relaxed(...)       __cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)       __cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)       __cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)               __cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local              arch_cmpxchg_relaxed

/* cmpxchg64 */
#define arch_cmpxchg64_relaxed          arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire          arch_cmpxchg_acquire
#define arch_cmpxchg64_release          arch_cmpxchg_release
#define arch_cmpxchg64                  arch_cmpxchg
#define arch_cmpxchg64_local            arch_cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()     1

#define __cmpxchg_double_check(ptr1, ptr2)                                      \
({                                                                              \
        if (sizeof(*(ptr1)) != 8)                                               \
                BUILD_BUG();                                                    \
        VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);      \
})

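/*
 * Unlike arch_cmpxchg(), the double-word variants return a boolean:
 * 1 if both words matched and were replaced, 0 otherwise. The two
 * pointers must refer to adjacent 64-bit words (checked above), with
 * ptr1 the lower-addressed of the pair.
 */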
#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)                         \
({                                                                              \
        int __ret;                                                              \
        __cmpxchg_double_check(ptr1, ptr2);                                     \
        __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),  \
                                     (unsigned long)(n1), (unsigned long)(n2),  \
                                     ptr1);                                     \
        __ret;                                                                  \
})

#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)                   \
({                                                                              \
        int __ret;                                                              \
        __cmpxchg_double_check(ptr1, ptr2);                                     \
        __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),     \
                                  (unsigned long)(n1), (unsigned long)(n2),     \
                                  ptr1);                                        \
        __ret;                                                                  \
})

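/*
 * __cmpwait_case_*() parks the CPU until *ptr is observed to differ from
 * "val" or an event is signalled. SEVL primes the local event register so
 * the first WFE falls through immediately; the value is then re-read with
 * a load-exclusive, which arms the exclusive monitor so that a write to
 * the location by another CPU generates the event that wakes the second
 * WFE. Spurious wake-ups are harmless because the caller re-checks its
 * condition in a loop.
 */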
#define __CMPWAIT_CASE(w, sfx, sz)                                      \
static inline void __cmpwait_case_##sz(volatile void *ptr,              \
                                       unsigned long val)               \
{                                                                       \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
        "       sevl\n"                                                 \
        "       wfe\n"                                                  \
        "       ldxr" #sfx "\t%" #w "[tmp], %[v]\n"                     \
        "       eor     %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"     \
        "       cbnz    %" #w "[tmp], 1f\n"                             \
        "       wfe\n"                                                  \
        "1:"                                                            \
        : [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr)           \
        : [val] "r" (val));                                             \
}

__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);

#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)                                              \
static __always_inline void __cmpwait##sfx(volatile void *ptr,          \
                                  unsigned long val,                    \
                                  int size)                             \
{                                                                       \
        switch (size) {                                                 \
        case 1:                                                         \
                return __cmpwait_case##sfx##_8(ptr, (u8)val);           \
        case 2:                                                         \
                return __cmpwait_case##sfx##_16(ptr, (u16)val);         \
        case 4:                                                         \
                return __cmpwait_case##sfx##_32(ptr, val);              \
        case 8:                                                         \
                return __cmpwait_case##sfx##_64(ptr, val);              \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
        unreachable();                                                  \
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

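/*
 * __cmpwait_relaxed() provides no ordering of its own; it is the hint
 * used by this architecture's smp_cond_load_relaxed()/smp_cond_load_acquire()
 * loops in <asm/barrier.h> to idle in WFE between polls instead of
 * spinning.
 */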
#define __cmpwait_relaxed(ptr, val) \
        __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))

#endif  /* __ASM_CMPXCHG_H */