/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>
/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
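/*
 * Illustrative note (not in the upstream header): for the 32-bit
 * full-barrier case below, __xchg_case_mb_32 ends up roughly as either
 *
 *	1: ldxr	w0, [ptr]	// LL/SC: plain load-exclusive
 *	   stlxr w1, w3, [ptr]	//        release store-exclusive
 *	   cbnz	w1, 1b
 *	   dmb	ish		//        trailing full barrier
 * or
 *	   swpal w3, w0, [ptr]	// LSE: acquire+release swap
 *
 * which is why the acquire argument differs between the two variants.
 */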
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)	\
{										\
	u##sz ret;								\
	unsigned long tmp;							\
										\
	asm volatile(ARM64_LSE_ATOMIC_INSN(					\
	/* LL/SC */								\
	"	prfm	pstl1strm, %2\n"					\
	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n"				\
	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"			\
	"	cbnz	%w1, 1b\n"						\
	"	" #mb,								\
	/* LSE atomics */							\
	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"		\
		__nops(3)							\
	"	" #nop_lse)							\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)			\
	: "r" (x)								\
	: cl);									\
										\
	return ret;								\
}
__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE
#define __XCHG_GEN(sfx)							\
static inline unsigned long __xchg##sfx(unsigned long x,		\
					volatile void *ptr,		\
					int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_16(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_32(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_64(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN
#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})
/* xchg */
#define arch_xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
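/*
 * Usage sketch (illustrative, not part of the upstream header):
 *
 *	int v = 0;
 *	int old = arch_xchg(&v, 1);	// full barrier; old == 0, v == 1
 *
 * sizeof(*(ptr)) selects the 32-bit case at compile time; any size
 * other than 1/2/4/8 hits BUILD_BUG().
 */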
#define __CMPXCHG_GEN(sfx)						\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	case 2:								\
		return __cmpxchg_case##sfx##_16(ptr, old, new);		\
	case 4:								\
		return __cmpxchg_case##sfx##_32(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_64(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}									\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN
#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})
/* cmpxchg */
#define arch_cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local		arch_cmpxchg_relaxed
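/*
 * Usage sketch (illustrative, not part of the upstream header):
 *
 *	long cur = arch_cmpxchg(&val, 0L, 1L);
 *
 * The store happens only if val contained 0; the previous value is
 * returned either way, so success is tested with (cur == 0L).
 */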
/* cmpxchg64 */
#define arch_cmpxchg64_relaxed		arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire		arch_cmpxchg_acquire
#define arch_cmpxchg64_release		arch_cmpxchg_release
#define arch_cmpxchg64			arch_cmpxchg
#define arch_cmpxchg64_local		arch_cmpxchg_local
/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})
#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
				     (unsigned long)(n1), (unsigned long)(n2),	\
				     ptr1);					\
	__ret;									\
})
#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
				  (unsigned long)(n1), (unsigned long)(n2),	\
				  ptr1);					\
	__ret;									\
})
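/*
 * Usage sketch (illustrative, not part of the upstream header): the two
 * words must be adjacent, e.g. consecutive fields of a 16-byte aligned
 * struct:
 *
 *	struct { unsigned long a, b; } pair __aligned(16);
 *	int ok = arch_cmpxchg_double(&pair.a, &pair.b, old_a, old_b,
 *				     new_a, new_b);
 *
 * ok is 1 when both words matched and were replaced, 0 otherwise;
 * __cmpxchg_double_check() rejects non-8-byte or non-adjacent pointers
 * at build time.
 */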
#define __CMPWAIT_CASE(w, sfx, sz)					\
static inline void __cmpwait_case_##sz(volatile void *ptr,		\
				       unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr)		\
	: [val] "r" (val));						\
}
__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);
#undef __CMPWAIT_CASE
#define __CMPWAIT_GEN(sfx)						\
static inline void __cmpwait##sfx(volatile void *ptr,			\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_8(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_16(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_32(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_64(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN();

#undef __CMPWAIT_GEN
#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
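/*
 * Illustrative caller (not part of the upstream header): arm64's
 * smp_cond_load_*() helpers in <asm/barrier.h> use __cmpwait_relaxed()
 * to sleep in wfe until the watched location changes, roughly:
 *
 *	while ((v = READ_ONCE(*ptr)) != expected)
 *		__cmpwait_relaxed(ptr, v);
 */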
#endif	/* __ASM_CMPXCHG_H */