1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Low level functions for atomic operations
5 * Copyright IBM Corp. 1999, 2016
8 #ifndef __ARCH_S390_ATOMIC_OPS__
9 #define __ARCH_S390_ATOMIC_OPS__
11 #include <linux/limits.h>
12 #include <asm/march.h>
/*
 * __atomic_read() - read the 32-bit counter of an atomic_t.
 *
 * A single L (32-bit load) instruction suffices; the "R" constraint
 * limits the counter to a memory operand form that L can encode.
 * NOTE(review): interior lines of this function (asm opener, local
 * declaration, return) are missing from this extract.
 */
14 static __always_inline int __atomic_read(const atomic_t *v)
19 " l %[c],%[counter]\n"
20 : [c] "=d" (c) : [counter] "R" (v->counter));
/*
 * __atomic_set() - store a new value into a 32-bit atomic_t.
 *
 * Compile-time constants within 16-bit signed range (S16_MIN..S16_MAX)
 * are stored with MVHI, embedding the immediate in the instruction
 * ("K" = signed 16-bit immediate constraint) without needing a
 * register.  NOTE(review): braces/else and asm openers are missing
 * from this extract.
 */
24 static __always_inline void __atomic_set(atomic_t *v, int i)
26 if (__builtin_constant_p(i) && i >= S16_MIN && i <= S16_MAX) {
28 " mvhi %[counter], %[i]\n"
29 : [counter] "=Q" (v->counter) : [i] "K" (i));
/* Non-constant or out-of-range values: plain ST from a register ("d"). */
32 " st %[i],%[counter]\n"
33 : [counter] "=R" (v->counter) : [i] "d" (i));
/*
 * __atomic64_read() - read the 64-bit counter of an atomic64_t.
 *
 * 64-bit counterpart of __atomic_read(): a single LG (64-bit load).
 * The "RT" constraint additionally admits long-displacement operand
 * forms.  NOTE(review): interior lines are missing from this extract.
 */
37 static __always_inline s64 __atomic64_read(const atomic64_t *v)
42 " lg %[c],%[counter]\n"
43 : [c] "=d" (c) : [counter] "RT" (v->counter));
/*
 * __atomic64_set() - store a new value into a 64-bit atomic64_t.
 *
 * Same split as __atomic_set(): 16-bit-signed compile-time constants
 * use MVGHI with an embedded immediate ("K" constraint); everything
 * else uses STG from a register.  NOTE(review): braces/else and asm
 * openers are missing from this extract.
 */
47 static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
49 if (__builtin_constant_p(i) && i >= S16_MIN && i <= S16_MAX) {
51 " mvghi %[counter], %[i]\n"
52 : [counter] "=Q" (v->counter) : [i] "K" (i));
/* Fallback: 64-bit store from a register ("d"). */
55 " stg %[i],%[counter]\n"
56 : [counter] "=RT" (v->counter) : [i] "d" (i));
60 #ifdef MARCH_HAS_Z196_FEATURES
/*
 * z196+ build: each op is a single interlocked load-and-<op>
 * instruction (LAA/LAN/LAO/LAX and their 64-bit "g" forms) that
 * returns the old value in %[old] and updates memory in one step —
 * no compare-and-swap retry loop needed.  op_barrier is spliced in as
 * extra asm text for the _barrier variants.
 */
62 #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
63 static __always_inline op_type op_name(op_type val, op_type *ptr) \
68 op_string " %[old],%[val],%[ptr]\n" \
70 : [old] "=d" (old), [ptr] "+QS" (*ptr) \
71 : [val] "d" (val) : "cc", "memory"); \
/*
 * Generate both variants per op: plain, and _barrier which appends
 * "bcr 14,0" (a serializing branch-on-condition used as a memory
 * barrier here).
 */
75 #define __ATOMIC_OPS(op_name, op_type, op_string) \
76 __ATOMIC_OP(op_name, op_type, op_string, "\n") \
77 __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
/* 32-bit interlocked ops: load-and-{add,and,or,xor}. */
79 __ATOMIC_OPS(__atomic_add, int, "laa")
80 __ATOMIC_OPS(__atomic_and, int, "lan")
81 __ATOMIC_OPS(__atomic_or, int, "lao")
82 __ATOMIC_OPS(__atomic_xor, int, "lax")
/* 64-bit counterparts ("g" suffix = 64-bit grande forms). */
84 __ATOMIC_OPS(__atomic64_add, long, "laag")
85 __ATOMIC_OPS(__atomic64_and, long, "lang")
86 __ATOMIC_OPS(__atomic64_or, long, "laog")
87 __ATOMIC_OPS(__atomic64_xor, long, "laxg")
/*
 * Constant-add variants for compile-time-constant deltas: ASI/AGSI add
 * a signed immediate ("i" constraint) directly to memory, so no result
 * register is produced and the functions return void.  As above,
 * op_barrier supplies the extra "bcr 14,0" text for _barrier variants.
 */
92 #define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier) \
93 static __always_inline void op_name(op_type val, op_type *ptr) \
96 op_string " %[ptr],%[val]\n" \
98 : [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\
101 #define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \
102 __ATOMIC_CONST_OP(op_name, op_type, op_string, "\n") \
103 __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
/* ASI = add 32-bit, AGSI = add 64-bit signed immediate to storage. */
105 __ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
106 __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
/* Generator macros are local to this header; drop them. */
108 #undef __ATOMIC_CONST_OPS
109 #undef __ATOMIC_CONST_OP
111 #else /* MARCH_HAS_Z196_FEATURES */
/*
 * Pre-z196 build: no interlocked load-and-op instructions, so each op
 * is a classic compare-and-swap retry loop — copy old to new (LR),
 * apply the op to new, then CS old/new against memory; on contention
 * CS reloads the observed value into %[old] and the loop retries
 * (branch lines not visible in this extract).  "0" (*ptr) seeds %[old]
 * with the initial memory value.
 */
113 #define __ATOMIC_OP(op_name, op_string) \
114 static __always_inline int op_name(int val, int *ptr) \
119 "0: lr %[new],%[old]\n" \
120 op_string " %[new],%[val]\n" \
121 " cs %[old],%[new],%[ptr]\n" \
123 : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
124 : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
/* The _barrier variant expands identically: CS itself serializes. */
128 #define __ATOMIC_OPS(op_name, op_string) \
129 __ATOMIC_OP(op_name, op_string) \
130 __ATOMIC_OP(op_name##_barrier, op_string)
/* 32-bit register ops: add/and/or/xor. */
132 __ATOMIC_OPS(__atomic_add, "ar")
133 __ATOMIC_OPS(__atomic_and, "nr")
134 __ATOMIC_OPS(__atomic_or, "or")
135 __ATOMIC_OPS(__atomic_xor, "xr")
/*
 * 64-bit pre-z196 variant of the CS loop above, using LGR/CSG and the
 * 64-bit register ops (AGR/NGR/OGR/XGR).  "+QS" admits the operand
 * forms CSG can address.
 */
139 #define __ATOMIC64_OP(op_name, op_string) \
140 static __always_inline long op_name(long val, long *ptr) \
145 "0: lgr %[new],%[old]\n" \
146 op_string " %[new],%[val]\n" \
147 " csg %[old],%[new],%[ptr]\n" \
149 : [old] "=d" (old), [new] "=&d" (new), [ptr] "+QS" (*ptr)\
150 : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
154 #define __ATOMIC64_OPS(op_name, op_string) \
155 __ATOMIC64_OP(op_name, op_string) \
156 __ATOMIC64_OP(op_name##_barrier, op_string)
158 __ATOMIC64_OPS(__atomic64_add, "agr")
159 __ATOMIC64_OPS(__atomic64_and, "ngr")
160 __ATOMIC64_OPS(__atomic64_or, "ogr")
161 __ATOMIC64_OPS(__atomic64_xor, "xgr")
163 #undef __ATOMIC64_OPS
/*
 * Pre-z196 fallback for the *_add_const helpers: without ASI/AGSI the
 * constant-add simply reuses the CS/CSG based add above (the returned
 * old value is discarded).  The _barrier aliases map to the plain add
 * because the compare-and-swap in that path already serializes, so no
 * extra "bcr 14,0" is required — NOTE(review): grounded in the CS-loop
 * implementation above; confirm against z/Architecture PoP.
 */
165 #define __atomic_add_const(val, ptr) __atomic_add(val, ptr)
166 #define __atomic_add_const_barrier(val, ptr) __atomic_add(val, ptr)
167 #define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr)
168 #define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr)
170 #endif /* MARCH_HAS_Z196_FEATURES */
/*
 * __atomic_cmpxchg() - 32-bit compare-and-swap.
 *
 * Single CS instruction; %[old] is read-write ("+d"), so after the
 * instruction it holds the value actually found in memory (unchanged
 * if the swap succeeded).  NOTE(review): return statement not visible
 * in this extract — presumably returns old.
 */
172 static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
175 " cs %[old],%[new],%[ptr]"
176 : [old] "+d" (old), [ptr] "+Q" (*ptr)
/*
 * __atomic64_cmpxchg() - 64-bit compare-and-swap via CSG.
 *
 * Same contract as __atomic_cmpxchg() but for 64-bit values; "+QS"
 * admits the operand forms CSG can address.  NOTE(review): return
 * statement not visible in this extract.
 */
182 static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
185 " csg %[old],%[new],%[ptr]"
186 : [old] "+d" (old), [ptr] "+QS" (*ptr)
192 /* GCC versions before 14.2.0 may die with an ICE in some configurations. */
193 #if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_CC_IS_GCC) && (GCC_VERSION < 140200))
/*
 * __atomic_cmpxchg_bool() - 32-bit CAS returning success as bool.
 *
 * Uses the compiler's flag-output operand ("=@cc") to read the
 * condition code set by CS directly, avoiding a separate comparison of
 * old vs. expected.  NOTE(review): the cc declaration and the return
 * expression are not visible in this extract.
 */
195 static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
200 " cs %[old],%[new],%[ptr]"
201 : [old] "+d" (old), [ptr] "+Q" (*ptr), "=@cc" (cc)
/*
 * __atomic64_cmpxchg_bool() - 64-bit CAS returning success as bool,
 * flag-output ("=@cc") variant mirroring the 32-bit version above but
 * built on CSG.  NOTE(review): cc declaration and return expression
 * are not visible in this extract.
 */
207 static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
212 " csg %[old],%[new],%[ptr]"
213 : [old] "+d" (old), [ptr] "+QS" (*ptr), "=@cc" (cc)
219 #else /* __GCC_ASM_FLAG_OUTPUTS__ */
/*
 * __atomic_cmpxchg_bool() - fallback for compilers without flag-output
 * operands: save the expected value, run CS (which leaves the observed
 * memory value in %[old]), and report success by comparing the two in
 * C instead of reading the condition code.
 */
221 static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
223 int old_expected = old;
226 " cs %[old],%[new],%[ptr]"
227 : [old] "+d" (old), [ptr] "+Q" (*ptr)
/* Swap succeeded iff the observed value matched the expected one. */
230 return old == old_expected;
/*
 * __atomic64_cmpxchg_bool() - 64-bit fallback variant: identical to
 * the 32-bit version above but using CSG on long operands.
 */
233 static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
235 long old_expected = old;
238 " csg %[old],%[new],%[ptr]"
239 : [old] "+d" (old), [ptr] "+QS" (*ptr)
242 return old == old_expected;
245 #endif /* __GCC_ASM_FLAG_OUTPUTS__ */
247 #endif /* __ARCH_S390_ATOMIC_OPS__ */