/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <asm/compiler.h>

#if __SIZEOF_LONG__ == 4
#define __LL		"ll.w	"
#define __SC		"sc.w	"
#define __AMADD		"amadd.w	"
#define __AMAND_DB	"amand_db.w	"
#define __AMOR_DB	"amor_db.w	"
#define __AMXOR_DB	"amxor_db.w	"
#elif __SIZEOF_LONG__ == 8
#define __LL		"ll.d	"
#define __SC		"sc.d	"
#define __AMADD		"amadd.d	"
#define __AMAND_DB	"amand_db.d	"
#define __AMOR_DB	"amor_db.d	"
#define __AMXOR_DB	"amxor_db.d	"
#endif

#define ATOMIC_INIT(i)	  { (i) }

/*
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)

/*
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define arch_atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))

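/*
 * Usage sketch (illustrative only, with a hypothetical counter; callers
 * normally go through the generic atomic_read()/atomic_set() wrappers):
 *
 *	static atomic_t nr_pending = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_pending, 1);
 *	pr_info("pending: %d\n", atomic_read(&nr_pending));
 */
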
#define ATOMIC_OP(op, I, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.w" " $zero, %1, %0	\n"			\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

#define ATOMIC_OP_RETURN(op, I, asm_op, c_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.w" " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result c_op I;						\
}

#define ATOMIC_FETCH_OP(op, I, asm_op)					\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.w" " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result;							\
}

#define ATOMIC_OPS(op, I, asm_op, c_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_OP_RETURN(op, I, asm_op, c_op)				\
	ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)

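/*
 * Note that the sub flavours above are built from the same amadd.w
 * instruction: ATOMIC_OPS(sub, -i, add, +) simply hands the AM operation a
 * negated operand, so no separate atomic subtract instruction is needed.
 */
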
#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

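/*
 * Only the _relaxed forms are defined above; the fully ordered, _acquire and
 * _release variants are expected to be generated from them by the generic
 * wrappers in <linux/atomic.h>.
 */
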
#undef ATOMIC_OPS

#define ATOMIC_OPS(op, I, asm_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	ll.w	%[p],  %[c]\n"
		"	beq	%[p],  %[u], 1f\n"
		"	add.w	%[rc], %[p], %[a]\n"
		"	sc.w	%[rc], %[c]\n"
		"	beqz	%[rc], 0b\n"
		"	b	2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c]"=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

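/*
 * Usage sketch (illustrative only; "obj" and its "refs" member are
 * hypothetical): the classic "increment unless zero" refcount pattern that
 * the generic atomic_inc_not_zero() builds on top of this helper.
 *
 *	if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	- object already dead, no reference taken
 */
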
/*
 * arch_atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
{
	int result;
	int temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1:	ll.w	%1, %2	# atomic_sub_if_positive\n"
		"	addi.w	%0, %1, %3			\n"
		"	or	%1, %0, $zero			\n"
		"	blt	%0, $zero, 2f			\n"
		"	sc.w	%1, %2				\n"
		"	beq	$zero, %1, 1b			\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.w	%1, %2	# atomic_sub_if_positive\n"
		"	sub.w	%0, %1, %3			\n"
		"	or	%1, %0, $zero			\n"
		"	blt	%0, $zero, 2f			\n"
		"	sc.w	%1, %2				\n"
		"	beq	$zero, %1, 1b			\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))

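/*
 * Usage sketch (illustrative only; "atomic_set_max" is a hypothetical
 * helper): the usual compare-and-exchange retry loop, here raising the
 * counter to a new maximum without ever lowering it.
 *
 *	static inline void atomic_set_max(atomic_t *v, int new)
 *	{
 *		int old = arch_atomic_read(v);
 *
 *		while (old < new) {
 *			int seen = arch_atomic_cmpxchg(v, old, new);
 *
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *	}
 */
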
/*
 * arch_atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)

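/*
 * Usage sketch (illustrative only; "sem_count" is hypothetical): consume one
 * token only if one is available. The counter is left unchanged when the
 * result would be negative, and the new value is returned otherwise.
 *
 *	if (atomic_dec_if_positive(&sem_count) < 0)
 *		return -EAGAIN;		- no token left
 */
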
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)    { (i) }

/*
 * arch_atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define arch_atomic64_read(v)	READ_ONCE((v)->counter)

/*
 * arch_atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define arch_atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

#define ATOMIC64_OP(op, I, asm_op)					\
static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.d " " $zero, %1, %0	\n"			\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op)				\
static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)	\
{									\
	long result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.d " " %1, %2, %0	\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result c_op I;						\
}

#define ATOMIC64_FETCH_OP(op, I, asm_op)				\
static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v)	\
{									\
	long result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.d " " %1, %2, %0	\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, I, asm_op, c_op)				\
	ATOMIC64_OP(op, I, asm_op)					\
	ATOMIC64_OP_RETURN(op, I, asm_op, c_op)				\
	ATOMIC64_FETCH_OP(op, I, asm_op)

ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS

#define ATOMIC64_OPS(op, I, asm_op)					\
	ATOMIC64_OP(op, I, asm_op)					\
	ATOMIC64_FETCH_OP(op, I, asm_op)

ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)

#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	ll.d	%[p],  %[c]\n"
		"	beq	%[p],  %[u], 1f\n"
		"	add.d	%[rc], %[p], %[a]\n"
		"	sc.d	%[rc], %[c]\n"
		"	beqz	%[rc], 0b\n"
		"	b	2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c] "=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/*
 * arch_atomic64_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
{
	long result;
	long temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1:	ll.d	%1, %2	# atomic64_sub_if_positive \n"
		"	addi.d	%0, %1, %3			\n"
		"	or	%1, %0, $zero			\n"
		"	blt	%0, $zero, 2f			\n"
		"	sc.d	%1, %2				\n"
		"	beq	%1, $zero, 1b			\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.d	%1, %2	# atomic64_sub_if_positive \n"
		"	sub.d	%0, %1, %3			\n"
		"	or	%1, %0, $zero			\n"
		"	blt	%0, $zero, 2f			\n"
		"	sc.d	%1, %2				\n"
		"	beq	%1, $zero, 1b			\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new)))

/*
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */