/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/llsc.h>
#include <asm/sync.h>
#define ATOMIC_OPS(pfx, type)						\
static __always_inline type arch_##pfx##_read(const pfx##_t *v)	\
{									\
	return READ_ONCE(v->counter);					\
}									\
									\
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
{									\
	WRITE_ONCE(v->counter, i);					\
}									\
									\
static __always_inline type						\
arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)			\
{									\
	return arch_cmpxchg(&v->counter, o, n);				\
}									\
									\
static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)	\
{									\
	return arch_xchg(&v->counter, n);				\
}

ATOMIC_OPS(atomic, int)
#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)	{ (i) }
ATOMIC_OPS(atomic64, s64)
#endif
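
/*
 * Editorial illustration (not part of the original header): expanding
 * ATOMIC_OPS(atomic, int) above yields the following functions:
 *
 *	int  arch_atomic_read(const atomic_t *v);
 *	void arch_atomic_set(atomic_t *v, int i);
 *	int  arch_atomic_cmpxchg(atomic_t *v, int o, int n);
 *	int  arch_atomic_xchg(atomic_t *v, int n);
 *
 * ATOMIC_OPS(atomic64, s64) generates the same set with the atomic64_/s64
 * spelling on 64-bit kernels.
 */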
#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
{									\
	type temp;							\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return;							\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%0, %1		# " #pfx "_" #op "	\n"	\
	"	" #asm_op " %0, %2				\n"	\
	"	" #sc "	%0, %1					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	.set	pop					\n"	\
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)		\
	: "Ir" (i) : __LLSC_CLOBBER);					\
}
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_" #op "_return\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}
#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_fetch_" #op "\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	.set	pop					\n"	\
	"	move	%0, %1					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}
#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
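
/*
 * Only the _relaxed forms are defined here; the generic atomic layer in
 * include/linux/atomic.h is expected to derive the acquire, release and
 * fully-ordered variants from them.
 */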
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */
#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#endif /* CONFIG_64BIT */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{									\
	type temp, result;						\
									\
	smp_mb__before_atomic();					\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result -= i;						\
		if (result >= 0)					\
			v->counter = result;				\
		raw_local_irq_restore(flags);				\
		smp_mb__after_atomic();					\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# atomic_sub_if_positive\n"	\
	"	.set	pop					\n"	\
	"	" #op "	%0, %1, %3				\n"	\
	"	move	%1, %0					\n"	\
	"	bltz	%0, 2f					\n"	\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" #sc "	%1, %2					\n"	\
	"	" __SC_BEQZ "%1, 1b				\n"	\
	"2:	" __SYNC(full, loongson3_war) "			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	/*								\
	 * In the Loongson3 workaround case we already have a		\
	 * completion barrier at 2: above, which is needed due to the	\
	 * bltz that can branch to code outside of the LL/SC loop. As	\
	 * such, we don't need to emit another barrier here.		\
	 */								\
	if (__SYNC_loongson3_war == 0)					\
		smp_mb__after_atomic();					\
									\
	return result;							\
}
ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)
#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
#endif
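
/*
 * Illustrative use (not part of the original header): dec_if_positive
 * suits counters that must never drop below zero, e.g. a pool of slots:
 *
 *	static atomic_t slots = ATOMIC_INIT(4);
 *
 *	if (arch_atomic_dec_if_positive(&slots) < 0)
 *		return -EBUSY;
 *
 * A negative return means the count was already zero and was left
 * unchanged. Kernel code would normally call the generic
 * atomic_dec_if_positive() wrapper rather than the arch_ function.
 */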
#undef ATOMIC_SIP_OP

#endif /* _ASM_ATOMIC_H */