/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/sync.h>

#define ATOMIC_OPS(pfx, type)						\
static __always_inline type arch_##pfx##_read(const pfx##_t *v)	\
{									\
	return READ_ONCE(v->counter);					\
}									\
									\
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
{									\
	WRITE_ONCE(v->counter, i);					\
}									\
									\
static __always_inline type						\
arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)			\
{									\
	return arch_cmpxchg(&v->counter, o, n);				\
}									\
									\
static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)	\
{									\
	return arch_xchg(&v->counter, n);				\
}

ATOMIC_OPS(atomic, int)
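
/*
 * Illustrative sketch only, not additional interface: the invocation above
 * expands to roughly the following plain accessors for atomic_t, i.e.
 * arch_atomic_read(), arch_atomic_set(), arch_atomic_cmpxchg() and
 * arch_atomic_xchg():
 *
 *	static __always_inline int arch_atomic_read(const atomic_t *v)
 *	{
 *		return READ_ONCE(v->counter);
 *	}
 *
 *	static __always_inline void arch_atomic_set(atomic_t *v, int i)
 *	{
 *		WRITE_ONCE(v->counter, i);
 *	}
 */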

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)	{ (i) }
ATOMIC_OPS(atomic64, s64)
#endif
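
/*
 * The ATOMIC_OP/ATOMIC_OP_RETURN/ATOMIC_FETCH_OP templates below all follow
 * the same pattern: a load-linked (ll/lld) of v->counter, the arithmetic or
 * logic operation, then a store-conditional (sc/scd) which leaves 0 in its
 * register if another CPU touched the line, in which case SC_BEQZ branches
 * back to retry.  __SYNC(full, loongson3_war) only emits a barrier on CPUs
 * that need the Loongson3 LL/SC workaround.  On !kernel_uses_llsc
 * configurations the operation falls back to a plain read-modify-write with
 * interrupts disabled.
 */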

#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
{									\
	type temp;							\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return;							\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%0, %1		# " #pfx "_" #op "	\n"	\
	"	" #asm_op " %0, %2				\n"	\
	"	" #sc "	%0, %1					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)		\
	: "Ir" (i) : __LLSC_CLOBBER);					\
}

#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)		\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_" #op "_return\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_fetch_" #op "\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	.set	pop					\n"	\
	"	move	%0, %1					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
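
/*
 * Note: mapping each *_relaxed name to itself is how this header tells the
 * generic atomic code (include/linux/atomic/*) that the architecture
 * supplies the relaxed variants; the generic layer then derives the
 * acquire/release/fully-ordered forms by wrapping these in the appropriate
 * barriers.
 */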

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v)	\
{									\
	type temp, result;						\
									\
	smp_mb__before_atomic();					\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result -= i;						\
		if (result >= 0)					\
			v->counter = result;				\
		raw_local_irq_restore(flags);				\
		smp_mb__after_atomic();					\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# atomic_sub_if_positive\n"	\
	"	.set	pop					\n"	\
	"	" #op "	%0, %1, %3				\n"	\
	"	move	%1, %0					\n"	\
	"	bltz	%0, 2f					\n"	\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" #sc "	%1, %2					\n"	\
	"	" __stringify(SC_BEQZ) "	%1, 1b		\n"	\
	"2:	" __SYNC(full, loongson3_war) "			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	/*								\
	 * In the Loongson3 workaround case we already have a		\
	 * completion barrier at 2: above, which is needed due to the	\
	 * bltz that can branch to code outside of the LL/SC loop. As	\
	 * such, we don't need to emit another barrier here.		\
	 */								\
	if (__SYNC_loongson3_war == 0)					\
		smp_mb__after_atomic();					\
									\
	return result;							\
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)
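
/*
 * Usage sketch (hypothetical caller, not part of this header): a driver
 * limiting itself to a fixed number of in-flight requests could rely on
 * the dec-if-positive semantics via the generic atomic_dec_if_positive()
 * wrapper built on top of the helper above:
 *
 *	static atomic_t slots = ATOMIC_INIT(8);
 *
 *	if (atomic_dec_if_positive(&slots) < 0)
 *		return -EBUSY;
 *
 * If the counter is already 0 it is left untouched and -1 is returned;
 * otherwise the caller owns a slot and releases it later with
 * atomic_inc(&slots).
 */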

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
#endif

#undef ATOMIC_SIP_OP

#endif /* _ASM_ATOMIC_H */