/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H
#include <linux/irqflags.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/sync.h>
#define ATOMIC_OPS(pfx, type)						\
static __always_inline type arch_##pfx##_read(const pfx##_t *v)	\
{									\
	return READ_ONCE(v->counter);					\
}									\
									\
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
{									\
	WRITE_ONCE(v->counter, i);					\
}
ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)	{ (i) }
ATOMIC_OPS(atomic64, s64)
#endif
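
/*
 * As an illustration, ATOMIC_OPS(atomic, int) above expands to roughly:
 *
 *	static __always_inline int arch_atomic_read(const atomic_t *v)
 *	{
 *		return READ_ONCE(v->counter);
 *	}
 *
 *	static __always_inline void arch_atomic_set(atomic_t *v, int i)
 *	{
 *		WRITE_ONCE(v->counter, i);
 *	}
 *
 * A single aligned load or store of the counter is already atomic, so
 * read/set need no LL/SC sequence; READ_ONCE()/WRITE_ONCE() only keep
 * the compiler from tearing, merging or caching the access.
 */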
#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
{									\
	type temp;							\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return;							\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%0, %1		# " #pfx "_" #op "	\n"	\
	"	" #asm_op " %0, %2				\n"	\
	"	" #sc "	%0, %1					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)		\
	: "Ir" (i) : __LLSC_CLOBBER);					\
}
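
/*
 * The LL/SC loop above works as follows: #ll load-links the counter,
 * #asm_op applies the operation in a register, and #sc store-conditionally
 * writes the result back, leaving 0 in the register if another CPU touched
 * the line in the meantime; SC_BEQZ then branches back to 1: to retry.
 * For example, ATOMIC_OP(atomic, add, int, +=, addu, ll, sc), instantiated
 * via ATOMIC_OPS() below, produces a sketch of:
 *
 *	static __inline__ void arch_atomic_add(int i, atomic_t *v);
 *
 * which loops on ll/addu/sc until the store-conditional succeeds.
 */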
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)		\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_" #op "_return\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}
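
/*
 * Note that #asm_op appears twice: the store-conditional overwrites %0
 * (result) with its success flag, so once the loop exits the new value
 * has to be recomputed from the old value still held in %1 (temp).  The
 * _relaxed suffix means these helpers imply no memory ordering of their
 * own.
 */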
#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_fetch_" #op "\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	.set	pop					\n"	\
	"	move	%0, %1					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}
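
/*
 * The fetch_##op variants return the value the counter held *before* the
 * operation: the value load-linked into %1 (temp) is copied into %0
 * (result) by the final move once the store-conditional has succeeded.
 */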
#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
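
/*
 * Only the _relaxed forms are implemented in assembly here.  Defining
 * each macro to itself tells the generic atomic layer (included via
 * <linux/atomic.h>) that the architecture provides the relaxed variant;
 * the acquire/release/fully-ordered versions are then generated from it
 * by adding the appropriate barriers.
 */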
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */
#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
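
/*
 * The bitwise operations reuse ATOMIC_OP and ATOMIC_FETCH_OP but skip
 * ATOMIC_OP_RETURN: the kernel's atomic API defines no and/or/xor
 * *_return operations, only the void and fetch_* forms.
 */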
ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#endif /* CONFIG_64BIT */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
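
/*
 * For example, with v->counter == 5, atomic_sub_if_positive(3, v) sets the
 * counter to 2 and returns 2; with v->counter == 2 the subtraction would go
 * negative, so the counter is left untouched and -1 is returned.
 */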
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{									\
	type temp, result;						\
									\
	smp_mb__before_atomic();					\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result -= i;						\
		if (result >= 0)					\
			v->counter = result;				\
		raw_local_irq_restore(flags);				\
		smp_mb__after_atomic();					\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# atomic_sub_if_positive\n"	\
	"	.set	pop					\n"	\
	"	" #op "	%0, %1, %3				\n"	\
	"	move	%1, %0					\n"	\
	"	bltz	%0, 2f					\n"	\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" #sc "	%1, %2					\n"	\
	"	" __stringify(SC_BEQZ) "	%1, 1b		\n"	\
	"2:	" __SYNC(full, loongson3_war) "			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	/*								\
	 * In the Loongson3 workaround case we already have a		\
	 * completion barrier at 2: above, which is needed due to the	\
	 * bltz that can branch to code outside of the LL/SC loop. As	\
	 * such, we don't need to emit another barrier here.		\
	 */								\
	if (__SYNC_loongson3_war == 0)					\
		smp_mb__after_atomic();					\
									\
	return result;							\
}
ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
#endif

#undef ATOMIC_SIP_OP
#endif /* _ASM_ATOMIC_H */