} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned int val; \
\
- /* \
- * Explicit full memory barrier needed before/after as \
- * LLOCK/SCOND themselves don't provide any such semantics \
- */ \
- smp_mb(); \
- \
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
\
- smp_mb(); \
- \
return val; \
}
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+
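/*
 * Not part of the patch, illustration only: with just the _relaxed variants
 * defined here, the generic atomics layer (include/linux/atomic/) is expected
 * to synthesize the fully ordered forms by bracketing the relaxed op with
 * full barriers, roughly as sketched below (names simplified, assumes kernel
 * context for atomic_t/smp_mb()):
 */
static inline int sketch_atomic_add_return(int i, atomic_t *v)
{
	int ret;

	smp_mb();	/* order prior accesses before the LL/SC loop */
	ret = arch_atomic_add_return_relaxed(i, v);
	smp_mb();	/* order the LL/SC loop before later accesses */

	return ret;
}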
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
unsigned int val, orig; \
\
- /* \
- * Explicit full memory barrier needed before/after as \
- * LLOCK/SCOND themselves don't provide any such semantics \
- */ \
- smp_mb(); \
- \
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
\
- smp_mb(); \
- \
return orig; \
}
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
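/*
 * Illustration, not from the patch: _return hands back the new value, fetch_
 * hands back the old one; both calls below are the relaxed variants, i.e.
 * atomic RMW but with no ordering of surrounding accesses.
 */
static inline void sketch_fetch_vs_return(atomic_t *v)	/* assume *v starts at 0 */
{
	int newval = arch_atomic_add_return_relaxed(5, v);	/* newval == 5 */
	int oldval = arch_atomic_fetch_add_relaxed(5, v);	/* oldval == 5, counter now 10 */

	(void)newval;
	(void)oldval;
}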
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op) \
ATOMIC_OPS(xor, ^=, xor)
#define arch_atomic_andnot arch_atomic_andnot
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline s64 arch_atomic64_##op##_return(s64 a, atomic64_t *v) \
+static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
{ \
s64 val; \
\
- smp_mb(); \
- \
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
\
- smp_mb(); \
- \
return val; \
}
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+
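/*
 * Context note, not part of the diff: op1/op2 are the ALU ops applied to the
 * low and high 32-bit halves of the 64-bit value (the %L / %H operands above).
 * The original file instantiates e.g. ATOMIC64_OPS(add, add.f, adc) so the
 * carry from the low-word add.f propagates into the high word via adc, and
 * ATOMIC64_OPS(sub, sub.f, sbc) does the same for the borrow.
 */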
#define ATOMIC64_FETCH_OP(op, op1, op2) \
-static inline s64 arch_atomic64_fetch_##op(s64 a, atomic64_t *v) \
+static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
{ \
s64 val, orig; \
\
- smp_mb(); \
- \
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd   %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
\
- smp_mb(); \
- \
return orig; \
}
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_OP_RETURN(op, op1, op2) \
ATOMIC64_OPS(xor, xor, xor)
#define arch_atomic64_andnot arch_atomic64_andnot
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
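/*
 * Caller's view, illustration only (not part of the patch): the generic
 * <linux/atomic.h> API is unchanged; only the arch building blocks above
 * became relaxed.
 */
static inline void sketch_callers(atomic64_t *stat)
{
	/* fully ordered: generic layer brackets the relaxed op with smp_mb() */
	(void)atomic64_add_return(1, stat);

	/* explicitly relaxed: maps straight onto the arch implementation above */
	(void)atomic64_add_return_relaxed(1, stat);
}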