1 #ifndef __ASM_METAG_ATOMIC_LOCK1_H
2 #define __ASM_METAG_ATOMIC_LOCK1_H
4 #define ATOMIC_INIT(i) { (i) }
6 #include <linux/compiler.h>
8 #include <asm/barrier.h>
9 #include <asm/global_lock.h>
11 static inline int atomic_read(const atomic_t *v)
13 return READ_ONCE((v)->counter);
17 * atomic_set needs to be take the lock to protect atomic_add_unless from a
18 * possible race, as it reads the counter twice:
21 * atomic_add_unless(1, 0)
22 * ret = v->counter (non-zero)
23 * if (ret != u) v->counter = 0
24 * v->counter += 1 (counter set to 1)
26 * Making atomic_set take the lock ensures that ordering and logical
27 * consistency is preserved.
29 static inline int atomic_set(atomic_t *v, int i)
33 __global_lock1(flags);
36 __global_unlock1(flags);
/*
 * Generate a void atomic_<op>(i, v) that applies "counter c_op i" under
 * the global lock.
 */
#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	fence();							\
	v->counter c_op i;						\
	__global_unlock1(flags);					\
}
/*
 * Generate atomic_<op>_return(i, v): apply "c_op" under the global lock
 * and return the new counter value.
 */
#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long result;						\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	result = v->counter;						\
	result c_op i;							\
	fence();							\
	v->counter = result;						\
	__global_unlock1(flags);					\
									\
	return result;							\
}
67 #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
76 #undef ATOMIC_OP_RETURN
79 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
84 __global_lock1(flags);
90 __global_unlock1(flags);
/* Unconditional exchange delegates to the generic xchg() on the counter word. */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
97 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
102 __global_lock1(flags);
108 __global_unlock1(flags);
113 static inline int atomic_sub_if_positive(int i, atomic_t *v)
118 __global_lock1(flags);
119 ret = v->counter - 1;
124 __global_unlock1(flags);
129 #endif /* __ASM_METAG_ATOMIC_LOCK1_H */