1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_ATOMIC_H
3 #define _ASM_X86_ATOMIC_H
5 #include <linux/compiler.h>
6 #include <linux/types.h>
7 #include <asm/alternative.h>
8 #include <asm/cmpxchg.h>
10 #include <asm/barrier.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */
17 static __always_inline int arch_atomic_read(const atomic_t *v)
20 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
21 * it's non-inlined function that increases binary size and stack usage.
23 return __READ_ONCE((v)->counter);
26 static __always_inline void arch_atomic_set(atomic_t *v, int i)
28 __WRITE_ONCE(v->counter, i);
31 static __always_inline void arch_atomic_add(int i, atomic_t *v)
33 asm volatile(LOCK_PREFIX "addl %1,%0"
35 : "ir" (i) : "memory");
38 static __always_inline void arch_atomic_sub(int i, atomic_t *v)
40 asm volatile(LOCK_PREFIX "subl %1,%0"
42 : "ir" (i) : "memory");
45 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
47 return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
49 #define arch_atomic_sub_and_test arch_atomic_sub_and_test
51 static __always_inline void arch_atomic_inc(atomic_t *v)
53 asm volatile(LOCK_PREFIX "incl %0"
54 : "+m" (v->counter) :: "memory");
56 #define arch_atomic_inc arch_atomic_inc
58 static __always_inline void arch_atomic_dec(atomic_t *v)
60 asm volatile(LOCK_PREFIX "decl %0"
61 : "+m" (v->counter) :: "memory");
63 #define arch_atomic_dec arch_atomic_dec
65 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
67 return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
69 #define arch_atomic_dec_and_test arch_atomic_dec_and_test
71 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
73 return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
75 #define arch_atomic_inc_and_test arch_atomic_inc_and_test
77 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
79 return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
81 #define arch_atomic_add_negative arch_atomic_add_negative
83 static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
85 return i + xadd(&v->counter, i);
87 #define arch_atomic_add_return arch_atomic_add_return
89 static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
91 return arch_atomic_add_return(-i, v);
93 #define arch_atomic_sub_return arch_atomic_sub_return
95 static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
97 return xadd(&v->counter, i);
99 #define arch_atomic_fetch_add arch_atomic_fetch_add
101 static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
103 return xadd(&v->counter, -i);
105 #define arch_atomic_fetch_sub arch_atomic_fetch_sub
107 static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
109 return arch_cmpxchg(&v->counter, old, new);
111 #define arch_atomic_cmpxchg arch_atomic_cmpxchg
113 static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
115 return arch_try_cmpxchg(&v->counter, old, new);
117 #define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
119 static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
121 return arch_xchg(&v->counter, new);
123 #define arch_atomic_xchg arch_atomic_xchg
125 static __always_inline void arch_atomic_and(int i, atomic_t *v)
127 asm volatile(LOCK_PREFIX "andl %1,%0"
133 static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
135 int val = arch_atomic_read(v);
137 do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));
141 #define arch_atomic_fetch_and arch_atomic_fetch_and
143 static __always_inline void arch_atomic_or(int i, atomic_t *v)
145 asm volatile(LOCK_PREFIX "orl %1,%0"
151 static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
153 int val = arch_atomic_read(v);
155 do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));
159 #define arch_atomic_fetch_or arch_atomic_fetch_or
161 static __always_inline void arch_atomic_xor(int i, atomic_t *v)
163 asm volatile(LOCK_PREFIX "xorl %1,%0"
169 static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
171 int val = arch_atomic_read(v);
173 do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));
177 #define arch_atomic_fetch_xor arch_atomic_fetch_xor
180 # include <asm/atomic64_32.h>
182 # include <asm/atomic64_64.h>
185 #endif /* _ASM_X86_ATOMIC_H */