/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define RLONG_ADDR(x)			 "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x)			"+m" (*(volatile char *) (x))

#define ADDR				RLONG_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))
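
/*
 * Illustrative sketch only (not part of this header): for a compile-time
 * constant @nr the macros above reduce a bit operation on a long array to a
 * single byte-wide instruction.  With a hypothetical bitmap "bits" and
 * nr == 12:
 *
 *	CONST_MASK_ADDR(12, bits)  -> the byte at (char *)bits + 1
 *	CONST_MASK(12)             -> 1 << (12 & 7) == 0x10
 *
 * so set_bit(12, bits) can become a single "lock orb $0x10" on that byte.
 */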

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
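
/*
 * Usage sketch (illustrative; "flags" is a hypothetical bitmap, not defined
 * in this header):
 *
 *	DECLARE_BITMAP(flags, 64);
 *
 *	set_bit(3, flags);	// atomic: safe against concurrent updaters
 *	__set_bit(4, flags);	// non-atomic: caller must provide exclusion
 */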

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}
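
/*
 * Illustrative sketch of the barrier note above; BUSY_BIT and "flags" are
 * hypothetical names used only for this example:
 *
 *	shared_data = result;		// publish the data first
 *	smp_mb__before_atomic();	// order the store before the clear
 *	clear_bit(BUSY_BIT, flags);	// atomic, but no implicit barrier
 */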

/**
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), WBYTE_ADDR(addr)
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}

// Let everybody know we have it
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte

/**
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before the
 * memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	__clear_bit(nr, addr);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
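
/*
 * Usage sketch (illustrative): claim exclusive ownership of a resource by
 * atomically setting a hypothetical IN_USE_BIT in a hypothetical "flags"
 * bitmap:
 *
 *	if (test_and_set_bit(IN_USE_BIT, flags))
 *		return -EBUSY;	// bit was already set: someone else owns it
 *	// bit was clear and is now set: we own the resource
 */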

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit() on x86.
 */
static __always_inline bool
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm(__ASM_SIZE(bts) " %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit)
	    : ADDR, "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs.  Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btr) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");

	return oldbit;
}
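
/*
 * Usage sketch (illustrative): atomically consume a pending-work flag set by
 * another CPU; WORK_BIT and "pending" are hypothetical names:
 *
 *	if (test_and_clear_bit(WORK_BIT, pending))
 *		do_work();	// we observed the bit set and cleared it
 */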

/* WARNING: non-atomic and it can be reordered! */
static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btc) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(bt) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static bool test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
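
/*
 * Illustrative note: test_bit() only reads memory, so the result can already
 * be stale by the time it is used.  A hypothetical sketch with a bitmap
 * "flags" and a bit READY_BIT (neither defined here):
 *
 *	if (test_bit(READY_BIT, flags))		// constant nr: constant_test_bit()
 *		handle_ready();
 *	if (test_bit(nr_from_caller, flags))	// variable nr: "bt" instruction
 *		handle_other();
 */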

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}
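
/*
 * Illustrative examples of the 0-based results above (plain constant values,
 * not taken from this file):
 *
 *	__ffs(0x18UL) == 3	// lowest set bit of ...11000
 *	ffz(0x07UL)   == 3	// lowest clear bit of ...00111
 *
 * Both are undefined for an all-zero (resp. all-one) word, hence the checks
 * recommended in the comments above.
 */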

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
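
/*
 * Illustrative comparison of the 1-based ffs() with the 0-based __ffs():
 *
 *	ffs(0)        == 0	// no bit set
 *	ffs(0x10)     == 5	// bit 4 set, reported as position 5
 *	__ffs(0x10UL) == 4	// same bit, 0-based; undefined for 0
 */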

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * set at position 32.
 */
static __always_inline int fls(unsigned int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
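
/*
 * Illustrative values for fls():
 *
 *	fls(0)          == 0	// no bit set
 *	fls(1)          == 1	// only bit 0 set
 *	fls(0x80000000) == 32	// most significant bit set
 */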

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * set at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
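
/*
 * Illustrative values for fls64(); the "order" line is a hypothetical
 * power-of-two sizing sketch, not an interface defined in this header:
 *
 *	fls64(0)          == 0
 *	fls64(1ULL << 63) == 64
 *	order = fls64(size - 1);	// next-power-of-two order, for size > 1
 */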

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */