#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))
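
/*
 * Example (illustrative, not in the original header): for a compile-time
 * constant nr = 13, CONST_MASK_ADDR(13, addr) addresses the byte at offset
 * 13 >> 3 = 1 and CONST_MASK(13) = 1 << (13 & 7) = 0x20, so set_bit(13, addr)
 * becomes a single "lock orb" of 0x20 into that byte.
 */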

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

/**
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before the
 * memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c");
}
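
/*
 * Note (editorial): GEN_BINARY_RMWcc() from <asm/rmwcc.h> emits the locked
 * instruction and returns the condition code named by its last argument --
 * here "c" (carry), which bts/btr/btc set to the bit's previous value --
 * using either an asm goto branch or a set<cc>-based fallback, depending on
 * compiler support.
 */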

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline int
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c");
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and can be reordered! */
static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c");
}

static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
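
/*
 * Example (illustrative): on 64-bit builds (_BITOPS_LONG_SHIFT == 6),
 * nr = 70 tests bit 70 & 63 = 6 of the word addr[70 >> 6] = addr[1].
 */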

static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
	int oldbit;

	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
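
/*
 * Usage sketch (illustrative only, not part of the original header);
 * DECLARE_BITMAP() comes from <linux/types.h>, the callees are hypothetical:
 *
 *	DECLARE_BITMAP(flags, 128);		// 128-bit bitmap
 *
 *	set_bit(70, flags);			// atomic: lock orb / lock bts
 *	if (test_bit(70, flags))		// constant nr -> constant_test_bit()
 *		do_something();
 *	if (test_and_clear_bit(70, flags))	// atomic read-modify-write
 *		do_something_else();
 */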

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */