/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
		volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
		volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
		volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
		volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
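
/*
 * Illustrative sketch, not part of the original header: typical set_bit()
 * usage against a multi-word bitmap.  The helper name and bitmap are made
 * up for the example.
 */
static inline void __example_mark_pending(unsigned long *bitmap,
					  unsigned long nr)
{
	/* Atomic RMW of the word containing bit @nr; no implied barrier. */
	set_bit(nr, bitmap);
}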

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}
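
/*
 * Illustrative sketch, not part of the original header: since clear_bit()
 * itself is unordered, releasing a hypothetical "pending" flag to other
 * CPUs pairs it with the barrier named in the comment above.
 */
static inline void __example_retire_pending(unsigned long *bitmap,
					    unsigned long nr)
{
	smp_mb__before_atomic();	/* make prior stores visible first */
	clear_bit(nr, bitmap);
}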

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}
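
/*
 * Illustrative sketch, not part of the original header: clear_bit_unlock()
 * already contains the release barrier, so a hypothetical unlock path needs
 * no explicit smp_mb__before_atomic() of its own.
 */
static inline void __example_drop_owner_bit(volatile unsigned long *word)
{
	clear_bit_unlock(0, word);	/* bit 0 is an arbitrary example */
}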

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}
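
/*
 * Illustrative sketch, not part of the original header: change_bit() as an
 * atomic toggle, e.g. for a hypothetical double-buffer index kept in bit 0.
 */
static inline void __example_flip_buffer(volatile unsigned long *state)
{
	/* Atomic XOR of bit 0; concurrent togglers cannot lose updates. */
	change_bit(0, state);
}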

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res;
}
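
/*
 * Illustrative sketch, not part of the original header: because
 * test_and_set_bit() is a full-barrier RMW, exactly one of any number of
 * concurrent callers sees the old value 0 and thereby "claims" the slot.
 */
static inline bool __example_try_claim(unsigned long *bitmap,
				       unsigned long nr)
{
	return !test_and_set_bit(nr, bitmap);	/* true iff we set it first */
}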

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res;
}
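
/*
 * Illustrative sketch, not part of the original header: the intended
 * pairing of test_and_set_bit_lock() (acquire) with clear_bit_unlock()
 * (release) as a minimal single-bit lock.  Bit 0 is an arbitrary choice.
 */
static inline void __example_bit_lock(volatile unsigned long *word)
{
	while (test_and_set_bit_lock(0, word))
		;	/* spin until the holder calls clear_bit_unlock() */
}

static inline void __example_bit_unlock(volatile unsigned long *word)
{
	clear_bit_unlock(0, word);	/* release pairs with acquire above */
}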

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS	"%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res;
}
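
/*
 * Illustrative sketch, not part of the original header: consuming a
 * hypothetical event flag.  The implied full barrier guarantees the
 * consumer sees everything published before the bit was set.
 */
static inline bool __example_consume_event(unsigned long *bitmap,
					   unsigned long nr)
{
	return test_and_clear_bit(nr, bitmap);	/* true iff it was set */
}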

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res;
}
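
/*
 * Illustrative sketch, not part of the original header: an atomic toggle
 * that also reports the pre-toggle state, e.g. for a hypothetical parity
 * bit; the return value tells the caller which phase it moved out of.
 */
static inline int __example_flip_parity(volatile unsigned long *word)
{
	return test_and_change_bit(0, word);	/* old value of bit 0 */
}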

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation.  It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;

	return num;
}
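
/*
 * Worked example, not from the original file: on a 64-bit kernel
 * __fls(0xf0ul) starts at num = 63.  The 32-, 16- and 8-bit probes all
 * find the upper part empty, subtracting down to num = 7 while shifting
 * the set bits up to positions 60..63; the 4-, 2- and 1-bit probes then
 * subtract nothing, so the result is 7, the index of the highest set
 * bit of 0xf0.
 */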

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
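
/*
 * Worked example, not from the original file: word & -word isolates the
 * lowest set bit in two's complement, e.g. 0b101100 & -0b101100 == 0b100,
 * so handing the result to __fls() above yields 2, the index of the
 * lowest set bit of the original word.
 */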

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}

	return r;
}
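
/*
 * Worked example, not from the original file: in the C fallback path,
 * fls(0x800) starts with r = 32, shifts x left by 16 (r = 16) and then by
 * 4 (r = 12); bit 31 is then set, no further test fires, and the result
 * is 12, since bit 11 is the last set bit when counting from 1.
 */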

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
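
/*
 * Worked example, not from the original file: ffs(0x18) isolates the
 * lowest set bit via 0x18 & -0x18 == 0x08 and returns fls(0x08) == 4,
 * matching the libc convention that ffs() counts from 1 and ffs(0) == 0.
 */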

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */