/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
8 * arch___set_bit - Set a bit in memory
10 * @addr: the address to start counting from
12 * Unlike set_bit(), this function is non-atomic and may be reordered.
13 * If it's called on the same region of memory simultaneously, the effect
14 * may be that only one operation succeeds.
16 static __always_inline void
17 arch___set_bit(unsigned int nr, volatile unsigned long *addr)
19 unsigned long mask = BIT_MASK(nr);
20 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
24 #define __set_bit arch___set_bit
26 static __always_inline void
27 arch___clear_bit(unsigned int nr, volatile unsigned long *addr)
29 unsigned long mask = BIT_MASK(nr);
30 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
34 #define __clear_bit arch___clear_bit
37 * arch___change_bit - Toggle a bit in memory
38 * @nr: the bit to change
39 * @addr: the address to start counting from
41 * Unlike change_bit(), this function is non-atomic and may be reordered.
42 * If it's called on the same region of memory simultaneously, the effect
43 * may be that only one operation succeeds.
45 static __always_inline
46 void arch___change_bit(unsigned int nr, volatile unsigned long *addr)
48 unsigned long mask = BIT_MASK(nr);
49 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
53 #define __change_bit arch___change_bit
56 * arch___test_and_set_bit - Set a bit and return its old value
58 * @addr: Address to count from
60 * This operation is non-atomic and can be reordered.
61 * If two examples of this operation race, one can appear to succeed
62 * but actually fail. You must protect multiple accesses with a lock.
64 static __always_inline int
65 arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
67 unsigned long mask = BIT_MASK(nr);
68 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
69 unsigned long old = *p;
72 return (old & mask) != 0;
74 #define __test_and_set_bit arch___test_and_set_bit
77 * arch___test_and_clear_bit - Clear a bit and return its old value
79 * @addr: Address to count from
81 * This operation is non-atomic and can be reordered.
82 * If two examples of this operation race, one can appear to succeed
83 * but actually fail. You must protect multiple accesses with a lock.
85 static __always_inline int
86 arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
88 unsigned long mask = BIT_MASK(nr);
89 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
90 unsigned long old = *p;
93 return (old & mask) != 0;
95 #define __test_and_clear_bit arch___test_and_clear_bit
97 /* WARNING: non atomic and it can be reordered! */
98 static __always_inline int
99 arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
101 unsigned long mask = BIT_MASK(nr);
102 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
103 unsigned long old = *p;
106 return (old & mask) != 0;
108 #define __test_and_change_bit arch___test_and_change_bit
111 * arch_test_bit - Determine whether a bit is set
112 * @nr: bit number to test
113 * @addr: Address to start counting from
115 static __always_inline int
116 arch_test_bit(unsigned int nr, const volatile unsigned long *addr)
118 return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
120 #define test_bit arch_test_bit
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */