/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#ifndef __ASM_GENERIC_RWONCE_H
#define __ASM_GENERIC_RWONCE_H

#include <linux/compiler_types.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>

#include <asm/barrier.h>
#ifndef __ASSEMBLY__

/*
 * Yes, this permits 64-bit accesses on 32-bit architectures. These will
 * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
 * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
 * (e.g. a virtual address) and a strong prevailing wind.
 */
/*
 * Compile-time size check for {READ,WRITE}_ONCE(): the access must be a
 * native word size, or exactly 64 bits wide (64-bit accesses on 32-bit
 * architectures may be split and hence tear).
 */
#define compiletime_assert_rwonce_type(t)				\
	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
		"Unsupported access size for {READ,WRITE}_ONCE().")
/*
 * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
 * atomicity or dependency ordering guarantees. Note that this may result
 * in tears!
 */
/*
 * Plain volatile load of @x with no size check and no ordering.
 * __unqual_scalar_typeof() drops qualifiers from scalar types, so the
 * result of the expression is not itself volatile-qualified.
 */
#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))
/*
 * Scalar READ_ONCE() helper: perform the volatile load, order dependent
 * loads behind it (smp_read_barrier_depends()), then cast the result back
 * to the declared type of @x so the qualifiers stripped by
 * __unqual_scalar_typeof() do not leak into the caller's expression.
 */
#define __READ_ONCE_SCALAR(x)						\
({									\
	__unqual_scalar_typeof(x) __x = __READ_ONCE(x);			\
	smp_read_barrier_depends();					\
	(typeof(x))__x;							\
})
/*
 * READ_ONCE(): size-checked, once-only read of @x.
 * Rejects accesses that are neither native-word nor 64-bit sized.
 */
#define READ_ONCE(x)							\
({									\
	compiletime_assert_rwonce_type(x);				\
	__READ_ONCE_SCALAR(x);						\
})
/*
 * Plain volatile store of @val to @x, with no size check.
 * do/while(0) makes the macro safe as a single statement.
 */
#define __WRITE_ONCE(x, val)						\
do {									\
	*(volatile typeof(x) *)&(x) = (val);				\
} while (0)
/*
 * WRITE_ONCE(): size-checked, once-only store of @val to @x.
 * Rejects accesses that are neither native-word nor 64-bit sized.
 */
#define WRITE_ONCE(x, val)						\
do {									\
	compiletime_assert_rwonce_type(x);				\
	__WRITE_ONCE(x, val);						\
} while (0)
72 static __no_sanitize_or_inline
73 unsigned long __read_once_word_nocheck(const void *addr)
75 return __READ_ONCE(*(unsigned long *)addr);
/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
 * word from memory atomically but without telling KASAN/KCSAN. This is
 * usually used by unwinding code when walking the stack of a running process.
 */
/*
 * Word-sized read of @x that bypasses KASAN/KCSAN instrumentation.
 * Only exactly unsigned-long-sized accesses are accepted; the result is
 * cast back to the declared type of @x.
 */
#define READ_ONCE_NOCHECK(x)						\
({									\
	unsigned long __x;						\
	compiletime_assert(sizeof(x) == sizeof(__x),			\
		"Unsupported access size for READ_ONCE_NOCHECK().");	\
	__x = __read_once_word_nocheck(&(x));				\
	smp_read_barrier_depends();					\
	(typeof(x))__x;							\
})
93 static __no_kasan_or_inline
94 unsigned long read_word_at_a_time(const void *addr)
96 kasan_check_read(addr, 1);
97 return *(unsigned long *)addr;
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_RWONCE_H */