/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/kasan-checks.h>
#define __nops(n)	".rept " #n "\nnop\n.endr\n"
#define nops(n)		asm volatile(__nops(n))

#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")

#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

#define psb_csync()	asm volatile("hint #17" : : : "memory")
#define tsb_csync()	asm volatile("hint #18" : : : "memory")
#define csdb()		asm volatile("hint #20" : : : "memory")
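
/*
 * psb_csync(), tsb_csync() and csdb() above correspond to PSB CSYNC
 * (profiling synchronization barrier), TSB CSYNC (trace synchronization
 * barrier) and CSDB (consumption of speculative data barrier). They are
 * written as raw HINT encodings so they assemble with older toolchains and
 * execute as NOPs on CPUs without the corresponding feature.
 */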

#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync()						\
	do {							\
		extern struct static_key_false gic_pmr_sync;	\
								\
		if (static_branch_unlikely(&gic_pmr_sync))	\
			dsb(sy);				\
	} while(0)
#else
#define pmr_sync()	do {} while (0)
#endif

#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)

#define dma_mb()	dmb(osh)
#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)
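
/*
 * The dma_*() barriers use the outer-shareable domain so that ordering is
 * enforced with respect to DMA-capable observers as well as other CPUs,
 * whereas the __smp_*() barriers further down only require the
 * inner-shareable domain.
 */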

/*
 * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
						    unsigned long sz)
{
	unsigned long mask;

	asm volatile(
	"	cmp	%1, %2\n"
	"	sbc	%0, xzr, xzr\n"
	: "=r" (mask)
	: "r" (idx), "Ir" (sz)
	: "cc");

	csdb();
	return mask;
}

/*
 * Ensure that reads of the counter are treated the same as memory reads
 * for the purposes of ordering by subsequent memory barriers.
 *
 * This insanity brought to you by speculative system register reads,
 * out-of-order memory accesses, sequence locks and Thomas Gleixner.
 *
 * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
 */
#define arch_counter_enforce_ordering(val) do {			\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)

#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)

#define __smp_store_release(p, v)					\
do {									\
	typeof(p) __p = (p);						\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =	\
		{ .__val = (__force __unqual_scalar_typeof(*p)) (v) };	\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_write(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u8 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u16 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u32 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 8:								\
		asm volatile ("stlr %1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u64 *)__u.__c)		\
				: "memory");				\
		break;							\
	}								\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;	\
	typeof(p) __p = (p);						\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_read(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	}								\
	(typeof(*p))__u.__val;						\
})
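
/*
 * __smp_load_acquire() uses the LDAR{B,H} load-acquire instructions, so
 * later loads and stores cannot be reordered before the load. Reading into
 * a local union keeps the macro usable with const/volatile-qualified
 * pointers while still returning a value of the pointed-to type.
 */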

#define smp_cond_load_relaxed(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})

#define smp_cond_load_acquire(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})
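
/*
 * Instead of busy-polling, both smp_cond_load_*() variants call
 * __cmpwait_relaxed(), which re-reads the location exclusively and executes
 * WFE if it still holds the last observed value; a write from another CPU
 * clears the exclusive monitor and wakes the waiter, at which point the
 * loop re-evaluates cond_expr.
 */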

#include <asm-generic/barrier.h>

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_BARRIER_H */