/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({		\
	long ______r;						\
	static struct ftrace_likely_data			\
		__aligned(4)					\
		__section(_ftrace_annotated_branch)		\
		______f = {					\
		.data.func = __func__,				\
		.data.file = __FILE__,				\
		.data.line = __LINE__,				\
	};							\
	______r = __builtin_expect(!!(x), expect);		\
	ftrace_likely_update(&______f, ______r,			\
			     expect, is_constant);		\
	______r;						\
})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section(_ftrace_branch)		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
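
/*
 * Illustrative sketch, not part of this header: annotate the expected
 * direction of a branch so the compiler lays out the common path first.
 * The error path below is assumed to be rare; handle_error() is a
 * hypothetical helper:
 *
 *	err = do_something();
 *	if (unlikely(err))
 *		return handle_error(err);
 */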

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
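
/*
 * Illustrative sketch, not part of this header: barrier_data() is what
 * lets memzero_explicit() in lib/string.c guarantee that a final wipe of
 * sensitive data is not elided as a dead store:
 *
 *	memset(s, 0, count);
 *	barrier_data(s);
 */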

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION

/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({					\
	asm volatile("%c0:\n\t"					\
		     ".pushsection .discard.reachable\n\t"	\
		     ".long %c0b - .\n\t"			\
		     ".popsection\n\t" : : "i" (__COUNTER__));	\
})
#define annotate_unreachable() ({				\
	asm volatile("%c0:\n\t"					\
		     ".pushsection .discard.unreachable\n\t"	\
		     ".long %c0b - .\n\t"			\
		     ".popsection\n\t" : : "i" (__COUNTER__));	\
})

#define ASM_UNREACHABLE						\
	"999:\n\t"						\
	".pushsection .discard.unreachable\n\t"			\
	".long 999b - .\n\t"					\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)
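
/*
 * Illustrative sketch, not part of this header: the BPF interpreter in
 * kernel/bpf/core.c marks its computed-goto dispatch table this way so
 * objtool can follow the indirect jumps. A minimal, hypothetical shape:
 *
 *	static const void * const jumptable[] __annotate_jump_table = {
 *		[OP_ADD] = &&do_add,
 *		[OP_RET] = &&do_ret,
 *	};
 *	goto *jumptable[insn->opcode];
 */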

#ifdef CONFIG_DEBUG_ENTRY
/* Begin/end of an instrumentation safe region */
#define instrumentation_begin() ({				\
	asm volatile("%c0: nop\n\t"				\
		     ".pushsection .discard.instr_begin\n\t"	\
		     ".long %c0b - .\n\t"			\
		     ".popsection\n\t" : : "i" (__COUNTER__));	\
})

/*
 * Because instrumentation_{begin,end}() can nest, objtool validation considers
 * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
 * When the value is greater than 0, we consider instrumentation allowed.
 *
 * There is a problem with code like:
 *
 * noinstr void foo(void)
 * {
 *	instrumentation_begin();
 *	...
 *	if (cond) {
 *		instrumentation_begin();
 *		...
 *		instrumentation_end();
 *	}
 *	bar();
 *	instrumentation_end();
 * }
 *
 * If instrumentation_end() were an empty label, like all the other
 * annotations, the inner _end(), which is at the end of a conditional block,
 * would land on the instruction after the block.
 *
 * If we then consider the sum of the !cond path, we'll see that the call to
 * bar() happens with a 0 value, even though we meant it to happen with a
 * positive value.
 *
 * To avoid this, have _end() be a NOP instruction: this ensures it will be
 * part of the conditional block and does not escape.
 */
#define instrumentation_end() ({				\
	asm volatile("%c0: nop\n\t"				\
		     ".pushsection .discard.instr_end\n\t"	\
		     ".long %c0b - .\n\t"			\
		     ".popsection\n\t" : : "i" (__COUNTER__));	\
})
#endif /* CONFIG_DEBUG_ENTRY */

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif /* CONFIG_STACK_VALIDATION */

#ifndef instrumentation_begin
#define instrumentation_begin()		do { } while (0)
#define instrumentation_end()		do { } while (0)
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif

#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
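
/*
 * Illustrative sketch, not part of this header: tell the compiler and
 * objtool that control cannot fall past an exhaustive switch. The enum
 * values and helpers are hypothetical:
 *
 *	switch (mode) {
 *	case MODE_A:
 *		return do_a();
 *	case MODE_B:
 *		return do_b();
 *	}
 *	unreachable();
 */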

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * only at runtime.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
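
/*
 * Illustrative sketch, not part of this header: keep a hypothetical
 * vector entry alive under linker dead-code elimination even though
 * nothing references its symbol from C:
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry);
 */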

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
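
/*
 * Illustrative sketch, not part of this header: offset a pointer while
 * hiding the arithmetic from the compiler, so it cannot assume the
 * result still points into the original object. Per-CPU accessors use
 * this pattern; the names here are hypothetical:
 *
 *	ptr = RELOC_HIDE(&base_var, per_cpu_offset);
 */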

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)					\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
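
/*
 * Illustrative sketch, not part of this header: hide an intermediate
 * value from the optimizer so it cannot short-circuit or re-derive it,
 * a trick used by constant-time comparison helpers. The variables are
 * hypothetical:
 *
 *	int diff = a ^ b;
 *	OPTIMIZER_HIDE_VAR(diff);
 *	return diff != 0;
 */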

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>

/*
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven. One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.
 */
#define data_race(expr)						\
({								\
	__unqual_scalar_typeof(({ expr; })) __v = ({		\
		__kcsan_disable_current();			\
		expr;						\
	});							\
	__kcsan_enable_current();				\
	__v;							\
})
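
/*
 * Illustrative sketch, not part of this header: read a statistics
 * counter that racing writers update without synchronization, telling
 * KCSAN the race is intentional. 'stats' is hypothetical:
 *
 *	total += data_race(stats->nr_events);
 */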

/*
 * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
 * atomicity or dependency ordering guarantees. Note that this may result
 * in tears!
 */
#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))

#define __READ_ONCE_SCALAR(x)					\
({								\
	__unqual_scalar_typeof(x) __x = __READ_ONCE(x);		\
	smp_read_barrier_depends();				\
	(typeof(x))__x;						\
})

#define READ_ONCE(x)						\
({								\
	compiletime_assert_rwonce_type(x);			\
	__READ_ONCE_SCALAR(x);					\
})

#define __WRITE_ONCE(x, val)					\
do {								\
	*(volatile typeof(x) *)&(x) = (val);			\
} while (0)

#define WRITE_ONCE(x, val)					\
do {								\
	compiletime_assert_rwonce_type(x);			\
	__WRITE_ONCE(x, val);					\
} while (0)
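
/*
 * Illustrative sketch, not part of this header: a flag shared between
 * process context and an interrupt handler, per use case (1) above.
 * 'done' is a hypothetical shared int:
 *
 *	writer:	WRITE_ONCE(done, 1);
 *
 *	reader:	while (!READ_ONCE(done))
 *			cpu_relax();
 */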

static __no_sanitize_or_inline
unsigned long __read_once_word_nocheck(const void *addr)
{
	return __READ_ONCE(*(unsigned long *)addr);
}

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
 * word from memory atomically but without telling KASAN/KCSAN. This is
 * usually used by unwinding code when walking the stack of a running process.
 */
#define READ_ONCE_NOCHECK(x)						\
({									\
	unsigned long __x;						\
	compiletime_assert(sizeof(x) == sizeof(__x),			\
		"Unsupported access size for READ_ONCE_NOCHECK().");	\
	__x = __read_once_word_nocheck(&(x));				\
	smp_read_barrier_depends();					\
	(typeof(x))__x;							\
})
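
/*
 * Illustrative sketch, not part of this header: an unwinder peeking at a
 * possibly-racing stack slot, where a KASAN/KCSAN report would be noise.
 * 'sp' is a hypothetical stack address:
 *
 *	unsigned long val = READ_ONCE_NOCHECK(*(unsigned long *)sp);
 */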

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(.discard.addressable) __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
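
/*
 * Illustrative sketch, not part of this header: keep a hypothetical
 * function emitted so inline asm elsewhere can name its symbol:
 *
 *	static void my_trampoline(void);
 *	__ADDRESSABLE(my_trampoline);
 */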

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
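
/*
 * Illustrative sketch, not part of this header: recover absolute
 * pointers from a table of 32-bit self-relative offsets, where each
 * entry stores 'target - &entry' (the relative-extable layout). The
 * table and index are hypothetical:
 *
 *	void *target = offset_to_ptr(&rel_table[i]);
 */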

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif

#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif

#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
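
/*
 * Illustrative sketch, not part of this header: fail the build, from
 * function scope, when a layout assumption breaks. The struct is
 * hypothetical:
 *
 *	compiletime_assert(sizeof(struct example_hw_desc) == 64,
 *			   "hardware descriptor must be 64 bytes");
 */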

#define compiletime_assert_atomic_type(t)			\
	compiletime_assert(__native_word(t),			\
		"Need native word sized stores/loads for atomicity.")

/*
 * Yes, this permits 64-bit accesses on 32-bit architectures. These will
 * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
 * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
 * (e.g. a virtual address) and a strong prevailing wind.
 */
#define compiletime_assert_rwonce_type(t)					\
	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
		"Unsupported access size for {READ,WRITE}_ONCE().")

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
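
/*
 * For example, this is how ARRAY_SIZE() in <linux/kernel.h> rejects
 * pointers at compile time:
 *
 *	#define ARRAY_SIZE(arr) \
 *		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 */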

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#endif /* __LINUX_COMPILER_H */