1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
10 #include <objtool/builtin.h>
11 #include <objtool/cfi.h>
12 #include <objtool/arch.h>
13 #include <objtool/check.h>
14 #include <objtool/special.h>
15 #include <objtool/warn.h>
16 #include <objtool/endianness.h>
18 #include <linux/objtool.h>
19 #include <linux/hashtable.h>
20 #include <linux/kernel.h>
21 #include <linux/static_call_types.h>
24 struct list_head list;
25 struct instruction *insn;
29 struct cfi_init_state initial_func_cfi;
31 struct instruction *find_insn(struct objtool_file *file,
32 struct section *sec, unsigned long offset)
34 struct instruction *insn;
36 hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
37 if (insn->sec == sec && insn->offset == offset)
44 static struct instruction *next_insn_same_sec(struct objtool_file *file,
45 struct instruction *insn)
47 struct instruction *next = list_next_entry(insn, list);
49 if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
55 static struct instruction *next_insn_same_func(struct objtool_file *file,
56 struct instruction *insn)
58 struct instruction *next = list_next_entry(insn, list);
59 struct symbol *func = insn->func;
64 if (&next->list != &file->insn_list && next->func == func)
67 /* Check if we're already in the subfunction: */
68 if (func == func->cfunc)
71 /* Move to the subfunction: */
72 return find_insn(file, func->cfunc->sec, func->cfunc->offset);
75 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
76 struct instruction *insn)
78 struct instruction *prev = list_prev_entry(insn, list);
80 if (&prev->list != &file->insn_list && prev->func == insn->func)
86 #define func_for_each_insn(file, func, insn) \
87 for (insn = find_insn(file, func->sec, func->offset); \
89 insn = next_insn_same_func(file, insn))
91 #define sym_for_each_insn(file, sym, insn) \
92 for (insn = find_insn(file, sym->sec, sym->offset); \
93 insn && &insn->list != &file->insn_list && \
94 insn->sec == sym->sec && \
95 insn->offset < sym->offset + sym->len; \
96 insn = list_next_entry(insn, list))
98 #define sym_for_each_insn_continue_reverse(file, sym, insn) \
99 for (insn = list_prev_entry(insn, list); \
100 &insn->list != &file->insn_list && \
101 insn->sec == sym->sec && insn->offset >= sym->offset; \
102 insn = list_prev_entry(insn, list))
104 #define sec_for_each_insn_from(file, insn) \
105 for (; insn; insn = next_insn_same_sec(file, insn))
107 #define sec_for_each_insn_continue(file, insn) \
108 for (insn = next_insn_same_sec(file, insn); insn; \
109 insn = next_insn_same_sec(file, insn))
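/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * file): the iterator macros above are the usual way to walk decoded
 * instructions, e.g. scanning a function for a return:
 *
 *	struct instruction *insn;
 *
 *	func_for_each_insn(file, func, insn) {
 *		if (insn->type == INSN_RETURN)
 *			return true;
 *	}
 */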
111 static bool is_sibling_call(struct instruction *insn)
114 * Assume only ELF functions can make sibling calls. This ensures
115 * sibling call detection consistency between vmlinux.o and individual objects.
121 /* An indirect jump is either a sibling call or a jump to a table. */
122 if (insn->type == INSN_JUMP_DYNAMIC)
123 return list_empty(&insn->alts);
125 /* add_jump_destinations() sets insn->call_dest for sibling calls. */
126 return (is_static_jump(insn) && insn->call_dest);
130 * Check whether the given function is a "noreturn" function.
132 * For global functions which are outside the scope of this object file, we
133 * have to keep a manual list of them.
135 * For local functions, we have to detect them manually by simply looking for
136 * the lack of a return instruction.
138 static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
142 struct instruction *insn;
146 * Unfortunately these functions have to be hard-coded because the noreturn
147 * attribute isn't provided in ELF data.
149 static const char * const global_noreturns[] = {
154 "__module_put_and_exit",
160 "machine_real_restart",
161 "rewind_stack_do_exit",
162 "kunit_try_catch_throw",
169 if (func->bind == STB_WEAK)
172 if (func->bind == STB_GLOBAL)
173 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
174 if (!strcmp(func->name, global_noreturns[i]))
180 insn = find_insn(file, func->sec, func->offset);
184 func_for_each_insn(file, func, insn) {
187 if (insn->type == INSN_RETURN)
195 * A function can have a sibling call instead of a return. In that
196 * case, the function's dead-end status depends on whether the target
197 * of the sibling call returns.
199 func_for_each_insn(file, func, insn) {
200 if (is_sibling_call(insn)) {
201 struct instruction *dest = insn->jump_dest;
204 /* sibling call to another file */
207 /* local sibling call */
208 if (recursion == 5) {
210 * Infinite recursion: two functions have
211 * sibling calls to each other. This is a very
212 * rare case. It means they aren't dead ends.
217 return __dead_end_function(file, dest->func, recursion+1);
224 static bool dead_end_function(struct objtool_file *file, struct symbol *func)
226 return __dead_end_function(file, func, 0);
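/*
 * Illustrative note (added, not from the original source): validate_branch()
 * below uses this to stop following a code path once it calls a function that
 * never returns, roughly:
 *
 *	if (dead_end_function(file, insn->call_dest))
 *		return 0;
 */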
229 static void init_cfi_state(struct cfi_state *cfi)
233 for (i = 0; i < CFI_NUM_REGS; i++) {
234 cfi->regs[i].base = CFI_UNDEFINED;
235 cfi->vals[i].base = CFI_UNDEFINED;
237 cfi->cfa.base = CFI_UNDEFINED;
238 cfi->drap_reg = CFI_UNDEFINED;
239 cfi->drap_offset = -1;
242 static void init_insn_state(struct insn_state *state, struct section *sec)
244 memset(state, 0, sizeof(*state));
245 init_cfi_state(&state->cfi);
248 * We need the full vmlinux for noinstr validation; otherwise we cannot
249 * correctly determine insn->call_dest->sec (external symbols do
250 * not have a section).
252 if (vmlinux && noinstr && sec)
253 state->noinstr = sec->noinstr;
257 * Call the arch-specific instruction decoder for all the instructions and add
258 * them to the global instruction list.
260 static int decode_instructions(struct objtool_file *file)
264 unsigned long offset;
265 struct instruction *insn;
266 unsigned long nr_insns = 0;
269 for_each_sec(file, sec) {
271 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
274 if (strcmp(sec->name, ".altinstr_replacement") &&
275 strcmp(sec->name, ".altinstr_aux") &&
276 strncmp(sec->name, ".discard.", 9))
279 if (!strcmp(sec->name, ".noinstr.text") ||
280 !strcmp(sec->name, ".entry.text"))
283 for (offset = 0; offset < sec->len; offset += insn->len) {
284 insn = malloc(sizeof(*insn));
286 WARN("malloc failed");
289 memset(insn, 0, sizeof(*insn));
290 INIT_LIST_HEAD(&insn->alts);
291 INIT_LIST_HEAD(&insn->stack_ops);
292 init_cfi_state(&insn->cfi);
295 insn->offset = offset;
297 ret = arch_decode_instruction(file->elf, sec, offset,
299 &insn->len, &insn->type,
305 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
306 list_add_tail(&insn->list, &file->insn_list);
310 list_for_each_entry(func, &sec->symbol_list, list) {
311 if (func->type != STT_FUNC || func->alias != func)
314 if (!find_insn(file, sec, func->offset)) {
315 WARN("%s(): can't find starting instruction",
320 sym_for_each_insn(file, func, insn)
326 printf("nr_insns: %lu\n", nr_insns);
335 static struct instruction *find_last_insn(struct objtool_file *file,
338 struct instruction *insn = NULL;
340 unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;
342 for (offset = sec->len - 1; offset >= end && !insn; offset--)
343 insn = find_insn(file, sec, offset);
349 * Mark "ud2" instructions and manually annotated dead ends.
351 static int add_dead_ends(struct objtool_file *file)
355 struct instruction *insn;
358 * By default, "ud2" is a dead end unless otherwise annotated, because
359 * GCC 7 inserts it for certain divide-by-zero cases.
361 for_each_insn(file, insn)
362 if (insn->type == INSN_BUG)
363 insn->dead_end = true;
366 * Check for manually annotated dead ends.
368 sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
372 list_for_each_entry(reloc, &sec->reloc_list, list) {
373 if (reloc->sym->type != STT_SECTION) {
374 WARN("unexpected relocation symbol type in %s", sec->name);
377 insn = find_insn(file, reloc->sym->sec, reloc->addend);
379 insn = list_prev_entry(insn, list);
380 else if (reloc->addend == reloc->sym->sec->len) {
381 insn = find_last_insn(file, reloc->sym->sec);
383 WARN("can't find unreachable insn at %s+0x%x",
384 reloc->sym->sec->name, reloc->addend);
388 WARN("can't find unreachable insn at %s+0x%x",
389 reloc->sym->sec->name, reloc->addend);
393 insn->dead_end = true;
398 * These manually annotated reachable checks are needed for GCC 4.4,
399 * where the Linux unreachable() macro isn't supported. In that case
400 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's not.
403 sec = find_section_by_name(file->elf, ".rela.discard.reachable");
407 list_for_each_entry(reloc, &sec->reloc_list, list) {
408 if (reloc->sym->type != STT_SECTION) {
409 WARN("unexpected relocation symbol type in %s", sec->name);
412 insn = find_insn(file, reloc->sym->sec, reloc->addend);
414 insn = list_prev_entry(insn, list);
415 else if (reloc->addend == reloc->sym->sec->len) {
416 insn = find_last_insn(file, reloc->sym->sec);
418 WARN("can't find reachable insn at %s+0x%x",
419 reloc->sym->sec->name, reloc->addend);
423 WARN("can't find reachable insn at %s+0x%x",
424 reloc->sym->sec->name, reloc->addend);
428 insn->dead_end = false;
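/*
 * Background note (added, not from the original source): the
 * .discard.unreachable and .discard.reachable relocations handled above are
 * emitted by the annotate_unreachable()/annotate_reachable() helpers in
 * <linux/compiler.h>, which record the address of the annotated instruction
 * in those discard sections.
 */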
434 static int create_static_call_sections(struct objtool_file *file)
437 struct static_call_site *site;
438 struct instruction *insn;
439 struct symbol *key_sym;
440 char *key_name, *tmp;
443 sec = find_section_by_name(file->elf, ".static_call_sites");
445 INIT_LIST_HEAD(&file->static_call_list);
446 WARN("file already has .static_call_sites section, skipping");
450 if (list_empty(&file->static_call_list))
454 list_for_each_entry(insn, &file->static_call_list, call_node)
457 sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
458 sizeof(struct static_call_site), idx);
463 list_for_each_entry(insn, &file->static_call_list, call_node) {
465 site = (struct static_call_site *)sec->data->d_buf + idx;
466 memset(site, 0, sizeof(struct static_call_site));
468 /* populate reloc for 'addr' */
469 if (elf_add_reloc_to_insn(file->elf, sec,
470 idx * sizeof(struct static_call_site),
472 insn->sec, insn->offset))
475 /* find key symbol */
476 key_name = strdup(insn->call_dest->name);
481 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
482 STATIC_CALL_TRAMP_PREFIX_LEN)) {
483 WARN("static_call: trampoline name malformed: %s", key_name);
486 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
487 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
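/*
 * Illustrative sketch (added; assumes the usual "__SCT__"/"__SCK__" prefixes
 * from <linux/static_call_types.h>): the in-place rewrite above turns a
 * trampoline name such as "__SCT__preempt_schedule" into the key name
 * "__SCK__preempt_schedule", which is then looked up below.
 */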
489 key_sym = find_symbol_by_name(file->elf, tmp);
492 WARN("static_call: can't find static_call_key symbol: %s", tmp);
497 * For modules, the key might not be exported, which
498 * means the module can make static calls but isn't
499 * allowed to change them.
501 * In that case we temporarily set the key to be the
502 * trampoline address. This is fixed up in
503 * static_call_add_module().
505 key_sym = insn->call_dest;
509 /* populate reloc for 'key' */
510 if (elf_add_reloc(file->elf, sec,
511 idx * sizeof(struct static_call_site) + 4,
512 R_X86_64_PC32, key_sym,
513 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
522 static int create_mcount_loc_sections(struct objtool_file *file)
526 struct instruction *insn;
529 sec = find_section_by_name(file->elf, "__mcount_loc");
531 INIT_LIST_HEAD(&file->mcount_loc_list);
532 WARN("file already has __mcount_loc section, skipping");
536 if (list_empty(&file->mcount_loc_list))
540 list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node)
543 sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
548 list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node) {
550 loc = (unsigned long *)sec->data->d_buf + idx;
551 memset(loc, 0, sizeof(unsigned long));
553 if (elf_add_reloc_to_insn(file->elf, sec,
554 idx * sizeof(unsigned long),
556 insn->sec, insn->offset))
566 * Warnings shouldn't be reported for ignored functions.
568 static void add_ignores(struct objtool_file *file)
570 struct instruction *insn;
575 sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
579 list_for_each_entry(reloc, &sec->reloc_list, list) {
580 switch (reloc->sym->type) {
586 func = find_func_by_offset(reloc->sym->sec, reloc->addend);
592 WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
596 func_for_each_insn(file, func, insn)
602 * This is a whitelist of functions that are allowed to be called with AC set.
603 * The list is meant to be minimal and only contains compiler instrumentation
604 * ABI and a few functions used to implement *_{to,from}_user() functions.
606 * These functions must not directly change AC, but may PUSHF/POPF.
608 static const char *uaccess_safe_builtin[] = {
612 /* KASAN out-of-line */
613 "__asan_loadN_noabort",
614 "__asan_load1_noabort",
615 "__asan_load2_noabort",
616 "__asan_load4_noabort",
617 "__asan_load8_noabort",
618 "__asan_load16_noabort",
619 "__asan_storeN_noabort",
620 "__asan_store1_noabort",
621 "__asan_store2_noabort",
622 "__asan_store4_noabort",
623 "__asan_store8_noabort",
624 "__asan_store16_noabort",
625 "__kasan_check_read",
626 "__kasan_check_write",
628 "__asan_report_load_n_noabort",
629 "__asan_report_load1_noabort",
630 "__asan_report_load2_noabort",
631 "__asan_report_load4_noabort",
632 "__asan_report_load8_noabort",
633 "__asan_report_load16_noabort",
634 "__asan_report_store_n_noabort",
635 "__asan_report_store1_noabort",
636 "__asan_report_store2_noabort",
637 "__asan_report_store4_noabort",
638 "__asan_report_store8_noabort",
639 "__asan_report_store16_noabort",
641 "__kcsan_check_access",
642 "kcsan_found_watchpoint",
643 "kcsan_setup_watchpoint",
644 "kcsan_check_scoped_accesses",
645 "kcsan_disable_current",
646 "kcsan_enable_current_nowarn",
651 "__tsan_write_range",
662 "__tsan_read_write1",
663 "__tsan_read_write2",
664 "__tsan_read_write4",
665 "__tsan_read_write8",
666 "__tsan_read_write16",
667 "__tsan_atomic8_load",
668 "__tsan_atomic16_load",
669 "__tsan_atomic32_load",
670 "__tsan_atomic64_load",
671 "__tsan_atomic8_store",
672 "__tsan_atomic16_store",
673 "__tsan_atomic32_store",
674 "__tsan_atomic64_store",
675 "__tsan_atomic8_exchange",
676 "__tsan_atomic16_exchange",
677 "__tsan_atomic32_exchange",
678 "__tsan_atomic64_exchange",
679 "__tsan_atomic8_fetch_add",
680 "__tsan_atomic16_fetch_add",
681 "__tsan_atomic32_fetch_add",
682 "__tsan_atomic64_fetch_add",
683 "__tsan_atomic8_fetch_sub",
684 "__tsan_atomic16_fetch_sub",
685 "__tsan_atomic32_fetch_sub",
686 "__tsan_atomic64_fetch_sub",
687 "__tsan_atomic8_fetch_and",
688 "__tsan_atomic16_fetch_and",
689 "__tsan_atomic32_fetch_and",
690 "__tsan_atomic64_fetch_and",
691 "__tsan_atomic8_fetch_or",
692 "__tsan_atomic16_fetch_or",
693 "__tsan_atomic32_fetch_or",
694 "__tsan_atomic64_fetch_or",
695 "__tsan_atomic8_fetch_xor",
696 "__tsan_atomic16_fetch_xor",
697 "__tsan_atomic32_fetch_xor",
698 "__tsan_atomic64_fetch_xor",
699 "__tsan_atomic8_fetch_nand",
700 "__tsan_atomic16_fetch_nand",
701 "__tsan_atomic32_fetch_nand",
702 "__tsan_atomic64_fetch_nand",
703 "__tsan_atomic8_compare_exchange_strong",
704 "__tsan_atomic16_compare_exchange_strong",
705 "__tsan_atomic32_compare_exchange_strong",
706 "__tsan_atomic64_compare_exchange_strong",
707 "__tsan_atomic8_compare_exchange_weak",
708 "__tsan_atomic16_compare_exchange_weak",
709 "__tsan_atomic32_compare_exchange_weak",
710 "__tsan_atomic64_compare_exchange_weak",
711 "__tsan_atomic8_compare_exchange_val",
712 "__tsan_atomic16_compare_exchange_val",
713 "__tsan_atomic32_compare_exchange_val",
714 "__tsan_atomic64_compare_exchange_val",
715 "__tsan_atomic_thread_fence",
716 "__tsan_atomic_signal_fence",
720 "__sanitizer_cov_trace_pc",
721 "__sanitizer_cov_trace_const_cmp1",
722 "__sanitizer_cov_trace_const_cmp2",
723 "__sanitizer_cov_trace_const_cmp4",
724 "__sanitizer_cov_trace_const_cmp8",
725 "__sanitizer_cov_trace_cmp1",
726 "__sanitizer_cov_trace_cmp2",
727 "__sanitizer_cov_trace_cmp4",
728 "__sanitizer_cov_trace_cmp8",
729 "__sanitizer_cov_trace_switch",
731 "ubsan_type_mismatch_common",
732 "__ubsan_handle_type_mismatch",
733 "__ubsan_handle_type_mismatch_v1",
734 "__ubsan_handle_shift_out_of_bounds",
736 "csum_partial_copy_generic",
738 "copy_mc_fragile_handle_tail",
739 "copy_mc_enhanced_fast_string",
740 "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
744 static void add_uaccess_safe(struct objtool_file *file)
752 for (name = uaccess_safe_builtin; *name; name++) {
753 func = find_symbol_by_name(file->elf, *name);
757 func->uaccess_safe = true;
762 * FIXME: For now, just ignore any alternatives which add retpolines. This is
763 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
764 * But it at least allows objtool to understand the control flow *around* the retpoline.
767 static int add_ignore_alternatives(struct objtool_file *file)
771 struct instruction *insn;
773 sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
777 list_for_each_entry(reloc, &sec->reloc_list, list) {
778 if (reloc->sym->type != STT_SECTION) {
779 WARN("unexpected relocation symbol type in %s", sec->name);
783 insn = find_insn(file, reloc->sym->sec, reloc->addend);
785 WARN("bad .discard.ignore_alts entry");
789 insn->ignore_alts = true;
795 __weak bool arch_is_retpoline(struct symbol *sym)
801 * Find the destination instructions for all jumps.
803 static int add_jump_destinations(struct objtool_file *file)
805 struct instruction *insn;
807 struct section *dest_sec;
808 unsigned long dest_off;
810 for_each_insn(file, insn) {
811 if (!is_static_jump(insn))
814 reloc = find_reloc_by_dest_range(file->elf, insn->sec,
815 insn->offset, insn->len);
817 dest_sec = insn->sec;
818 dest_off = arch_jump_destination(insn);
819 } else if (reloc->sym->type == STT_SECTION) {
820 dest_sec = reloc->sym->sec;
821 dest_off = arch_dest_reloc_offset(reloc->addend);
822 } else if (arch_is_retpoline(reloc->sym)) {
824 * Retpoline jumps are really dynamic jumps in
825 * disguise, so convert them accordingly.
827 if (insn->type == INSN_JUMP_UNCONDITIONAL)
828 insn->type = INSN_JUMP_DYNAMIC;
830 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
832 list_add_tail(&insn->call_node,
833 &file->retpoline_call_list);
835 insn->retpoline_safe = true;
837 } else if (insn->func) {
838 /* internal or external sibling call (with reloc) */
839 insn->call_dest = reloc->sym;
840 if (insn->call_dest->static_call_tramp) {
841 list_add_tail(&insn->call_node,
842 &file->static_call_list);
845 } else if (reloc->sym->sec->idx) {
846 dest_sec = reloc->sym->sec;
847 dest_off = reloc->sym->sym.st_value +
848 arch_dest_reloc_offset(reloc->addend);
850 /* non-func asm code jumping to another file */
854 insn->jump_dest = find_insn(file, dest_sec, dest_off);
855 if (!insn->jump_dest) {
858 * This is a special case where an alt instruction
859 * jumps past the end of the section. These are
860 * handled later in handle_group_alt().
862 if (!strcmp(insn->sec->name, ".altinstr_replacement"))
865 WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
866 insn->sec, insn->offset, dest_sec->name,
872 * Cross-function jump.
874 if (insn->func && insn->jump_dest->func &&
875 insn->func != insn->jump_dest->func) {
878 * For GCC 8+, create parent/child links for any cold
879 * subfunctions. This is _mostly_ redundant with a
880 * similar initialization in read_symbols().
882 * If a function has aliases, we want the *first* such
883 * function in the symbol table to be the subfunction's
884 * parent. In that case we overwrite the
885 * initialization done in read_symbols().
887 * However this code can't completely replace the
888 * read_symbols() code because this doesn't detect the
889 * case where the parent function's only reference to a
890 * subfunction is through a jump table.
892 if (!strstr(insn->func->name, ".cold") &&
893 strstr(insn->jump_dest->func->name, ".cold")) {
894 insn->func->cfunc = insn->jump_dest->func;
895 insn->jump_dest->func->pfunc = insn->func;
897 } else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
898 insn->jump_dest->offset == insn->jump_dest->func->offset) {
900 /* internal sibling call (without reloc) */
901 insn->call_dest = insn->jump_dest->func;
902 if (insn->call_dest->static_call_tramp) {
903 list_add_tail(&insn->call_node,
904 &file->static_call_list);
913 static void remove_insn_ops(struct instruction *insn)
915 struct stack_op *op, *tmp;
917 list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
923 static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
925 struct symbol *call_dest;
927 call_dest = find_func_by_offset(sec, offset);
929 call_dest = find_symbol_by_offset(sec, offset);
935 * Find the destination instructions for all calls.
937 static int add_call_destinations(struct objtool_file *file)
939 struct instruction *insn;
940 unsigned long dest_off;
943 for_each_insn(file, insn) {
944 if (insn->type != INSN_CALL)
947 reloc = find_reloc_by_dest_range(file->elf, insn->sec,
948 insn->offset, insn->len);
950 dest_off = arch_jump_destination(insn);
951 insn->call_dest = find_call_destination(insn->sec, dest_off);
956 if (!insn->call_dest) {
957 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
961 if (insn->func && insn->call_dest->type != STT_FUNC) {
962 WARN_FUNC("unsupported call to non-function",
963 insn->sec, insn->offset);
967 } else if (reloc->sym->type == STT_SECTION) {
968 dest_off = arch_dest_reloc_offset(reloc->addend);
969 insn->call_dest = find_call_destination(reloc->sym->sec,
971 if (!insn->call_dest) {
972 WARN_FUNC("can't find call dest symbol at %s+0x%lx",
973 insn->sec, insn->offset,
974 reloc->sym->sec->name,
979 } else if (arch_is_retpoline(reloc->sym)) {
981 * Retpoline calls are really dynamic calls in
982 * disguise, so convert them accordingly.
984 insn->type = INSN_CALL_DYNAMIC;
985 insn->retpoline_safe = true;
987 list_add_tail(&insn->call_node,
988 &file->retpoline_call_list);
990 remove_insn_ops(insn);
994 insn->call_dest = reloc->sym;
996 if (insn->call_dest && insn->call_dest->static_call_tramp) {
997 list_add_tail(&insn->call_node,
998 &file->static_call_list);
1002 * Many compilers cannot disable KCOV with a function attribute,
1003 * so they need a little help: NOP out any KCOV calls from noinstr text.
1006 if (insn->sec->noinstr &&
1007 !strncmp(insn->call_dest->name, "__sanitizer_cov_", 16)) {
1009 reloc->type = R_NONE;
1010 elf_write_reloc(file->elf, reloc);
1013 elf_write_insn(file->elf, insn->sec,
1014 insn->offset, insn->len,
1015 arch_nop_insn(insn->len));
1016 insn->type = INSN_NOP;
1019 if (mcount && !strcmp(insn->call_dest->name, "__fentry__")) {
1021 reloc->type = R_NONE;
1022 elf_write_reloc(file->elf, reloc);
1025 elf_write_insn(file->elf, insn->sec,
1026 insn->offset, insn->len,
1027 arch_nop_insn(insn->len));
1029 insn->type = INSN_NOP;
1031 list_add_tail(&insn->mcount_loc_node,
1032 &file->mcount_loc_list);
1036 * Whatever stack impact regular CALLs have should be undone
1037 * by the RETURN of the called function.
1039 * Annotated intra-function calls retain the stack_ops but
1040 * are converted to JUMPs; see read_intra_function_calls().
1042 remove_insn_ops(insn);
1049 * The .alternatives section requires some extra special care over and above
1050 * other special sections because alternatives are patched in place.
1052 static int handle_group_alt(struct objtool_file *file,
1053 struct special_alt *special_alt,
1054 struct instruction *orig_insn,
1055 struct instruction **new_insn)
1057 struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
1058 struct alt_group *orig_alt_group, *new_alt_group;
1059 unsigned long dest_off;
1062 orig_alt_group = malloc(sizeof(*orig_alt_group));
1063 if (!orig_alt_group) {
1064 WARN("malloc failed");
1067 orig_alt_group->cfi = calloc(special_alt->orig_len,
1068 sizeof(struct cfi_state *));
1069 if (!orig_alt_group->cfi) {
1070 WARN("calloc failed");
1074 last_orig_insn = NULL;
1076 sec_for_each_insn_from(file, insn) {
1077 if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1080 insn->alt_group = orig_alt_group;
1081 last_orig_insn = insn;
1083 orig_alt_group->orig_group = NULL;
1084 orig_alt_group->first_insn = orig_insn;
1085 orig_alt_group->last_insn = last_orig_insn;
1088 new_alt_group = malloc(sizeof(*new_alt_group));
1089 if (!new_alt_group) {
1090 WARN("malloc failed");
1094 if (special_alt->new_len < special_alt->orig_len) {
1096 * Insert a fake nop at the end to make the replacement
1097 * alt_group the same size as the original. This is needed to
1098 * allow propagate_alt_cfi() to do its magic. When the last
1099 * instruction affects the stack, the instruction after it (the
1100 * nop) will propagate the new state to the shared CFI array.
1102 nop = malloc(sizeof(*nop));
1104 WARN("malloc failed");
1107 memset(nop, 0, sizeof(*nop));
1108 INIT_LIST_HEAD(&nop->alts);
1109 INIT_LIST_HEAD(&nop->stack_ops);
1110 init_cfi_state(&nop->cfi);
1112 nop->sec = special_alt->new_sec;
1113 nop->offset = special_alt->new_off + special_alt->new_len;
1114 nop->len = special_alt->orig_len - special_alt->new_len;
1115 nop->type = INSN_NOP;
1116 nop->func = orig_insn->func;
1117 nop->alt_group = new_alt_group;
1118 nop->ignore = orig_insn->ignore_alts;
1121 if (!special_alt->new_len) {
1127 sec_for_each_insn_from(file, insn) {
1128 struct reloc *alt_reloc;
1130 if (insn->offset >= special_alt->new_off + special_alt->new_len)
1133 last_new_insn = insn;
1135 insn->ignore = orig_insn->ignore_alts;
1136 insn->func = orig_insn->func;
1137 insn->alt_group = new_alt_group;
1140 * Since alternative replacement code is copy/pasted by the
1141 * kernel after applying relocations, generally such code can't
1142 * have relative-address relocation references to outside the
1143 * .altinstr_replacement section, unless the arch's
1144 * alternatives code can adjust the relative offsets accordingly.
1147 alt_reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1148 insn->offset, insn->len);
1150 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1152 WARN_FUNC("unsupported relocation in alternatives section",
1153 insn->sec, insn->offset);
1157 if (!is_static_jump(insn))
1160 if (!insn->immediate)
1163 dest_off = arch_jump_destination(insn);
1164 if (dest_off == special_alt->new_off + special_alt->new_len)
1165 insn->jump_dest = next_insn_same_sec(file, last_orig_insn);
1167 if (!insn->jump_dest) {
1168 WARN_FUNC("can't find alternative jump destination",
1169 insn->sec, insn->offset);
1174 if (!last_new_insn) {
1175 WARN_FUNC("can't find last new alternative instruction",
1176 special_alt->new_sec, special_alt->new_off);
1181 list_add(&nop->list, &last_new_insn->list);
1183 new_alt_group->orig_group = orig_alt_group;
1184 new_alt_group->first_insn = *new_insn;
1185 new_alt_group->last_insn = nop ? : last_new_insn;
1186 new_alt_group->cfi = orig_alt_group->cfi;
1191 * A jump table entry can either convert a nop to a jump or a jump to a nop.
1192 * If the original instruction is a jump, make the alt entry an effective nop
1193 * by just skipping the original instruction.
1195 static int handle_jump_alt(struct objtool_file *file,
1196 struct special_alt *special_alt,
1197 struct instruction *orig_insn,
1198 struct instruction **new_insn)
1200 if (orig_insn->type == INSN_NOP)
1203 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
1204 WARN_FUNC("unsupported instruction at jump label",
1205 orig_insn->sec, orig_insn->offset);
1209 *new_insn = list_next_entry(orig_insn, list);
1214 * Read all the special sections which have alternate instructions which can be
1215 * patched in or redirected to at runtime. Each instruction having alternate
1216 * instruction(s) has them added to its insn->alts list, which will be
1217 * traversed in validate_branch().
1219 static int add_special_section_alts(struct objtool_file *file)
1221 struct list_head special_alts;
1222 struct instruction *orig_insn, *new_insn;
1223 struct special_alt *special_alt, *tmp;
1224 struct alternative *alt;
1227 ret = special_get_alts(file->elf, &special_alts);
1231 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
1233 orig_insn = find_insn(file, special_alt->orig_sec,
1234 special_alt->orig_off);
1236 WARN_FUNC("special: can't find orig instruction",
1237 special_alt->orig_sec, special_alt->orig_off);
1243 if (!special_alt->group || special_alt->new_len) {
1244 new_insn = find_insn(file, special_alt->new_sec,
1245 special_alt->new_off);
1247 WARN_FUNC("special: can't find new instruction",
1248 special_alt->new_sec,
1249 special_alt->new_off);
1255 if (special_alt->group) {
1256 if (!special_alt->orig_len) {
1257 WARN_FUNC("empty alternative entry",
1258 orig_insn->sec, orig_insn->offset);
1262 ret = handle_group_alt(file, special_alt, orig_insn,
1266 } else if (special_alt->jump_or_nop) {
1267 ret = handle_jump_alt(file, special_alt, orig_insn,
1273 alt = malloc(sizeof(*alt));
1275 WARN("malloc failed");
1280 alt->insn = new_insn;
1281 alt->skip_orig = special_alt->skip_orig;
1282 orig_insn->ignore_alts |= special_alt->skip_alt;
1283 list_add_tail(&alt->list, &orig_insn->alts);
1285 list_del(&special_alt->list);
1293 static int add_jump_table(struct objtool_file *file, struct instruction *insn,
1294 struct reloc *table)
1296 struct reloc *reloc = table;
1297 struct instruction *dest_insn;
1298 struct alternative *alt;
1299 struct symbol *pfunc = insn->func->pfunc;
1300 unsigned int prev_offset = 0;
1303 * Each @reloc is a switch table relocation which points to the target instruction.
1306 list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {
1308 /* Check for the end of the table: */
1309 if (reloc != table && reloc->jump_table_start)
1312 /* Make sure the table entries are consecutive: */
1313 if (prev_offset && reloc->offset != prev_offset + 8)
1316 /* Detect function pointers from contiguous objects: */
1317 if (reloc->sym->sec == pfunc->sec &&
1318 reloc->addend == pfunc->offset)
1321 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
1325 /* Make sure the destination is in the same function: */
1326 if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
1329 alt = malloc(sizeof(*alt));
1331 WARN("malloc failed");
1335 alt->insn = dest_insn;
1336 list_add_tail(&alt->list, &insn->alts);
1337 prev_offset = reloc->offset;
1341 WARN_FUNC("can't find switch jump table",
1342 insn->sec, insn->offset);
1350 * find_jump_table() - Given a dynamic jump, find the switch jump table
1351 * associated with it.
1353 static struct reloc *find_jump_table(struct objtool_file *file,
1354 struct symbol *func,
1355 struct instruction *insn)
1357 struct reloc *table_reloc;
1358 struct instruction *dest_insn, *orig_insn = insn;
1361 * Backward search using the @first_jump_src links; these help avoid
1362 * much of the 'in between' code, which keeps us from getting confused by it.
1366 insn && insn->func && insn->func->pfunc == func;
1367 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
1369 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
1372 /* allow small jumps within the range */
1373 if (insn->type == INSN_JUMP_UNCONDITIONAL &&
1375 (insn->jump_dest->offset <= insn->offset ||
1376 insn->jump_dest->offset > orig_insn->offset))
1379 table_reloc = arch_find_switch_table(file, insn);
1382 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
1383 if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
1393 * First pass: Mark the head of each jump table so that in the next pass,
1394 * we know when a given jump table ends and the next one starts.
1396 static void mark_func_jump_tables(struct objtool_file *file,
1397 struct symbol *func)
1399 struct instruction *insn, *last = NULL;
1400 struct reloc *reloc;
1402 func_for_each_insn(file, func, insn) {
1407 * Store back-pointers for unconditional forward jumps such
1408 * that find_jump_table() can back-track using those and
1409 * avoid some potentially confusing code.
1411 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
1412 insn->offset > last->offset &&
1413 insn->jump_dest->offset > insn->offset &&
1414 !insn->jump_dest->first_jump_src) {
1416 insn->jump_dest->first_jump_src = insn;
1417 last = insn->jump_dest;
1420 if (insn->type != INSN_JUMP_DYNAMIC)
1423 reloc = find_jump_table(file, func, insn);
1425 reloc->jump_table_start = true;
1426 insn->jump_table = reloc;
1431 static int add_func_jump_tables(struct objtool_file *file,
1432 struct symbol *func)
1434 struct instruction *insn;
1437 func_for_each_insn(file, func, insn) {
1438 if (!insn->jump_table)
1441 ret = add_jump_table(file, insn, insn->jump_table);
1450 * For some switch statements, gcc generates a jump table in the .rodata
1451 * section which contains a list of addresses within the function to jump to.
1452 * This finds these jump tables and adds them to the insn->alts lists.
1454 static int add_jump_table_alts(struct objtool_file *file)
1456 struct section *sec;
1457 struct symbol *func;
1463 for_each_sec(file, sec) {
1464 list_for_each_entry(func, &sec->symbol_list, list) {
1465 if (func->type != STT_FUNC)
1468 mark_func_jump_tables(file, func);
1469 ret = add_func_jump_tables(file, func);
1478 static void set_func_state(struct cfi_state *state)
1480 state->cfa = initial_func_cfi.cfa;
1481 memcpy(&state->regs, &initial_func_cfi.regs,
1482 CFI_NUM_REGS * sizeof(struct cfi_reg));
1483 state->stack_size = initial_func_cfi.cfa.offset;
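/*
 * Note (added for clarity): on x86-64, arch_initial_func_cfi_state() sets the
 * initial CFA to %rsp + 8 with the return address stored at CFA - 8, so the
 * starting stack_size copied here is just the pushed return address.
 */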
1486 static int read_unwind_hints(struct objtool_file *file)
1488 struct section *sec, *relocsec;
1489 struct reloc *reloc;
1490 struct unwind_hint *hint;
1491 struct instruction *insn;
1494 sec = find_section_by_name(file->elf, ".discard.unwind_hints");
1498 relocsec = sec->reloc;
1500 WARN("missing .rela.discard.unwind_hints section");
1504 if (sec->len % sizeof(struct unwind_hint)) {
1505 WARN("struct unwind_hint size mismatch");
1511 for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
1512 hint = (struct unwind_hint *)sec->data->d_buf + i;
1514 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
1516 WARN("can't find reloc for unwind_hints[%d]", i);
1520 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1522 WARN("can't find insn for unwind_hints[%d]", i);
1528 if (hint->type == UNWIND_HINT_TYPE_FUNC) {
1529 set_func_state(&insn->cfi);
1533 if (arch_decode_hint_reg(insn, hint->sp_reg)) {
1534 WARN_FUNC("unsupported unwind_hint sp base reg %d",
1535 insn->sec, insn->offset, hint->sp_reg);
1539 insn->cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
1540 insn->cfi.type = hint->type;
1541 insn->cfi.end = hint->end;
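/*
 * Illustrative note (added): these entries originate from UNWIND_HINT*()
 * annotations in asm (e.g. UNWIND_HINT_EMPTY or UNWIND_HINT_REGS), each of
 * which records a sp base register, sp offset and hint type for the annotated
 * instruction in .discard.unwind_hints.
 */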
1547 static int read_retpoline_hints(struct objtool_file *file)
1549 struct section *sec;
1550 struct instruction *insn;
1551 struct reloc *reloc;
1553 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
1557 list_for_each_entry(reloc, &sec->reloc_list, list) {
1558 if (reloc->sym->type != STT_SECTION) {
1559 WARN("unexpected relocation symbol type in %s", sec->name);
1563 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1565 WARN("bad .discard.retpoline_safe entry");
1569 if (insn->type != INSN_JUMP_DYNAMIC &&
1570 insn->type != INSN_CALL_DYNAMIC) {
1571 WARN_FUNC("retpoline_safe hint not an indirect jump/call",
1572 insn->sec, insn->offset);
1576 insn->retpoline_safe = true;
1582 static int read_instr_hints(struct objtool_file *file)
1584 struct section *sec;
1585 struct instruction *insn;
1586 struct reloc *reloc;
1588 sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
1592 list_for_each_entry(reloc, &sec->reloc_list, list) {
1593 if (reloc->sym->type != STT_SECTION) {
1594 WARN("unexpected relocation symbol type in %s", sec->name);
1598 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1600 WARN("bad .discard.instr_end entry");
1607 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
1611 list_for_each_entry(reloc, &sec->reloc_list, list) {
1612 if (reloc->sym->type != STT_SECTION) {
1613 WARN("unexpected relocation symbol type in %s", sec->name);
1617 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1619 WARN("bad .discard.instr_begin entry");
1629 static int read_intra_function_calls(struct objtool_file *file)
1631 struct instruction *insn;
1632 struct section *sec;
1633 struct reloc *reloc;
1635 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
1639 list_for_each_entry(reloc, &sec->reloc_list, list) {
1640 unsigned long dest_off;
1642 if (reloc->sym->type != STT_SECTION) {
1643 WARN("unexpected relocation symbol type in %s",
1648 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1650 WARN("bad .discard.intra_function_call entry");
1654 if (insn->type != INSN_CALL) {
1655 WARN_FUNC("intra_function_call not a direct call",
1656 insn->sec, insn->offset);
1661 * Treat intra-function CALLs as JMPs, but with a stack_op.
1662 * See add_call_destinations(), which strips stack_ops from
1665 insn->type = INSN_JUMP_UNCONDITIONAL;
1667 dest_off = insn->offset + insn->len + insn->immediate;
1668 insn->jump_dest = find_insn(file, insn->sec, dest_off);
1669 if (!insn->jump_dest) {
1670 WARN_FUNC("can't find call dest at %s+0x%lx",
1671 insn->sec, insn->offset,
1672 insn->sec->name, dest_off);
1680 static int read_static_call_tramps(struct objtool_file *file)
1682 struct section *sec;
1683 struct symbol *func;
1685 for_each_sec(file, sec) {
1686 list_for_each_entry(func, &sec->symbol_list, list) {
1687 if (func->bind == STB_GLOBAL &&
1688 !strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
1689 strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
1690 func->static_call_tramp = true;
1697 static void mark_rodata(struct objtool_file *file)
1699 struct section *sec;
1703 * Search for the following rodata sections, each of which can
1704 * potentially contain jump tables:
1706 * - .rodata: can contain GCC switch tables
1707 * - .rodata.<func>: same, if -fdata-sections is being used
1708 * - .rodata..c_jump_table: contains C annotated jump tables
1710 * .rodata.str1.* sections are ignored; they don't contain jump tables.
1712 for_each_sec(file, sec) {
1713 if (!strncmp(sec->name, ".rodata", 7) &&
1714 !strstr(sec->name, ".str1.")) {
1720 file->rodata = found;
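/*
 * Illustrative sketch (added; assumes the __annotate_jump_table helper from
 * <linux/compiler.h>): a C-annotated jump table is a computed-goto dispatch
 * table placed in .rodata..c_jump_table, e.g.:
 *
 *	static const void *dispatch[] __annotate_jump_table = {
 *		&&op_add, &&op_sub,
 *	};
 *	goto *dispatch[opcode];
 */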
1723 __weak int arch_rewrite_retpolines(struct objtool_file *file)
1728 static int decode_sections(struct objtool_file *file)
1734 ret = decode_instructions(file);
1738 ret = add_dead_ends(file);
1743 add_uaccess_safe(file);
1745 ret = add_ignore_alternatives(file);
1750 * Must be before add_{jump,call}_destinations().
1752 ret = read_static_call_tramps(file);
1757 * Must be before add_special_section_alts() as that depends on
1758 * jump_dest being set.
1760 ret = add_jump_destinations(file);
1764 ret = add_special_section_alts(file);
1769 * Must be before add_call_destinations(); it changes INSN_CALL to INSN_JUMP_UNCONDITIONAL.
1772 ret = read_intra_function_calls(file);
1776 ret = add_call_destinations(file);
1780 ret = add_jump_table_alts(file);
1784 ret = read_unwind_hints(file);
1788 ret = read_retpoline_hints(file);
1792 ret = read_instr_hints(file);
1797 * Must be after add_special_section_alts(), since this will emit
1798 * alternatives. Must be after add_{jump,call}_destination(), since
1799 * those create the call insn lists.
1801 ret = arch_rewrite_retpolines(file);
1808 static bool is_fentry_call(struct instruction *insn)
1810 if (insn->type == INSN_CALL && insn->call_dest &&
1811 insn->call_dest->type == STT_NOTYPE &&
1812 !strcmp(insn->call_dest->name, "__fentry__"))
1818 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
1820 struct cfi_state *cfi = &state->cfi;
1823 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
1826 if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
1829 if (cfi->stack_size != initial_func_cfi.cfa.offset)
1832 for (i = 0; i < CFI_NUM_REGS; i++) {
1833 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
1834 cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
1841 static bool check_reg_frame_pos(const struct cfi_reg *reg,
1842 int expected_offset)
1844 return reg->base == CFI_CFA &&
1845 reg->offset == expected_offset;
1848 static bool has_valid_stack_frame(struct insn_state *state)
1850 struct cfi_state *cfi = &state->cfi;
1852 if (cfi->cfa.base == CFI_BP &&
1853 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
1854 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
1857 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
1863 static int update_cfi_state_regs(struct instruction *insn,
1864 struct cfi_state *cfi,
1865 struct stack_op *op)
1867 struct cfi_reg *cfa = &cfi->cfa;
1869 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
1873 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
1877 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
1880 /* add immediate to sp */
1881 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
1882 op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
1883 cfa->offset -= op->src.offset;
1888 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
1890 if (arch_callee_saved_reg(reg) &&
1891 cfi->regs[reg].base == CFI_UNDEFINED) {
1892 cfi->regs[reg].base = base;
1893 cfi->regs[reg].offset = offset;
1897 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
1899 cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
1900 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
1904 * A note about DRAP stack alignment:
1906 * GCC has the concept of a DRAP register, which is used to help keep track of
1907 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
1908 * register. The typical DRAP pattern is:
1910 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
1911 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
1912 * 41 ff 72 f8 pushq -0x8(%r10)
1914 * 48 89 e5 mov %rsp,%rbp
1921 * 49 8d 62 f8 lea -0x8(%r10),%rsp
1924 * There are some variations in the epilogues, like:
1932 * 49 8d 62 f8 lea -0x8(%r10),%rsp
1937 * 4c 8b 55 e8 mov -0x18(%rbp),%r10
1938 * 48 8b 5d e0 mov -0x20(%rbp),%rbx
1939 * 4c 8b 65 f0 mov -0x10(%rbp),%r12
1940 * 4c 8b 6d f8 mov -0x8(%rbp),%r13
1942 * 49 8d 62 f8 lea -0x8(%r10),%rsp
1945 * Sometimes r13 is used as the DRAP register, in which case it's saved and
1946 * restored beforehand:
1949 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
1950 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
1952 * 49 8d 65 f0 lea -0x10(%r13),%rsp
1956 static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi,
1957 struct stack_op *op)
1959 struct cfi_reg *cfa = &cfi->cfa;
1960 struct cfi_reg *regs = cfi->regs;
1962 /* stack operations don't make sense with an undefined CFA */
1963 if (cfa->base == CFI_UNDEFINED) {
1965 WARN_FUNC("undefined stack state", insn->sec, insn->offset);
1971 if (cfi->type == UNWIND_HINT_TYPE_REGS ||
1972 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
1973 return update_cfi_state_regs(insn, cfi, op);
1975 switch (op->dest.type) {
1978 switch (op->src.type) {
1981 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
1982 cfa->base == CFI_SP &&
1983 check_reg_frame_pos(®s[CFI_BP], -cfa->offset)) {
1985 /* mov %rsp, %rbp */
1986 cfa->base = op->dest.reg;
1987 cfi->bp_scratch = false;
1990 else if (op->src.reg == CFI_SP &&
1991 op->dest.reg == CFI_BP && cfi->drap) {
1993 /* drap: mov %rsp, %rbp */
1994 regs[CFI_BP].base = CFI_BP;
1995 regs[CFI_BP].offset = -cfi->stack_size;
1996 cfi->bp_scratch = false;
1999 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2004 * This is needed for the rare case where GCC does something dumb like: lea 0x8(%rsp), %rcx ... mov %rcx, %rsp
2011 cfi->vals[op->dest.reg].base = CFI_CFA;
2012 cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2015 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2016 cfa->base == CFI_BP) {
2021 * Restore the original stack pointer (Clang).
2023 cfi->stack_size = -cfi->regs[CFI_BP].offset;
2026 else if (op->dest.reg == cfa->base) {
2028 /* mov %reg, %rsp */
2029 if (cfa->base == CFI_SP &&
2030 cfi->vals[op->src.reg].base == CFI_CFA) {
2033 * This is needed for the rare case
2034 * where GCC does something dumb like:
2036 * lea 0x8(%rsp), %rcx
2040 cfa->offset = -cfi->vals[op->src.reg].offset;
2041 cfi->stack_size = cfa->offset;
2043 } else if (cfa->base == CFI_SP &&
2044 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2045 cfi->vals[op->src.reg].offset == cfa->offset) {
2050 * 1: mov %rsp, (%[tos])
2051 * 2: mov %[tos], %rsp
2057 * 1 - places a pointer to the previous
2058 * stack at the Top-of-Stack of the
2061 * 2 - switches to the new stack.
2063 * 3 - pops the Top-of-Stack to restore
2064 * the original stack.
2066 * Note: we set base to SP_INDIRECT
2067 * here and preserve offset. Therefore
2068 * when the unwinder reaches ToS it
2069 * will dereference SP and then add the
2070 * offset to find the next frame, IOW:
2073 cfa->base = CFI_SP_INDIRECT;
2076 cfa->base = CFI_UNDEFINED;
2081 else if (op->dest.reg == CFI_SP &&
2082 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2083 cfi->vals[op->src.reg].offset == cfa->offset) {
2086 * The same stack swizzle case 2) as above. But
2087 * because we can't change cfa->base, case 3)
2088 * will become a regular POP. Pretend we're a
2089 * PUSH so things don't go unbalanced.
2091 cfi->stack_size += 8;
2098 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2101 cfi->stack_size -= op->src.offset;
2102 if (cfa->base == CFI_SP)
2103 cfa->offset -= op->src.offset;
2107 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2109 /* lea disp(%rbp), %rsp */
2110 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2114 if (!cfi->drap && op->src.reg == CFI_SP &&
2115 op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
2116 check_reg_frame_pos(®s[CFI_BP], -cfa->offset + op->src.offset)) {
2118 /* lea disp(%rsp), %rbp */
2120 cfa->offset -= op->src.offset;
2121 cfi->bp_scratch = false;
2125 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2127 /* drap: lea disp(%rsp), %drap */
2128 cfi->drap_reg = op->dest.reg;
2131 * lea disp(%rsp), %reg
2133 * This is needed for the rare case where GCC
2134 * does something dumb like:
2136 * lea 0x8(%rsp), %rcx
2140 cfi->vals[op->dest.reg].base = CFI_CFA;
2141 cfi->vals[op->dest.reg].offset = \
2142 -cfi->stack_size + op->src.offset;
2147 if (cfi->drap && op->dest.reg == CFI_SP &&
2148 op->src.reg == cfi->drap_reg) {
2150 /* drap: lea disp(%drap), %rsp */
2152 cfa->offset = cfi->stack_size = -op->src.offset;
2153 cfi->drap_reg = CFI_UNDEFINED;
2158 if (op->dest.reg == cfi->cfa.base) {
2159 WARN_FUNC("unsupported stack register modification",
2160 insn->sec, insn->offset);
2167 if (op->dest.reg != CFI_SP ||
2168 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
2169 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
2170 WARN_FUNC("unsupported stack pointer realignment",
2171 insn->sec, insn->offset);
2175 if (cfi->drap_reg != CFI_UNDEFINED) {
2176 /* drap: and imm, %rsp */
2177 cfa->base = cfi->drap_reg;
2178 cfa->offset = cfi->stack_size = 0;
2183 * Older versions of GCC (4.8ish) realign the stack
2184 * without DRAP, with a frame pointer.
2191 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
2193 /* pop %rsp; # restore from a stack swizzle */
2198 if (!cfi->drap && op->dest.reg == cfa->base) {
2204 if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
2205 op->dest.reg == cfi->drap_reg &&
2206 cfi->drap_offset == -cfi->stack_size) {
2208 /* drap: pop %drap */
2209 cfa->base = cfi->drap_reg;
2211 cfi->drap_offset = -1;
2213 } else if (regs[op->dest.reg].offset == -cfi->stack_size) {
2216 restore_reg(cfi, op->dest.reg);
2219 cfi->stack_size -= 8;
2220 if (cfa->base == CFI_SP)
2225 case OP_SRC_REG_INDIRECT:
2226 if (!cfi->drap && op->dest.reg == cfa->base &&
2227 op->dest.reg == CFI_BP) {
2229 /* mov disp(%rsp), %rbp */
2231 cfa->offset = cfi->stack_size;
2234 if (cfi->drap && op->src.reg == CFI_BP &&
2235 op->src.offset == cfi->drap_offset) {
2237 /* drap: mov disp(%rbp), %drap */
2238 cfa->base = cfi->drap_reg;
2240 cfi->drap_offset = -1;
2243 if (cfi->drap && op->src.reg == CFI_BP &&
2244 op->src.offset == regs[op->dest.reg].offset) {
2246 /* drap: mov disp(%rbp), %reg */
2247 restore_reg(cfi, op->dest.reg);
2249 } else if (op->src.reg == cfa->base &&
2250 op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
2252 /* mov disp(%rbp), %reg */
2253 /* mov disp(%rsp), %reg */
2254 restore_reg(cfi, op->dest.reg);
2256 } else if (op->src.reg == CFI_SP &&
2257 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
2259 /* mov disp(%rsp), %reg */
2260 restore_reg(cfi, op->dest.reg);
2266 WARN_FUNC("unknown stack-related instruction",
2267 insn->sec, insn->offset);
2275 cfi->stack_size += 8;
2276 if (cfa->base == CFI_SP)
2279 if (op->src.type != OP_SRC_REG)
2283 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2285 /* drap: push %drap */
2286 cfa->base = CFI_BP_INDIRECT;
2287 cfa->offset = -cfi->stack_size;
2289 /* save drap so we know when to restore it */
2290 cfi->drap_offset = -cfi->stack_size;
2292 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
2294 /* drap: push %rbp */
2295 cfi->stack_size = 0;
2299 /* drap: push %reg */
2300 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
2306 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
2309 /* detect when asm code uses rbp as a scratch register */
2310 if (!no_fp && insn->func && op->src.reg == CFI_BP &&
2311 cfa->base != CFI_BP)
2312 cfi->bp_scratch = true;
2315 case OP_DEST_REG_INDIRECT:
2318 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2320 /* drap: mov %drap, disp(%rbp) */
2321 cfa->base = CFI_BP_INDIRECT;
2322 cfa->offset = op->dest.offset;
2324 /* save drap offset so we know when to restore it */
2325 cfi->drap_offset = op->dest.offset;
2328 /* drap: mov reg, disp(%rbp) */
2329 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
2332 } else if (op->dest.reg == cfa->base) {
2334 /* mov reg, disp(%rbp) */
2335 /* mov reg, disp(%rsp) */
2336 save_reg(cfi, op->src.reg, CFI_CFA,
2337 op->dest.offset - cfi->cfa.offset);
2339 } else if (op->dest.reg == CFI_SP) {
2341 /* mov reg, disp(%rsp) */
2342 save_reg(cfi, op->src.reg, CFI_CFA,
2343 op->dest.offset - cfi->stack_size);
2345 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
2347 /* mov %rsp, (%reg); # setup a stack swizzle. */
2348 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
2349 cfi->vals[op->dest.reg].offset = cfa->offset;
2355 if ((!cfi->drap && cfa->base != CFI_BP) ||
2356 (cfi->drap && cfa->base != cfi->drap_reg)) {
2357 WARN_FUNC("leave instruction with modified stack frame",
2358 insn->sec, insn->offset);
2362 /* leave (mov %rbp, %rsp; pop %rbp) */
2364 cfi->stack_size = -cfi->regs[CFI_BP].offset - 8;
2365 restore_reg(cfi, CFI_BP);
2375 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
2376 WARN_FUNC("unknown stack-related memory operation",
2377 insn->sec, insn->offset);
2382 cfi->stack_size -= 8;
2383 if (cfa->base == CFI_SP)
2389 WARN_FUNC("unknown stack-related instruction",
2390 insn->sec, insn->offset);
2398 * The stack layouts of alternative instructions can sometimes diverge when
2399 * they have stack modifications. That's fine as long as the potential stack
2400 * layouts don't conflict at any given potential instruction boundary.
2402 * Flatten the CFIs of the different alternative code streams (both original
2403 * and replacement) into a single shared CFI array which can be used to detect
2404 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
2406 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
2408 struct cfi_state **alt_cfi;
2411 if (!insn->alt_group)
2414 alt_cfi = insn->alt_group->cfi;
2415 group_off = insn->offset - insn->alt_group->first_insn->offset;
2417 if (!alt_cfi[group_off]) {
2418 alt_cfi[group_off] = &insn->cfi;
2420 if (memcmp(alt_cfi[group_off], &insn->cfi, sizeof(struct cfi_state))) {
2421 WARN_FUNC("stack layout conflict in alternatives",
2422 insn->sec, insn->offset);
2430 static int handle_insn_ops(struct instruction *insn, struct insn_state *state)
2432 struct stack_op *op;
2434 list_for_each_entry(op, &insn->stack_ops, list) {
2436 if (update_cfi_state(insn, &state->cfi, op))
2439 if (!insn->alt_group)
2442 if (op->dest.type == OP_DEST_PUSHF) {
2443 if (!state->uaccess_stack) {
2444 state->uaccess_stack = 1;
2445 } else if (state->uaccess_stack >> 31) {
2446 WARN_FUNC("PUSHF stack exhausted",
2447 insn->sec, insn->offset);
2450 state->uaccess_stack <<= 1;
2451 state->uaccess_stack |= state->uaccess;
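/*
 * Note (added for clarity): uaccess_stack is a shift register with a sentinel
 * bit at the bottom; each PUSHF shifts the current uaccess state in, and the
 * matching POPF below shifts it back out, so saving and restoring the flags
 * preserves the tracked AC state.
 */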
2454 if (op->src.type == OP_SRC_POPF) {
2455 if (state->uaccess_stack) {
2456 state->uaccess = state->uaccess_stack & 1;
2457 state->uaccess_stack >>= 1;
2458 if (state->uaccess_stack == 1)
2459 state->uaccess_stack = 0;
2467 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
2469 struct cfi_state *cfi1 = &insn->cfi;
2472 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
2474 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
2475 insn->sec, insn->offset,
2476 cfi1->cfa.base, cfi1->cfa.offset,
2477 cfi2->cfa.base, cfi2->cfa.offset);
2479 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
2480 for (i = 0; i < CFI_NUM_REGS; i++) {
2481 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
2482 sizeof(struct cfi_reg)))
2485 WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
2486 insn->sec, insn->offset,
2487 i, cfi1->regs[i].base, cfi1->regs[i].offset,
2488 i, cfi2->regs[i].base, cfi2->regs[i].offset);
2492 } else if (cfi1->type != cfi2->type) {
2494 WARN_FUNC("stack state mismatch: type1=%d type2=%d",
2495 insn->sec, insn->offset, cfi1->type, cfi2->type);
2497 } else if (cfi1->drap != cfi2->drap ||
2498 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
2499 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
2501 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
2502 insn->sec, insn->offset,
2503 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
2504 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
2512 static inline bool func_uaccess_safe(struct symbol *func)
2515 return func->uaccess_safe;
2520 static inline const char *call_dest_name(struct instruction *insn)
2522 if (insn->call_dest)
2523 return insn->call_dest->name;
2528 static inline bool noinstr_call_dest(struct symbol *func)
2531 * We can't deal with indirect function calls at present;
2532 * assume they're instrumented.
2538 * If the symbol is from a noinstr section, we're good.
2540 if (func->sec->noinstr)
2544 * The __ubsan_handle_*() calls are like WARN(): they only happen when
2545 * something 'BAD' has happened. At the risk of taking the machine down,
2546 * let them proceed to get the message out.
2548 if (!strncmp(func->name, "__ubsan_handle_", 15))
2554 static int validate_call(struct instruction *insn, struct insn_state *state)
2556 if (state->noinstr && state->instr <= 0 &&
2557 !noinstr_call_dest(insn->call_dest)) {
2558 WARN_FUNC("call to %s() leaves .noinstr.text section",
2559 insn->sec, insn->offset, call_dest_name(insn));
2563 if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
2564 WARN_FUNC("call to %s() with UACCESS enabled",
2565 insn->sec, insn->offset, call_dest_name(insn));
2570 WARN_FUNC("call to %s() with DF set",
2571 insn->sec, insn->offset, call_dest_name(insn));
2578 static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
2580 if (has_modified_stack_frame(insn, state)) {
2581 WARN_FUNC("sibling call from callable instruction with modified stack frame",
2582 insn->sec, insn->offset);
2586 return validate_call(insn, state);
2589 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
2591 if (state->noinstr && state->instr > 0) {
2592 WARN_FUNC("return with instrumentation enabled",
2593 insn->sec, insn->offset);
2597 if (state->uaccess && !func_uaccess_safe(func)) {
2598 WARN_FUNC("return with UACCESS enabled",
2599 insn->sec, insn->offset);
2603 if (!state->uaccess && func_uaccess_safe(func)) {
2604 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
2605 insn->sec, insn->offset);
2610 WARN_FUNC("return with DF set",
2611 insn->sec, insn->offset);
2615 if (func && has_modified_stack_frame(insn, state)) {
2616 WARN_FUNC("return with modified stack frame",
2617 insn->sec, insn->offset);
2621 if (state->cfi.bp_scratch) {
2622 WARN_FUNC("BP used as a scratch register",
2623 insn->sec, insn->offset);

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 */
	if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
		return next_insn_same_sec(file, alt_group->orig_group->last_insn);

	return next_insn_same_sec(file, insn);
}

/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/stack-validation.txt.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		visited = 1 << state.uaccess;
		if (insn->visited) {
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			if (insn->visited & visited)
				return 0;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint)
			state.cfi = insn->cfi;
		else
			insn->cfi = state.cfi;

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(insn, &state);
			if (ret)
				return ret;

			if (!no_fp && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			if (dead_end_function(file, insn->call_dest))
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;

			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		insn = next_insn;
	}

	return 0;
}
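
/*
 * Validate all instructions reachable from UNWIND_HINT annotations, using
 * the hinted CFI state as the starting point.
 */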
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int ret, warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(&state, sec);

	if (sec) {
		insn = find_insn(file, sec, 0);
		if (!insn)
			return 0;
	} else {
		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
	}

	while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
		if (insn->hint && !insn->visited) {
			ret = validate_branch(file, insn->func, insn, state);
			if (ret && backtrace)
				BT_FUNC("<=== (hint)", insn);
			warnings += ret;
		}

		insn = list_next_entry(insn, list);
	}

	return warnings;
}
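
/*
 * In a retpoline build, warn about any indirect jump or call that has not
 * been annotated as retpoline-safe.
 */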
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC)
			continue;

		if (insn->retpoline_safe)
			continue;

		/*
		 * .init.text code is run before userspace and thus doesn't
		 * strictly need retpolines, except for modules: they are
		 * loaded late and very much do need retpolines in their
		 * .init.text.
		 */
		if (!strcmp(insn->sec->name, ".init.text") && !module)
			continue;

		WARN_FUNC("indirect %s found in RETPOLINE build",
			  insn->sec, insn->offset,
			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");

		warnings++;
	}

	return warnings;
}
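
/* Helpers for recognizing compiler-inserted KASAN/UBSAN calls. */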
static bool is_kasan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
}

static bool is_ubsan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name,
			"__ubsan_handle_builtin_unreachable"));
}
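
/*
 * Decide whether an unreachable instruction is expected and can be ignored:
 * unused exception fixups, alternative replacements, compiler-inserted UD2s
 * after __noreturn calls, and KASAN/UBSAN instrumentation.
 */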
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP)
		return true;

	/*
	 * Ignore any unused exceptions.  This can happen when a whitelisted
	 * function has an exception table entry.
	 *
	 * Also ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".fixup") ||
	    !strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!insn->func)
		return false;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}
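
/* Validate a single function symbol, starting at its first instruction. */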
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn->func, insn, *state);
	if (ret && backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}
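
/* Validate every STT_FUNC symbol in the given section. */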
static int validate_section(struct objtool_file *file, struct section *sec)
{
	struct insn_state state;
	struct symbol *func;
	int warnings = 0;

	list_for_each_entry(func, &sec->symbol_list, list) {
		if (func->type != STT_FUNC)
			continue;

		init_insn_state(&state, sec);
		set_func_state(&state.cfi);

		warnings += validate_symbol(file, sec, func, &state);
	}

	return warnings;
}
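
/*
 * For vmlinux.o, validate only the sections that need the stricter
 * whole-kernel rules: .noinstr.text and .entry.text.
 */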
static int validate_vmlinux_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	sec = find_section_by_name(file->elf, ".noinstr.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	sec = find_section_by_name(file->elf, ".entry.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	return warnings;
}
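
/* Validate all executable sections. */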
static int validate_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	for_each_sec(file, sec) {
		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		warnings += validate_section(file, sec);
	}

	return warnings;
}
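
/* Warn about any instruction that was never visited during validation. */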
static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
		return 1;
	}

	return 0;
}
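
/*
 * Main entry point: decode the object file and run the validation and
 * annotation passes.
 */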
int check(struct objtool_file *file)
{
	int ret, warnings = 0;

	arch_initial_func_cfi_state(&initial_func_cfi);

	ret = decode_sections(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (list_empty(&file->insn_list))
		goto out;

	if (vmlinux && !validate_dup) {
		ret = validate_vmlinux_functions(file);
		if (ret < 0)
			goto out;

		warnings += ret;
		goto out;
	}

	if (retpoline) {
		ret = validate_retpoline(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	ret = validate_functions(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	ret = validate_unwind_hints(file, NULL);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (!warnings) {
		ret = validate_reachable_instructions(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	ret = create_static_call_sections(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

out:
	/*
	 * For now, don't fail the kernel build on fatal warnings.  These
	 * errors are still fairly common due to the growing matrix of
	 * supported toolchains and their recent pace of change.
	 */
	return 0;
}