objtool: Fix objtool regression on x32 systems
[linux-2.6-microblaze.git] / tools / objtool / check.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4  */
5
6 #include <string.h>
7 #include <stdlib.h>
8 #include <inttypes.h>
9 #include <sys/mman.h>
10
11 #include <arch/elf.h>
12 #include <objtool/builtin.h>
13 #include <objtool/cfi.h>
14 #include <objtool/arch.h>
15 #include <objtool/check.h>
16 #include <objtool/special.h>
17 #include <objtool/warn.h>
18 #include <objtool/endianness.h>
19
20 #include <linux/objtool.h>
21 #include <linux/hashtable.h>
22 #include <linux/kernel.h>
23 #include <linux/static_call_types.h>
24
25 struct alternative {
26         struct list_head list;
27         struct instruction *insn;
28         bool skip_orig;
29 };
30
31 static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;
32
33 static struct cfi_init_state initial_func_cfi;
34 static struct cfi_state init_cfi;
35 static struct cfi_state func_cfi;
36
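/* Look up the decoded instruction at the given section and offset. */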
37 struct instruction *find_insn(struct objtool_file *file,
38                               struct section *sec, unsigned long offset)
39 {
40         struct instruction *insn;
41
42         hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
43                 if (insn->sec == sec && insn->offset == offset)
44                         return insn;
45         }
46
47         return NULL;
48 }
49
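/* Return the next instruction in the list, provided it's in the same section. */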
50 static struct instruction *next_insn_same_sec(struct objtool_file *file,
51                                               struct instruction *insn)
52 {
53         struct instruction *next = list_next_entry(insn, list);
54
55         if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
56                 return NULL;
57
58         return next;
59 }
60
61 static struct instruction *next_insn_same_func(struct objtool_file *file,
62                                                struct instruction *insn)
63 {
64         struct instruction *next = list_next_entry(insn, list);
65         struct symbol *func = insn->func;
66
67         if (!func)
68                 return NULL;
69
70         if (&next->list != &file->insn_list && next->func == func)
71                 return next;
72
73         /* Check if we're already in the subfunction: */
74         if (func == func->cfunc)
75                 return NULL;
76
77         /* Move to the subfunction: */
78         return find_insn(file, func->cfunc->sec, func->cfunc->offset);
79 }
80
81 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
82                                                struct instruction *insn)
83 {
84         struct instruction *prev = list_prev_entry(insn, list);
85
86         if (&prev->list != &file->insn_list && prev->func == insn->func)
87                 return prev;
88
89         return NULL;
90 }
91
92 #define func_for_each_insn(file, func, insn)                            \
93         for (insn = find_insn(file, func->sec, func->offset);           \
94              insn;                                                      \
95              insn = next_insn_same_func(file, insn))
96
97 #define sym_for_each_insn(file, sym, insn)                              \
98         for (insn = find_insn(file, sym->sec, sym->offset);             \
99              insn && &insn->list != &file->insn_list &&                 \
100                 insn->sec == sym->sec &&                                \
101                 insn->offset < sym->offset + sym->len;                  \
102              insn = list_next_entry(insn, list))
103
104 #define sym_for_each_insn_continue_reverse(file, sym, insn)             \
105         for (insn = list_prev_entry(insn, list);                        \
106              &insn->list != &file->insn_list &&                         \
107                 insn->sec == sym->sec && insn->offset >= sym->offset;   \
108              insn = list_prev_entry(insn, list))
109
110 #define sec_for_each_insn_from(file, insn)                              \
111         for (; insn; insn = next_insn_same_sec(file, insn))
112
113 #define sec_for_each_insn_continue(file, insn)                          \
114         for (insn = next_insn_same_sec(file, insn); insn;               \
115              insn = next_insn_same_sec(file, insn))
116
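/* Does this indirect jump dispatch through a jump table? */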
117 static bool is_jump_table_jump(struct instruction *insn)
118 {
119         struct alt_group *alt_group = insn->alt_group;
120
121         if (insn->jump_table)
122                 return true;
123
124         /* Retpoline alternative for a jump table? */
125         return alt_group && alt_group->orig_group &&
126                alt_group->orig_group->first_insn->jump_table;
127 }
128
129 static bool is_sibling_call(struct instruction *insn)
130 {
131         /*
132          * Assume only ELF functions can make sibling calls.  This ensures
133          * sibling call detection consistency between vmlinux.o and individual
134          * objects.
135          */
136         if (!insn->func)
137                 return false;
138
139         /* An indirect jump is either a sibling call or a jump to a table. */
140         if (insn->type == INSN_JUMP_DYNAMIC)
141                 return !is_jump_table_jump(insn);
142
143         /* add_jump_destinations() sets insn->call_dest for sibling calls. */
144         return (is_static_jump(insn) && insn->call_dest);
145 }
146
147 /*
148  * This checks to see if the given function is a "noreturn" function.
149  *
150  * For global functions which are outside the scope of this object file, we
151  * have to keep a manual list of them.
152  *
153  * For local functions, we have to detect them manually by simply looking for
154  * the lack of a return instruction.
155  */
156 static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
157                                 int recursion)
158 {
159         int i;
160         struct instruction *insn;
161         bool empty = true;
162
163         /*
164          * Unfortunately these have to be hard coded because the noreturn
165          * attribute isn't provided in ELF data.
166          */
167         static const char * const global_noreturns[] = {
168                 "__stack_chk_fail",
169                 "panic",
170                 "do_exit",
171                 "do_task_dead",
172                 "kthread_exit",
173                 "make_task_dead",
174                 "__module_put_and_kthread_exit",
175                 "kthread_complete_and_exit",
176                 "__reiserfs_panic",
177                 "lbug_with_loc",
178                 "fortify_panic",
179                 "usercopy_abort",
180                 "machine_real_restart",
181                 "rewind_stack_and_make_dead",
182                 "kunit_try_catch_throw",
183                 "xen_start_kernel",
184                 "cpu_bringup_and_idle",
185                 "do_group_exit",
186                 "stop_this_cpu",
187                 "__invalid_creds",
188                 "cpu_startup_entry",
189         };
190
191         if (!func)
192                 return false;
193
194         if (func->bind == STB_WEAK)
195                 return false;
196
197         if (func->bind == STB_GLOBAL)
198                 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
199                         if (!strcmp(func->name, global_noreturns[i]))
200                                 return true;
201
202         if (!func->len)
203                 return false;
204
205         insn = find_insn(file, func->sec, func->offset);
206         if (!insn->func)
207                 return false;
208
209         func_for_each_insn(file, func, insn) {
210                 empty = false;
211
212                 if (insn->type == INSN_RETURN)
213                         return false;
214         }
215
216         if (empty)
217                 return false;
218
219         /*
220          * A function can have a sibling call instead of a return.  In that
221          * case, the function's dead-end status depends on whether the target
222          * of the sibling call returns.
223          */
224         func_for_each_insn(file, func, insn) {
225                 if (is_sibling_call(insn)) {
226                         struct instruction *dest = insn->jump_dest;
227
228                         if (!dest)
229                                 /* sibling call to another file */
230                                 return false;
231
232                         /* local sibling call */
233                         if (recursion == 5) {
234                                 /*
235                                  * Infinite recursion: two functions have
236                                  * sibling calls to each other.  This is a very
237                                  * rare case.  It means they aren't dead ends.
238                                  */
239                                 return false;
240                         }
241
242                         return __dead_end_function(file, dest->func, recursion+1);
243                 }
244         }
245
246         return true;
247 }
248
249 static bool dead_end_function(struct objtool_file *file, struct symbol *func)
250 {
251         return __dead_end_function(file, func, 0);
252 }
253
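/* Initialize a CFI state: all registers and the CFA start out undefined. */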
254 static void init_cfi_state(struct cfi_state *cfi)
255 {
256         int i;
257
258         for (i = 0; i < CFI_NUM_REGS; i++) {
259                 cfi->regs[i].base = CFI_UNDEFINED;
260                 cfi->vals[i].base = CFI_UNDEFINED;
261         }
262         cfi->cfa.base = CFI_UNDEFINED;
263         cfi->drap_reg = CFI_UNDEFINED;
264         cfi->drap_offset = -1;
265 }
266
267 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
268                             struct section *sec)
269 {
270         memset(state, 0, sizeof(*state));
271         init_cfi_state(&state->cfi);
272
273         /*
274          * We need the full vmlinux for noinstr validation, otherwise we
275          * cannot correctly determine insn->call_dest->sec (external symbols do
276          * not have a section).
277          */
278         if (opts.link && opts.noinstr && sec)
279                 state->noinstr = sec->noinstr;
280 }
281
282 static struct cfi_state *cfi_alloc(void)
283 {
284         struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
285         if (!cfi) {
286                 WARN("calloc failed");
287                 exit(1);
288         }
289         nr_cfi++;
290         return cfi;
291 }
292
293 static int cfi_bits;
294 static struct hlist_head *cfi_hash;
295
296 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
297 {
298         return memcmp((void *)cfi1 + sizeof(cfi1->hash),
299                       (void *)cfi2 + sizeof(cfi2->hash),
300                       sizeof(struct cfi_state) - sizeof(struct hlist_node));
301 }
302
303 static inline u32 cfi_key(struct cfi_state *cfi)
304 {
305         return jhash((void *)cfi + sizeof(cfi->hash),
306                      sizeof(*cfi) - sizeof(cfi->hash), 0);
307 }
308
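/*
 * Deduplicate CFI states: return an existing identical state from the hash
 * table if there is one, otherwise insert a copy of @cfi and return that.
 */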
309 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
310 {
311         struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
312         struct cfi_state *obj;
313
314         hlist_for_each_entry(obj, head, hash) {
315                 if (!cficmp(cfi, obj)) {
316                         nr_cfi_cache++;
317                         return obj;
318                 }
319         }
320
321         obj = cfi_alloc();
322         *obj = *cfi;
323         hlist_add_head(&obj->hash, head);
324
325         return obj;
326 }
327
328 static void cfi_hash_add(struct cfi_state *cfi)
329 {
330         struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
331
332         hlist_add_head(&cfi->hash, head);
333 }
334
335 static void *cfi_hash_alloc(unsigned long size)
336 {
337         cfi_bits = max(10, ilog2(size));
338         cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
339                         PROT_READ|PROT_WRITE,
340                         MAP_PRIVATE|MAP_ANON, -1, 0);
341         if (cfi_hash == (void *)-1L) {
342                 WARN("mmap fail cfi_hash");
343                 cfi_hash = NULL;
344         }  else if (opts.stats) {
345                 printf("cfi_bits: %d\n", cfi_bits);
346         }
347
348         return cfi_hash;
349 }
350
351 static unsigned long nr_insns;
352 static unsigned long nr_insns_visited;
353
354 /*
355  * Call the arch-specific instruction decoder for all the instructions and add
356  * them to the global instruction list.
357  */
358 static int decode_instructions(struct objtool_file *file)
359 {
360         struct section *sec;
361         struct symbol *func;
362         unsigned long offset;
363         struct instruction *insn;
364         int ret;
365
366         for_each_sec(file, sec) {
367
368                 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
369                         continue;
370
371                 if (strcmp(sec->name, ".altinstr_replacement") &&
372                     strcmp(sec->name, ".altinstr_aux") &&
373                     strncmp(sec->name, ".discard.", 9))
374                         sec->text = true;
375
376                 if (!strcmp(sec->name, ".noinstr.text") ||
377                     !strcmp(sec->name, ".entry.text"))
378                         sec->noinstr = true;
379
380                 for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
381                         insn = malloc(sizeof(*insn));
382                         if (!insn) {
383                                 WARN("malloc failed");
384                                 return -1;
385                         }
386                         memset(insn, 0, sizeof(*insn));
387                         INIT_LIST_HEAD(&insn->alts);
388                         INIT_LIST_HEAD(&insn->stack_ops);
389                         INIT_LIST_HEAD(&insn->call_node);
390
391                         insn->sec = sec;
392                         insn->offset = offset;
393
394                         ret = arch_decode_instruction(file, sec, offset,
395                                                       sec->sh.sh_size - offset,
396                                                       &insn->len, &insn->type,
397                                                       &insn->immediate,
398                                                       &insn->stack_ops);
399                         if (ret)
400                                 goto err;
401
402                         /*
403                          * By default, "ud2" is a dead end unless otherwise
404                          * annotated, because GCC 7 inserts it for certain
405                          * divide-by-zero cases.
406                          */
407                         if (insn->type == INSN_BUG)
408                                 insn->dead_end = true;
409
410                         hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
411                         list_add_tail(&insn->list, &file->insn_list);
412                         nr_insns++;
413                 }
414
415                 list_for_each_entry(func, &sec->symbol_list, list) {
416                         if (func->type != STT_FUNC || func->alias != func)
417                                 continue;
418
419                         if (!find_insn(file, sec, func->offset)) {
420                                 WARN("%s(): can't find starting instruction",
421                                      func->name);
422                                 return -1;
423                         }
424
425                         sym_for_each_insn(file, func, insn) {
426                                 insn->func = func;
427                                 if (insn->type == INSN_ENDBR && list_empty(&insn->call_node)) {
428                                         if (insn->offset == insn->func->offset) {
429                                                 list_add_tail(&insn->call_node, &file->endbr_list);
430                                                 file->nr_endbr++;
431                                         } else {
432                                                 file->nr_endbr_int++;
433                                         }
434                                 }
435                         }
436                 }
437         }
438
439         if (opts.stats)
440                 printf("nr_insns: %lu\n", nr_insns);
441
442         return 0;
443
444 err:
445         free(insn);
446         return ret;
447 }
448
449 /*
450  * Read the pv_ops[] .data table to find the statically initialized values.
451  */
452 static int add_pv_ops(struct objtool_file *file, const char *symname)
453 {
454         struct symbol *sym, *func;
455         unsigned long off, end;
456         struct reloc *rel;
457         int idx;
458
459         sym = find_symbol_by_name(file->elf, symname);
460         if (!sym)
461                 return 0;
462
463         off = sym->offset;
464         end = off + sym->len;
465         for (;;) {
466                 rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
467                 if (!rel)
468                         break;
469
470                 func = rel->sym;
471                 if (func->type == STT_SECTION)
472                         func = find_symbol_by_offset(rel->sym->sec, rel->addend);
473
474                 idx = (rel->offset - sym->offset) / sizeof(unsigned long);
475
476                 objtool_pv_add(file, idx, func);
477
478                 off = rel->offset + 1;
479                 if (off > end)
480                         break;
481         }
482
483         return 0;
484 }
485
486 /*
487  * Allocate and initialize file->pv_ops[].
488  */
489 static int init_pv_ops(struct objtool_file *file)
490 {
491         static const char *pv_ops_tables[] = {
492                 "pv_ops",
493                 "xen_cpu_ops",
494                 "xen_irq_ops",
495                 "xen_mmu_ops",
496                 NULL,
497         };
498         const char *pv_ops;
499         struct symbol *sym;
500         int idx, nr;
501
502         if (!opts.noinstr)
503                 return 0;
504
505         file->pv_ops = NULL;
506
507         sym = find_symbol_by_name(file->elf, "pv_ops");
508         if (!sym)
509                 return 0;
510
511         nr = sym->len / sizeof(unsigned long);
512         file->pv_ops = calloc(sizeof(struct pv_state), nr);
513         if (!file->pv_ops)
514                 return -1;
515
516         for (idx = 0; idx < nr; idx++)
517                 INIT_LIST_HEAD(&file->pv_ops[idx].targets);
518
519         for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
520                 add_pv_ops(file, pv_ops);
521
522         return 0;
523 }
524
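/* Find the last decoded instruction in a section by scanning back from its end. */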
525 static struct instruction *find_last_insn(struct objtool_file *file,
526                                           struct section *sec)
527 {
528         struct instruction *insn = NULL;
529         unsigned int offset;
530         unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
531
532         for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
533                 insn = find_insn(file, sec, offset);
534
535         return insn;
536 }
537
538 /*
539  * Mark "ud2" instructions and manually annotated dead ends.
540  */
541 static int add_dead_ends(struct objtool_file *file)
542 {
543         struct section *sec;
544         struct reloc *reloc;
545         struct instruction *insn;
546
547         /*
548          * Check for manually annotated dead ends.
549          */
550         sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
551         if (!sec)
552                 goto reachable;
553
554         list_for_each_entry(reloc, &sec->reloc_list, list) {
555                 if (reloc->sym->type != STT_SECTION) {
556                         WARN("unexpected relocation symbol type in %s", sec->name);
557                         return -1;
558                 }
559                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
560                 if (insn)
561                         insn = list_prev_entry(insn, list);
562                 else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
563                         insn = find_last_insn(file, reloc->sym->sec);
564                         if (!insn) {
565                                 WARN("can't find unreachable insn at %s+0x%" PRIx64,
566                                      reloc->sym->sec->name, reloc->addend);
567                                 return -1;
568                         }
569                 } else {
570                         WARN("can't find unreachable insn at %s+0x%" PRIx64,
571                              reloc->sym->sec->name, reloc->addend);
572                         return -1;
573                 }
574
575                 insn->dead_end = true;
576         }
577
578 reachable:
579         /*
580          * These manually annotated reachable checks are needed for GCC 4.4,
581          * where the Linux unreachable() macro isn't supported.  In that case
582          * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
583          * not a dead end.
584          */
585         sec = find_section_by_name(file->elf, ".rela.discard.reachable");
586         if (!sec)
587                 return 0;
588
589         list_for_each_entry(reloc, &sec->reloc_list, list) {
590                 if (reloc->sym->type != STT_SECTION) {
591                         WARN("unexpected relocation symbol type in %s", sec->name);
592                         return -1;
593                 }
594                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
595                 if (insn)
596                         insn = list_prev_entry(insn, list);
597                 else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
598                         insn = find_last_insn(file, reloc->sym->sec);
599                         if (!insn) {
600                                 WARN("can't find reachable insn at %s+0x%" PRIx64,
601                                      reloc->sym->sec->name, reloc->addend);
602                                 return -1;
603                         }
604                 } else {
605                         WARN("can't find reachable insn at %s+0x%" PRIx64,
606                              reloc->sym->sec->name, reloc->addend);
607                         return -1;
608                 }
609
610                 insn->dead_end = false;
611         }
612
613         return 0;
614 }
615
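/*
 * Create the .static_call_sites section, with an (insn, key) entry for every
 * static call site found in the object.
 */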
616 static int create_static_call_sections(struct objtool_file *file)
617 {
618         struct section *sec;
619         struct static_call_site *site;
620         struct instruction *insn;
621         struct symbol *key_sym;
622         char *key_name, *tmp;
623         int idx;
624
625         sec = find_section_by_name(file->elf, ".static_call_sites");
626         if (sec) {
627                 INIT_LIST_HEAD(&file->static_call_list);
628                 WARN("file already has .static_call_sites section, skipping");
629                 return 0;
630         }
631
632         if (list_empty(&file->static_call_list))
633                 return 0;
634
635         idx = 0;
636         list_for_each_entry(insn, &file->static_call_list, call_node)
637                 idx++;
638
639         sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
640                                  sizeof(struct static_call_site), idx);
641         if (!sec)
642                 return -1;
643
644         idx = 0;
645         list_for_each_entry(insn, &file->static_call_list, call_node) {
646
647                 site = (struct static_call_site *)sec->data->d_buf + idx;
648                 memset(site, 0, sizeof(struct static_call_site));
649
650                 /* populate reloc for 'addr' */
651                 if (elf_add_reloc_to_insn(file->elf, sec,
652                                           idx * sizeof(struct static_call_site),
653                                           R_X86_64_PC32,
654                                           insn->sec, insn->offset))
655                         return -1;
656
657                 /* find key symbol */
658                 key_name = strdup(insn->call_dest->name);
659                 if (!key_name) {
660                         perror("strdup");
661                         return -1;
662                 }
663                 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
664                             STATIC_CALL_TRAMP_PREFIX_LEN)) {
665                         WARN("static_call: trampoline name malformed: %s", key_name);
666                         return -1;
667                 }
668                 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
669                 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
670
671                 key_sym = find_symbol_by_name(file->elf, tmp);
672                 if (!key_sym) {
673                         if (!opts.module) {
674                                 WARN("static_call: can't find static_call_key symbol: %s", tmp);
675                                 return -1;
676                         }
677
678                         /*
679                          * For modules(), the key might not be exported, which
680                          * means the module can make static calls but isn't
681                          * allowed to change them.
682                          *
683                          * In that case we temporarily set the key to be the
684                          * trampoline address.  This is fixed up in
685                          * static_call_add_module().
686                          */
687                         key_sym = insn->call_dest;
688                 }
689                 free(key_name);
690
691                 /* populate reloc for 'key' */
692                 if (elf_add_reloc(file->elf, sec,
693                                   idx * sizeof(struct static_call_site) + 4,
694                                   R_X86_64_PC32, key_sym,
695                                   is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
696                         return -1;
697
698                 idx++;
699         }
700
701         return 0;
702 }
703
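/*
 * Create the .retpoline_sites section, recording the location of every
 * retpoline call/jump so the kernel can patch them at runtime.
 */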
704 static int create_retpoline_sites_sections(struct objtool_file *file)
705 {
706         struct instruction *insn;
707         struct section *sec;
708         int idx;
709
710         sec = find_section_by_name(file->elf, ".retpoline_sites");
711         if (sec) {
712                 WARN("file already has .retpoline_sites, skipping");
713                 return 0;
714         }
715
716         idx = 0;
717         list_for_each_entry(insn, &file->retpoline_call_list, call_node)
718                 idx++;
719
720         if (!idx)
721                 return 0;
722
723         sec = elf_create_section(file->elf, ".retpoline_sites", 0,
724                                  sizeof(int), idx);
725         if (!sec) {
726                 WARN("elf_create_section: .retpoline_sites");
727                 return -1;
728         }
729
730         idx = 0;
731         list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
732
733                 int *site = (int *)sec->data->d_buf + idx;
734                 *site = 0;
735
736                 if (elf_add_reloc_to_insn(file->elf, sec,
737                                           idx * sizeof(int),
738                                           R_X86_64_PC32,
739                                           insn->sec, insn->offset)) {
740                         WARN("elf_add_reloc_to_insn: .retpoline_sites");
741                         return -1;
742                 }
743
744                 idx++;
745         }
746
747         return 0;
748 }
749
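/*
 * Create the .ibt_endbr_seal section, listing the superfluous ENDBR
 * instructions which can be sealed (overwritten) at boot.
 */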
750 static int create_ibt_endbr_seal_sections(struct objtool_file *file)
751 {
752         struct instruction *insn;
753         struct section *sec;
754         int idx;
755
756         sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
757         if (sec) {
758                 WARN("file already has .ibt_endbr_seal, skipping");
759                 return 0;
760         }
761
762         idx = 0;
763         list_for_each_entry(insn, &file->endbr_list, call_node)
764                 idx++;
765
766         if (opts.stats) {
767                 printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
768                 printf("ibt: ENDBR inside functions:  %d\n", file->nr_endbr_int);
769                 printf("ibt: superfluous ENDBR:       %d\n", idx);
770         }
771
772         if (!idx)
773                 return 0;
774
775         sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
776                                  sizeof(int), idx);
777         if (!sec) {
778                 WARN("elf_create_section: .ibt_endbr_seal");
779                 return -1;
780         }
781
782         idx = 0;
783         list_for_each_entry(insn, &file->endbr_list, call_node) {
784
785                 int *site = (int *)sec->data->d_buf + idx;
786                 *site = 0;
787
788                 if (elf_add_reloc_to_insn(file->elf, sec,
789                                           idx * sizeof(int),
790                                           R_X86_64_PC32,
791                                           insn->sec, insn->offset)) {
792                         WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
793                         return -1;
794                 }
795
796                 idx++;
797         }
798
799         return 0;
800 }
801
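/*
 * Create the __mcount_loc section, recording the location of every
 * __fentry__ call site for ftrace.
 */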
802 static int create_mcount_loc_sections(struct objtool_file *file)
803 {
804         struct section *sec;
805         unsigned long *loc;
806         struct instruction *insn;
807         int idx;
808
809         sec = find_section_by_name(file->elf, "__mcount_loc");
810         if (sec) {
811                 INIT_LIST_HEAD(&file->mcount_loc_list);
812                 WARN("file already has __mcount_loc section, skipping");
813                 return 0;
814         }
815
816         if (list_empty(&file->mcount_loc_list))
817                 return 0;
818
819         idx = 0;
820         list_for_each_entry(insn, &file->mcount_loc_list, call_node)
821                 idx++;
822
823         sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
824         if (!sec)
825                 return -1;
826
827         idx = 0;
828         list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
829
830                 loc = (unsigned long *)sec->data->d_buf + idx;
831                 memset(loc, 0, sizeof(unsigned long));
832
833                 if (elf_add_reloc_to_insn(file->elf, sec,
834                                           idx * sizeof(unsigned long),
835                                           R_X86_64_64,
836                                           insn->sec, insn->offset))
837                         return -1;
838
839                 idx++;
840         }
841
842         return 0;
843 }
844
845 /*
846  * Warnings shouldn't be reported for ignored functions.
847  */
848 static void add_ignores(struct objtool_file *file)
849 {
850         struct instruction *insn;
851         struct section *sec;
852         struct symbol *func;
853         struct reloc *reloc;
854
855         sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
856         if (!sec)
857                 return;
858
859         list_for_each_entry(reloc, &sec->reloc_list, list) {
860                 switch (reloc->sym->type) {
861                 case STT_FUNC:
862                         func = reloc->sym;
863                         break;
864
865                 case STT_SECTION:
866                         func = find_func_by_offset(reloc->sym->sec, reloc->addend);
867                         if (!func)
868                                 continue;
869                         break;
870
871                 default:
872                         WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
873                         continue;
874                 }
875
876                 func_for_each_insn(file, func, insn)
877                         insn->ignore = true;
878         }
879 }
880
881 /*
882  * This is a whitelist of functions that are allowed to be called with AC set.
883  * The list is meant to be minimal and only contains compiler instrumentation
884  * ABI and a few functions used to implement *_{to,from}_user() functions.
885  *
886  * These functions must not directly change AC, but may PUSHF/POPF.
887  */
888 static const char *uaccess_safe_builtin[] = {
889         /* KASAN */
890         "kasan_report",
891         "kasan_check_range",
892         /* KASAN out-of-line */
893         "__asan_loadN_noabort",
894         "__asan_load1_noabort",
895         "__asan_load2_noabort",
896         "__asan_load4_noabort",
897         "__asan_load8_noabort",
898         "__asan_load16_noabort",
899         "__asan_storeN_noabort",
900         "__asan_store1_noabort",
901         "__asan_store2_noabort",
902         "__asan_store4_noabort",
903         "__asan_store8_noabort",
904         "__asan_store16_noabort",
905         "__kasan_check_read",
906         "__kasan_check_write",
907         /* KASAN in-line */
908         "__asan_report_load_n_noabort",
909         "__asan_report_load1_noabort",
910         "__asan_report_load2_noabort",
911         "__asan_report_load4_noabort",
912         "__asan_report_load8_noabort",
913         "__asan_report_load16_noabort",
914         "__asan_report_store_n_noabort",
915         "__asan_report_store1_noabort",
916         "__asan_report_store2_noabort",
917         "__asan_report_store4_noabort",
918         "__asan_report_store8_noabort",
919         "__asan_report_store16_noabort",
920         /* KCSAN */
921         "__kcsan_check_access",
922         "__kcsan_mb",
923         "__kcsan_wmb",
924         "__kcsan_rmb",
925         "__kcsan_release",
926         "kcsan_found_watchpoint",
927         "kcsan_setup_watchpoint",
928         "kcsan_check_scoped_accesses",
929         "kcsan_disable_current",
930         "kcsan_enable_current_nowarn",
931         /* KCSAN/TSAN */
932         "__tsan_func_entry",
933         "__tsan_func_exit",
934         "__tsan_read_range",
935         "__tsan_write_range",
936         "__tsan_read1",
937         "__tsan_read2",
938         "__tsan_read4",
939         "__tsan_read8",
940         "__tsan_read16",
941         "__tsan_write1",
942         "__tsan_write2",
943         "__tsan_write4",
944         "__tsan_write8",
945         "__tsan_write16",
946         "__tsan_read_write1",
947         "__tsan_read_write2",
948         "__tsan_read_write4",
949         "__tsan_read_write8",
950         "__tsan_read_write16",
951         "__tsan_atomic8_load",
952         "__tsan_atomic16_load",
953         "__tsan_atomic32_load",
954         "__tsan_atomic64_load",
955         "__tsan_atomic8_store",
956         "__tsan_atomic16_store",
957         "__tsan_atomic32_store",
958         "__tsan_atomic64_store",
959         "__tsan_atomic8_exchange",
960         "__tsan_atomic16_exchange",
961         "__tsan_atomic32_exchange",
962         "__tsan_atomic64_exchange",
963         "__tsan_atomic8_fetch_add",
964         "__tsan_atomic16_fetch_add",
965         "__tsan_atomic32_fetch_add",
966         "__tsan_atomic64_fetch_add",
967         "__tsan_atomic8_fetch_sub",
968         "__tsan_atomic16_fetch_sub",
969         "__tsan_atomic32_fetch_sub",
970         "__tsan_atomic64_fetch_sub",
971         "__tsan_atomic8_fetch_and",
972         "__tsan_atomic16_fetch_and",
973         "__tsan_atomic32_fetch_and",
974         "__tsan_atomic64_fetch_and",
975         "__tsan_atomic8_fetch_or",
976         "__tsan_atomic16_fetch_or",
977         "__tsan_atomic32_fetch_or",
978         "__tsan_atomic64_fetch_or",
979         "__tsan_atomic8_fetch_xor",
980         "__tsan_atomic16_fetch_xor",
981         "__tsan_atomic32_fetch_xor",
982         "__tsan_atomic64_fetch_xor",
983         "__tsan_atomic8_fetch_nand",
984         "__tsan_atomic16_fetch_nand",
985         "__tsan_atomic32_fetch_nand",
986         "__tsan_atomic64_fetch_nand",
987         "__tsan_atomic8_compare_exchange_strong",
988         "__tsan_atomic16_compare_exchange_strong",
989         "__tsan_atomic32_compare_exchange_strong",
990         "__tsan_atomic64_compare_exchange_strong",
991         "__tsan_atomic8_compare_exchange_weak",
992         "__tsan_atomic16_compare_exchange_weak",
993         "__tsan_atomic32_compare_exchange_weak",
994         "__tsan_atomic64_compare_exchange_weak",
995         "__tsan_atomic8_compare_exchange_val",
996         "__tsan_atomic16_compare_exchange_val",
997         "__tsan_atomic32_compare_exchange_val",
998         "__tsan_atomic64_compare_exchange_val",
999         "__tsan_atomic_thread_fence",
1000         "__tsan_atomic_signal_fence",
1001         /* KCOV */
1002         "write_comp_data",
1003         "check_kcov_mode",
1004         "__sanitizer_cov_trace_pc",
1005         "__sanitizer_cov_trace_const_cmp1",
1006         "__sanitizer_cov_trace_const_cmp2",
1007         "__sanitizer_cov_trace_const_cmp4",
1008         "__sanitizer_cov_trace_const_cmp8",
1009         "__sanitizer_cov_trace_cmp1",
1010         "__sanitizer_cov_trace_cmp2",
1011         "__sanitizer_cov_trace_cmp4",
1012         "__sanitizer_cov_trace_cmp8",
1013         "__sanitizer_cov_trace_switch",
1014         /* UBSAN */
1015         "ubsan_type_mismatch_common",
1016         "__ubsan_handle_type_mismatch",
1017         "__ubsan_handle_type_mismatch_v1",
1018         "__ubsan_handle_shift_out_of_bounds",
1019         /* misc */
1020         "csum_partial_copy_generic",
1021         "copy_mc_fragile",
1022         "copy_mc_fragile_handle_tail",
1023         "copy_mc_enhanced_fast_string",
1024         "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
1025         NULL
1026 };
1027
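/* Mark every symbol on the uaccess_safe_builtin list as uaccess-safe. */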
1028 static void add_uaccess_safe(struct objtool_file *file)
1029 {
1030         struct symbol *func;
1031         const char **name;
1032
1033         if (!opts.uaccess)
1034                 return;
1035
1036         for (name = uaccess_safe_builtin; *name; name++) {
1037                 func = find_symbol_by_name(file->elf, *name);
1038                 if (!func)
1039                         continue;
1040
1041                 func->uaccess_safe = true;
1042         }
1043 }
1044
1045 /*
1046  * FIXME: For now, just ignore any alternatives which add retpolines.  This is
1047  * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
1048  * But it at least allows objtool to understand the control flow *around* the
1049  * retpoline.
1050  */
1051 static int add_ignore_alternatives(struct objtool_file *file)
1052 {
1053         struct section *sec;
1054         struct reloc *reloc;
1055         struct instruction *insn;
1056
1057         sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
1058         if (!sec)
1059                 return 0;
1060
1061         list_for_each_entry(reloc, &sec->reloc_list, list) {
1062                 if (reloc->sym->type != STT_SECTION) {
1063                         WARN("unexpected relocation symbol type in %s", sec->name);
1064                         return -1;
1065                 }
1066
1067                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1068                 if (!insn) {
1069                         WARN("bad .discard.ignore_alts entry");
1070                         return -1;
1071                 }
1072
1073                 insn->ignore_alts = true;
1074         }
1075
1076         return 0;
1077 }
1078
1079 __weak bool arch_is_retpoline(struct symbol *sym)
1080 {
1081         return false;
1082 }
1083
1084 #define NEGATIVE_RELOC  ((void *)-1L)
1085
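/*
 * Find the relocation (if any) which applies to an instruction, caching the
 * result.  NEGATIVE_RELOC caches the "no relocation found" case.
 */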
1086 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1087 {
1088         if (insn->reloc == NEGATIVE_RELOC)
1089                 return NULL;
1090
1091         if (!insn->reloc) {
1092                 if (!file)
1093                         return NULL;
1094
1095                 insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1096                                                        insn->offset, insn->len);
1097                 if (!insn->reloc) {
1098                         insn->reloc = NEGATIVE_RELOC;
1099                         return NULL;
1100                 }
1101         }
1102
1103         return insn->reloc;
1104 }
1105
1106 static void remove_insn_ops(struct instruction *insn)
1107 {
1108         struct stack_op *op, *tmp;
1109
1110         list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
1111                 list_del(&op->list);
1112                 free(op);
1113         }
1114 }
1115
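/*
 * Classify a call site: record static calls, retpoline calls and __fentry__
 * calls in their respective lists, NOP out instrumentation calls in noinstr
 * text, and mark calls to dead-end functions.
 */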
1116 static void annotate_call_site(struct objtool_file *file,
1117                                struct instruction *insn, bool sibling)
1118 {
1119         struct reloc *reloc = insn_reloc(file, insn);
1120         struct symbol *sym = insn->call_dest;
1121
1122         if (!sym)
1123                 sym = reloc->sym;
1124
1125         /*
1126          * Alternative replacement code is just template code which is
1127          * sometimes copied to the original instruction. For now, don't
1128          * annotate it. (In the future we might consider annotating the
1129          * original instruction if/when it ever makes sense to do so.)
1130          */
1131         if (!strcmp(insn->sec->name, ".altinstr_replacement"))
1132                 return;
1133
1134         if (sym->static_call_tramp) {
1135                 list_add_tail(&insn->call_node, &file->static_call_list);
1136                 return;
1137         }
1138
1139         if (sym->retpoline_thunk) {
1140                 list_add_tail(&insn->call_node, &file->retpoline_call_list);
1141                 return;
1142         }
1143
1144         /*
1145          * Many compilers cannot disable KCOV or sanitizer calls with a function
1146          * attribute, so they need a little help: NOP out any such calls from
1147          * noinstr text.
1148          */
1149         if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
1150                 if (reloc) {
1151                         reloc->type = R_NONE;
1152                         elf_write_reloc(file->elf, reloc);
1153                 }
1154
1155                 elf_write_insn(file->elf, insn->sec,
1156                                insn->offset, insn->len,
1157                                sibling ? arch_ret_insn(insn->len)
1158                                        : arch_nop_insn(insn->len));
1159
1160                 insn->type = sibling ? INSN_RETURN : INSN_NOP;
1161
1162                 if (sibling) {
1163                         /*
1164                          * We've replaced the tail-call JMP insn with two new
1165                          * insns: RET; INT3, except we only have a single struct
1166                          * insn here. Mark it retpoline_safe to avoid the SLS
1167                          * warning, instead of adding another insn.
1168                          */
1169                         insn->retpoline_safe = true;
1170                 }
1171
1172                 return;
1173         }
1174
1175         if (opts.mcount && sym->fentry) {
1176                 if (sibling)
1177                         WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);
1178
1179                 if (reloc) {
1180                         reloc->type = R_NONE;
1181                         elf_write_reloc(file->elf, reloc);
1182                 }
1183
1184                 elf_write_insn(file->elf, insn->sec,
1185                                insn->offset, insn->len,
1186                                arch_nop_insn(insn->len));
1187
1188                 insn->type = INSN_NOP;
1189
1190                 list_add_tail(&insn->call_node, &file->mcount_loc_list);
1191                 return;
1192         }
1193
1194         if (!sibling && dead_end_function(file, sym))
1195                 insn->dead_end = true;
1196 }
1197
1198 static void add_call_dest(struct objtool_file *file, struct instruction *insn,
1199                           struct symbol *dest, bool sibling)
1200 {
1201         insn->call_dest = dest;
1202         if (!dest)
1203                 return;
1204
1205         /*
1206          * Whatever stack impact regular CALLs have should be undone
1207          * by the RETURN of the called function.
1208          *
1209          * Annotated intra-function calls retain the stack_ops but
1210          * are converted to JUMP, see read_intra_function_calls().
1211          */
1212         remove_insn_ops(insn);
1213
1214         annotate_call_site(file, insn, sibling);
1215 }
1216
1217 static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1218 {
1219         /*
1220          * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1221          * so convert them accordingly.
1222          */
1223         switch (insn->type) {
1224         case INSN_CALL:
1225                 insn->type = INSN_CALL_DYNAMIC;
1226                 break;
1227         case INSN_JUMP_UNCONDITIONAL:
1228                 insn->type = INSN_JUMP_DYNAMIC;
1229                 break;
1230         case INSN_JUMP_CONDITIONAL:
1231                 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1232                 break;
1233         default:
1234                 return;
1235         }
1236
1237         insn->retpoline_safe = true;
1238
1239         /*
1240          * Whatever stack impact regular CALLs have should be undone
1241          * by the RETURN of the called function.
1242          *
1243          * Annotated intra-function calls retain the stack_ops but
1244          * are converted to JUMP, see read_intra_function_calls().
1245          */
1246         remove_insn_ops(insn);
1247
1248         annotate_call_site(file, insn, false);
1249 }
1250
1251 static bool same_function(struct instruction *insn1, struct instruction *insn2)
1252 {
1253         return insn1->func->pfunc == insn2->func->pfunc;
1254 }
1255
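/*
 * Is this the first instruction of its function?  With IBT, the instruction
 * immediately following an initial ENDBR also counts.
 */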
1256 static bool is_first_func_insn(struct objtool_file *file, struct instruction *insn)
1257 {
1258         if (insn->offset == insn->func->offset)
1259                 return true;
1260
1261         if (opts.ibt) {
1262                 struct instruction *prev = prev_insn_same_sym(file, insn);
1263
1264                 if (prev && prev->type == INSN_ENDBR &&
1265                     insn->offset == insn->func->offset + prev->len)
1266                         return true;
1267         }
1268
1269         return false;
1270 }
1271
1272 /*
1273  * Find the destination instructions for all jumps.
1274  */
1275 static int add_jump_destinations(struct objtool_file *file)
1276 {
1277         struct instruction *insn, *jump_dest;
1278         struct reloc *reloc;
1279         struct section *dest_sec;
1280         unsigned long dest_off;
1281
1282         for_each_insn(file, insn) {
1283                 if (insn->jump_dest) {
1284                         /*
1285                          * handle_group_alt() may have previously set
1286                          * 'jump_dest' for some alternatives.
1287                          */
1288                         continue;
1289                 }
1290                 if (!is_static_jump(insn))
1291                         continue;
1292
1293                 reloc = insn_reloc(file, insn);
1294                 if (!reloc) {
1295                         dest_sec = insn->sec;
1296                         dest_off = arch_jump_destination(insn);
1297                 } else if (reloc->sym->type == STT_SECTION) {
1298                         dest_sec = reloc->sym->sec;
1299                         dest_off = arch_dest_reloc_offset(reloc->addend);
1300                 } else if (reloc->sym->retpoline_thunk) {
1301                         add_retpoline_call(file, insn);
1302                         continue;
1303                 } else if (insn->func) {
1304                         /*
1305                          * External sibling call or internal sibling call with
1306                          * STT_FUNC reloc.
1307                          */
1308                         add_call_dest(file, insn, reloc->sym, true);
1309                         continue;
1310                 } else if (reloc->sym->sec->idx) {
1311                         dest_sec = reloc->sym->sec;
1312                         dest_off = reloc->sym->sym.st_value +
1313                                    arch_dest_reloc_offset(reloc->addend);
1314                 } else {
1315                         /* non-func asm code jumping to another file */
1316                         continue;
1317                 }
1318
1319                 jump_dest = find_insn(file, dest_sec, dest_off);
1320                 if (!jump_dest) {
1321                         WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
1322                                   insn->sec, insn->offset, dest_sec->name,
1323                                   dest_off);
1324                         return -1;
1325                 }
1326
1327                 /*
1328                  * Cross-function jump.
1329                  */
1330                 if (insn->func && jump_dest->func &&
1331                     insn->func != jump_dest->func) {
1332
1333                         /*
1334                          * For GCC 8+, create parent/child links for any cold
1335                          * subfunctions.  This is _mostly_ redundant with a
1336                          * similar initialization in read_symbols().
1337                          *
1338                          * If a function has aliases, we want the *first* such
1339                          * function in the symbol table to be the subfunction's
1340                          * parent.  In that case we overwrite the
1341                          * initialization done in read_symbols().
1342                          *
1343                          * However this code can't completely replace the
1344                          * read_symbols() code because this doesn't detect the
1345                          * case where the parent function's only reference to a
1346                          * subfunction is through a jump table.
1347                          */
1348                         if (!strstr(insn->func->name, ".cold") &&
1349                             strstr(jump_dest->func->name, ".cold")) {
1350                                 insn->func->cfunc = jump_dest->func;
1351                                 jump_dest->func->pfunc = insn->func;
1352
1353                         } else if (!same_function(insn, jump_dest) &&
1354                                    is_first_func_insn(file, jump_dest)) {
1355                                 /*
1356                                  * Internal sibling call without reloc or with
1357                                  * STT_SECTION reloc.
1358                                  */
1359                                 add_call_dest(file, insn, jump_dest->func, true);
1360                                 continue;
1361                         }
1362                 }
1363
1364                 insn->jump_dest = jump_dest;
1365         }
1366
1367         return 0;
1368 }
1369
1370 static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
1371 {
1372         struct symbol *call_dest;
1373
1374         call_dest = find_func_by_offset(sec, offset);
1375         if (!call_dest)
1376                 call_dest = find_symbol_by_offset(sec, offset);
1377
1378         return call_dest;
1379 }
1380
1381 /*
1382  * Find the destination instructions for all calls.
1383  */
1384 static int add_call_destinations(struct objtool_file *file)
1385 {
1386         struct instruction *insn;
1387         unsigned long dest_off;
1388         struct symbol *dest;
1389         struct reloc *reloc;
1390
1391         for_each_insn(file, insn) {
1392                 if (insn->type != INSN_CALL)
1393                         continue;
1394
1395                 reloc = insn_reloc(file, insn);
1396                 if (!reloc) {
1397                         dest_off = arch_jump_destination(insn);
1398                         dest = find_call_destination(insn->sec, dest_off);
1399
1400                         add_call_dest(file, insn, dest, false);
1401
1402                         if (insn->ignore)
1403                                 continue;
1404
1405                         if (!insn->call_dest) {
1406                                 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
1407                                 return -1;
1408                         }
1409
1410                         if (insn->func && insn->call_dest->type != STT_FUNC) {
1411                                 WARN_FUNC("unsupported call to non-function",
1412                                           insn->sec, insn->offset);
1413                                 return -1;
1414                         }
1415
1416                 } else if (reloc->sym->type == STT_SECTION) {
1417                         dest_off = arch_dest_reloc_offset(reloc->addend);
1418                         dest = find_call_destination(reloc->sym->sec, dest_off);
1419                         if (!dest) {
1420                                 WARN_FUNC("can't find call dest symbol at %s+0x%lx",
1421                                           insn->sec, insn->offset,
1422                                           reloc->sym->sec->name,
1423                                           dest_off);
1424                                 return -1;
1425                         }
1426
1427                         add_call_dest(file, insn, dest, false);
1428
1429                 } else if (reloc->sym->retpoline_thunk) {
1430                         add_retpoline_call(file, insn);
1431
1432                 } else
1433                         add_call_dest(file, insn, reloc->sym, false);
1434         }
1435
1436         return 0;
1437 }
1438
1439 /*
1440  * The .alternatives section requires some extra special care over and above
1441  * other special sections because alternatives are patched in place.
1442  */
1443 static int handle_group_alt(struct objtool_file *file,
1444                             struct special_alt *special_alt,
1445                             struct instruction *orig_insn,
1446                             struct instruction **new_insn)
1447 {
1448         struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
1449         struct alt_group *orig_alt_group, *new_alt_group;
1450         unsigned long dest_off;
1451
1452
1453         orig_alt_group = malloc(sizeof(*orig_alt_group));
1454         if (!orig_alt_group) {
1455                 WARN("malloc failed");
1456                 return -1;
1457         }
1458         orig_alt_group->cfi = calloc(special_alt->orig_len,
1459                                      sizeof(struct cfi_state *));
1460         if (!orig_alt_group->cfi) {
1461                 WARN("calloc failed");
1462                 return -1;
1463         }
1464
1465         last_orig_insn = NULL;
1466         insn = orig_insn;
1467         sec_for_each_insn_from(file, insn) {
1468                 if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1469                         break;
1470
1471                 insn->alt_group = orig_alt_group;
1472                 last_orig_insn = insn;
1473         }
1474         orig_alt_group->orig_group = NULL;
1475         orig_alt_group->first_insn = orig_insn;
1476         orig_alt_group->last_insn = last_orig_insn;
1477
1478
1479         new_alt_group = malloc(sizeof(*new_alt_group));
1480         if (!new_alt_group) {
1481                 WARN("malloc failed");
1482                 return -1;
1483         }
1484
1485         if (special_alt->new_len < special_alt->orig_len) {
1486                 /*
1487                  * Insert a fake nop at the end to make the replacement
1488                  * alt_group the same size as the original.  This is needed to
1489                  * allow propagate_alt_cfi() to do its magic.  When the last
1490                  * instruction affects the stack, the instruction after it (the
1491                  * nop) will propagate the new state to the shared CFI array.
1492                  */
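                     /*
                      * For example (illustrative sizes, not from the original
                      * source): with a 5-byte original site and a 2-byte
                      * replacement, the fake nop below gets len = 5 - 2 = 3
                      * bytes, so both alt_groups span 5 bytes and can share
                      * the same per-byte CFI array.
                      */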
1493                 nop = malloc(sizeof(*nop));
1494                 if (!nop) {
1495                         WARN("malloc failed");
1496                         return -1;
1497                 }
1498                 memset(nop, 0, sizeof(*nop));
1499                 INIT_LIST_HEAD(&nop->alts);
1500                 INIT_LIST_HEAD(&nop->stack_ops);
1501
1502                 nop->sec = special_alt->new_sec;
1503                 nop->offset = special_alt->new_off + special_alt->new_len;
1504                 nop->len = special_alt->orig_len - special_alt->new_len;
1505                 nop->type = INSN_NOP;
1506                 nop->func = orig_insn->func;
1507                 nop->alt_group = new_alt_group;
1508                 nop->ignore = orig_insn->ignore_alts;
1509         }
1510
1511         if (!special_alt->new_len) {
1512                 *new_insn = nop;
1513                 goto end;
1514         }
1515
1516         insn = *new_insn;
1517         sec_for_each_insn_from(file, insn) {
1518                 struct reloc *alt_reloc;
1519
1520                 if (insn->offset >= special_alt->new_off + special_alt->new_len)
1521                         break;
1522
1523                 last_new_insn = insn;
1524
1525                 insn->ignore = orig_insn->ignore_alts;
1526                 insn->func = orig_insn->func;
1527                 insn->alt_group = new_alt_group;
1528
1529                 /*
1530                  * Since alternative replacement code is copy/pasted by the
1531                  * kernel after applying relocations, generally such code can't
1532                  * have relative-address relocation references to outside the
1533                  * .altinstr_replacement section, unless the arch's
1534                  * alternatives code can adjust the relative offsets
1535                  * accordingly.
1536                  */
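                     /*
                      * E.g. (illustrative) a RIP-relative reference such as
                      * "lea sym(%rip), %reg" in the replacement would resolve
                      * to the wrong address once the bytes are copied to the
                      * original site, unless arch_support_alt_relocation()
                      * reports that the arch can fix it up.
                      */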
1537                 alt_reloc = insn_reloc(file, insn);
1538                 if (alt_reloc &&
1539                     !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1540
1541                         WARN_FUNC("unsupported relocation in alternatives section",
1542                                   insn->sec, insn->offset);
1543                         return -1;
1544                 }
1545
1546                 if (!is_static_jump(insn))
1547                         continue;
1548
1549                 if (!insn->immediate)
1550                         continue;
1551
1552                 dest_off = arch_jump_destination(insn);
1553                 if (dest_off == special_alt->new_off + special_alt->new_len) {
1554                         insn->jump_dest = next_insn_same_sec(file, last_orig_insn);
1555                         if (!insn->jump_dest) {
1556                                 WARN_FUNC("can't find alternative jump destination",
1557                                           insn->sec, insn->offset);
1558                                 return -1;
1559                         }
1560                 }
1561         }
1562
1563         if (!last_new_insn) {
1564                 WARN_FUNC("can't find last new alternative instruction",
1565                           special_alt->new_sec, special_alt->new_off);
1566                 return -1;
1567         }
1568
1569         if (nop)
1570                 list_add(&nop->list, &last_new_insn->list);
1571 end:
1572         new_alt_group->orig_group = orig_alt_group;
1573         new_alt_group->first_insn = *new_insn;
1574         new_alt_group->last_insn = nop ? : last_new_insn;
1575         new_alt_group->cfi = orig_alt_group->cfi;
1576         return 0;
1577 }
1578
1579 /*
1580  * A jump label (__jump_table) entry can either convert a nop to a jump or
1581  * a jump to a nop.  If the original instruction is a jump, make the alt
1582  * entry an effective nop by just skipping the original instruction.
1583  */
1584 static int handle_jump_alt(struct objtool_file *file,
1585                            struct special_alt *special_alt,
1586                            struct instruction *orig_insn,
1587                            struct instruction **new_insn)
1588 {
1589         if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1590             orig_insn->type != INSN_NOP) {
1591
1592                 WARN_FUNC("unsupported instruction at jump label",
1593                           orig_insn->sec, orig_insn->offset);
1594                 return -1;
1595         }
1596
1597         if (opts.hack_jump_label && special_alt->key_addend & 2) {
1598                 struct reloc *reloc = insn_reloc(file, orig_insn);
1599
1600                 if (reloc) {
1601                         reloc->type = R_NONE;
1602                         elf_write_reloc(file->elf, reloc);
1603                 }
1604                 elf_write_insn(file->elf, orig_insn->sec,
1605                                orig_insn->offset, orig_insn->len,
1606                                arch_nop_insn(orig_insn->len));
1607                 orig_insn->type = INSN_NOP;
1608         }
1609
1610         if (orig_insn->type == INSN_NOP) {
1611                 if (orig_insn->len == 2)
1612                         file->jl_nop_short++;
1613                 else
1614                         file->jl_nop_long++;
1615
1616                 return 0;
1617         }
1618
1619         if (orig_insn->len == 2)
1620                 file->jl_short++;
1621         else
1622                 file->jl_long++;
1623
1624         *new_insn = list_next_entry(orig_insn, list);
1625         return 0;
1626 }
1627
1628 /*
1629  * Read all the special sections which have alternate instructions which can be
1630  * patched in or redirected to at runtime.  Each instruction having alternate
1631  * instruction(s) has them added to its insn->alts list, which will be
1632  * traversed in validate_branch().
1633  */
1634 static int add_special_section_alts(struct objtool_file *file)
1635 {
1636         struct list_head special_alts;
1637         struct instruction *orig_insn, *new_insn;
1638         struct special_alt *special_alt, *tmp;
1639         struct alternative *alt;
1640         int ret;
1641
1642         ret = special_get_alts(file->elf, &special_alts);
1643         if (ret)
1644                 return ret;
1645
1646         list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
1647
1648                 orig_insn = find_insn(file, special_alt->orig_sec,
1649                                       special_alt->orig_off);
1650                 if (!orig_insn) {
1651                         WARN_FUNC("special: can't find orig instruction",
1652                                   special_alt->orig_sec, special_alt->orig_off);
1653                         ret = -1;
1654                         goto out;
1655                 }
1656
1657                 new_insn = NULL;
1658                 if (!special_alt->group || special_alt->new_len) {
1659                         new_insn = find_insn(file, special_alt->new_sec,
1660                                              special_alt->new_off);
1661                         if (!new_insn) {
1662                                 WARN_FUNC("special: can't find new instruction",
1663                                           special_alt->new_sec,
1664                                           special_alt->new_off);
1665                                 ret = -1;
1666                                 goto out;
1667                         }
1668                 }
1669
1670                 if (special_alt->group) {
1671                         if (!special_alt->orig_len) {
1672                                 WARN_FUNC("empty alternative entry",
1673                                           orig_insn->sec, orig_insn->offset);
1674                                 continue;
1675                         }
1676
1677                         ret = handle_group_alt(file, special_alt, orig_insn,
1678                                                &new_insn);
1679                         if (ret)
1680                                 goto out;
1681                 } else if (special_alt->jump_or_nop) {
1682                         ret = handle_jump_alt(file, special_alt, orig_insn,
1683                                               &new_insn);
1684                         if (ret)
1685                                 goto out;
1686                 }
1687
1688                 alt = malloc(sizeof(*alt));
1689                 if (!alt) {
1690                         WARN("malloc failed");
1691                         ret = -1;
1692                         goto out;
1693                 }
1694
1695                 alt->insn = new_insn;
1696                 alt->skip_orig = special_alt->skip_orig;
1697                 orig_insn->ignore_alts |= special_alt->skip_alt;
1698                 list_add_tail(&alt->list, &orig_insn->alts);
1699
1700                 list_del(&special_alt->list);
1701                 free(special_alt);
1702         }
1703
1704         if (opts.stats) {
1705                 printf("jl\\\tNOP\tJMP\n");
1706                 printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
1707                 printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
1708         }
1709
1710 out:
1711         return ret;
1712 }
1713
1714 static int add_jump_table(struct objtool_file *file, struct instruction *insn,
1715                             struct reloc *table)
1716 {
1717         struct reloc *reloc = table;
1718         struct instruction *dest_insn;
1719         struct alternative *alt;
1720         struct symbol *pfunc = insn->func->pfunc;
1721         unsigned int prev_offset = 0;
1722
1723         /*
1724          * Each @reloc is a switch table relocation which points to the target
1725          * instruction.
1726          */
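             /*
              * Illustrative layout (example addresses; 8-byte entries, as
              * checked below):
              *
              *   .rodata:  .quad .text + 0x10    <- @table, jump_table_start
              *             .quad .text + 0x38
              *             .quad .text + 0x52
              *
              * Entries must be consecutive (reloc offsets 8 bytes apart) and
              * every target must land in the same parent function.
              */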
1727         list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {
1728
1729                 /* Check for the end of the table: */
1730                 if (reloc != table && reloc->jump_table_start)
1731                         break;
1732
1733                 /* Make sure the table entries are consecutive: */
1734                 if (prev_offset && reloc->offset != prev_offset + 8)
1735                         break;
1736
1737                 /* Detect function pointers from contiguous objects: */
1738                 if (reloc->sym->sec == pfunc->sec &&
1739                     reloc->addend == pfunc->offset)
1740                         break;
1741
1742                 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
1743                 if (!dest_insn)
1744                         break;
1745
1746                 /* Make sure the destination is in the same function: */
1747                 if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
1748                         break;
1749
1750                 alt = malloc(sizeof(*alt));
1751                 if (!alt) {
1752                         WARN("malloc failed");
1753                         return -1;
1754                 }
1755
1756                 alt->insn = dest_insn;
1757                 list_add_tail(&alt->list, &insn->alts);
1758                 prev_offset = reloc->offset;
1759         }
1760
1761         if (!prev_offset) {
1762                 WARN_FUNC("can't find switch jump table",
1763                           insn->sec, insn->offset);
1764                 return -1;
1765         }
1766
1767         return 0;
1768 }
1769
1770 /*
1771  * find_jump_table() - Given a dynamic jump, find the switch jump table
1772  * associated with it.
1773  */
1774 static struct reloc *find_jump_table(struct objtool_file *file,
1775                                       struct symbol *func,
1776                                       struct instruction *insn)
1777 {
1778         struct reloc *table_reloc;
1779         struct instruction *dest_insn, *orig_insn = insn;
1780
1781         /*
1782          * Backward search using the @first_jump_src links: these help avoid
1783          * much of the 'in between' code, which keeps us from getting
1784          * confused by it.
1785          */
1786         for (;
1787              insn && insn->func && insn->func->pfunc == func;
1788              insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
1789
1790                 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
1791                         break;
1792
1793                 /* allow small jumps within the range */
1794                 if (insn->type == INSN_JUMP_UNCONDITIONAL &&
1795                     insn->jump_dest &&
1796                     (insn->jump_dest->offset <= insn->offset ||
1797                      insn->jump_dest->offset > orig_insn->offset))
1798                         break;
1799
1800                 table_reloc = arch_find_switch_table(file, insn);
1801                 if (!table_reloc)
1802                         continue;
1803                 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
1804                 if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
1805                         continue;
1806
1807                 return table_reloc;
1808         }
1809
1810         return NULL;
1811 }
1812
1813 /*
1814  * First pass: Mark the head of each jump table so that in the next pass,
1815  * we know when a given jump table ends and the next one starts.
1816  */
1817 static void mark_func_jump_tables(struct objtool_file *file,
1818                                     struct symbol *func)
1819 {
1820         struct instruction *insn, *last = NULL;
1821         struct reloc *reloc;
1822
1823         func_for_each_insn(file, func, insn) {
1824                 if (!last)
1825                         last = insn;
1826
1827                 /*
1828                  * Store back-pointers for unconditional forward jumps such
1829                  * that find_jump_table() can back-track using those and
1830                  * avoid some potentially confusing code.
1831                  */
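                     /*
                      * I.e. (illustrative) for a forward jump A -> B where B
                      * has no earlier recorded source, set
                      * B->first_jump_src = A so the backward walk can hop from
                      * B straight back to A.
                      */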
1832                 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
1833                     insn->offset > last->offset &&
1834                     insn->jump_dest->offset > insn->offset &&
1835                     !insn->jump_dest->first_jump_src) {
1836
1837                         insn->jump_dest->first_jump_src = insn;
1838                         last = insn->jump_dest;
1839                 }
1840
1841                 if (insn->type != INSN_JUMP_DYNAMIC)
1842                         continue;
1843
1844                 reloc = find_jump_table(file, func, insn);
1845                 if (reloc) {
1846                         reloc->jump_table_start = true;
1847                         insn->jump_table = reloc;
1848                 }
1849         }
1850 }
1851
1852 static int add_func_jump_tables(struct objtool_file *file,
1853                                   struct symbol *func)
1854 {
1855         struct instruction *insn;
1856         int ret;
1857
1858         func_for_each_insn(file, func, insn) {
1859                 if (!insn->jump_table)
1860                         continue;
1861
1862                 ret = add_jump_table(file, insn, insn->jump_table);
1863                 if (ret)
1864                         return ret;
1865         }
1866
1867         return 0;
1868 }
1869
1870 /*
1871  * For some switch statements, gcc generates a jump table in the .rodata
1872  * section which contains a list of addresses within the function to jump to.
1873  * This finds these jump tables and adds them to the insn->alts lists.
1874  */
1875 static int add_jump_table_alts(struct objtool_file *file)
1876 {
1877         struct section *sec;
1878         struct symbol *func;
1879         int ret;
1880
1881         if (!file->rodata)
1882                 return 0;
1883
1884         for_each_sec(file, sec) {
1885                 list_for_each_entry(func, &sec->symbol_list, list) {
1886                         if (func->type != STT_FUNC)
1887                                 continue;
1888
1889                         mark_func_jump_tables(file, func);
1890                         ret = add_func_jump_tables(file, func);
1891                         if (ret)
1892                                 return ret;
1893                 }
1894         }
1895
1896         return 0;
1897 }
1898
1899 static void set_func_state(struct cfi_state *state)
1900 {
1901         state->cfa = initial_func_cfi.cfa;
1902         memcpy(&state->regs, &initial_func_cfi.regs,
1903                CFI_NUM_REGS * sizeof(struct cfi_reg));
1904         state->stack_size = initial_func_cfi.cfa.offset;
1905 }
1906
1907 static int read_unwind_hints(struct objtool_file *file)
1908 {
1909         struct cfi_state cfi = init_cfi;
1910         struct section *sec, *relocsec;
1911         struct unwind_hint *hint;
1912         struct instruction *insn;
1913         struct reloc *reloc;
1914         int i;
1915
1916         sec = find_section_by_name(file->elf, ".discard.unwind_hints");
1917         if (!sec)
1918                 return 0;
1919
1920         relocsec = sec->reloc;
1921         if (!relocsec) {
1922                 WARN("missing .rela.discard.unwind_hints section");
1923                 return -1;
1924         }
1925
1926         if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
1927                 WARN("struct unwind_hint size mismatch");
1928                 return -1;
1929         }
1930
1931         file->hints = true;
1932
1933         for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
1934                 hint = (struct unwind_hint *)sec->data->d_buf + i;
1935
1936                 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
1937                 if (!reloc) {
1938                         WARN("can't find reloc for unwind_hints[%d]", i);
1939                         return -1;
1940                 }
1941
1942                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1943                 if (!insn) {
1944                         WARN("can't find insn for unwind_hints[%d]", i);
1945                         return -1;
1946                 }
1947
1948                 insn->hint = true;
1949
1950                 if (opts.ibt && hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
1951                         struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
1952
1953                         if (sym && sym->bind == STB_GLOBAL &&
1954                             insn->type != INSN_ENDBR && !insn->noendbr) {
1955                                 WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
1956                                           insn->sec, insn->offset);
1957                         }
1958                 }
1959
1960                 if (hint->type == UNWIND_HINT_TYPE_FUNC) {
1961                         insn->cfi = &func_cfi;
1962                         continue;
1963                 }
1964
1965                 if (insn->cfi)
1966                         cfi = *(insn->cfi);
1967
1968                 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
1969                         WARN_FUNC("unsupported unwind_hint sp base reg %d",
1970                                   insn->sec, insn->offset, hint->sp_reg);
1971                         return -1;
1972                 }
1973
1974                 cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
1975                 cfi.type = hint->type;
1976                 cfi.end = hint->end;
1977
1978                 insn->cfi = cfi_hash_find_or_add(&cfi);
1979         }
1980
1981         return 0;
1982 }
1983
1984 static int read_noendbr_hints(struct objtool_file *file)
1985 {
1986         struct section *sec;
1987         struct instruction *insn;
1988         struct reloc *reloc;
1989
1990         sec = find_section_by_name(file->elf, ".rela.discard.noendbr");
1991         if (!sec)
1992                 return 0;
1993
1994         list_for_each_entry(reloc, &sec->reloc_list, list) {
1995                 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
1996                 if (!insn) {
1997                         WARN("bad .discard.noendbr entry");
1998                         return -1;
1999                 }
2000
2001                 if (insn->type == INSN_ENDBR)
2002                         WARN_FUNC("ANNOTATE_NOENDBR on ENDBR", insn->sec, insn->offset);
2003
2004                 insn->noendbr = 1;
2005         }
2006
2007         return 0;
2008 }
2009
2010 static int read_retpoline_hints(struct objtool_file *file)
2011 {
2012         struct section *sec;
2013         struct instruction *insn;
2014         struct reloc *reloc;
2015
2016         sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
2017         if (!sec)
2018                 return 0;
2019
2020         list_for_each_entry(reloc, &sec->reloc_list, list) {
2021                 if (reloc->sym->type != STT_SECTION) {
2022                         WARN("unexpected relocation symbol type in %s", sec->name);
2023                         return -1;
2024                 }
2025
2026                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2027                 if (!insn) {
2028                         WARN("bad .discard.retpoline_safe entry");
2029                         return -1;
2030                 }
2031
2032                 if (insn->type != INSN_JUMP_DYNAMIC &&
2033                     insn->type != INSN_CALL_DYNAMIC) {
2034                         WARN_FUNC("retpoline_safe hint not an indirect jump/call",
2035                                   insn->sec, insn->offset);
2036                         return -1;
2037                 }
2038
2039                 insn->retpoline_safe = true;
2040         }
2041
2042         return 0;
2043 }
2044
2045 static int read_instr_hints(struct objtool_file *file)
2046 {
2047         struct section *sec;
2048         struct instruction *insn;
2049         struct reloc *reloc;
2050
2051         sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
2052         if (!sec)
2053                 return 0;
2054
2055         list_for_each_entry(reloc, &sec->reloc_list, list) {
2056                 if (reloc->sym->type != STT_SECTION) {
2057                         WARN("unexpected relocation symbol type in %s", sec->name);
2058                         return -1;
2059                 }
2060
2061                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2062                 if (!insn) {
2063                         WARN("bad .discard.instr_end entry");
2064                         return -1;
2065                 }
2066
2067                 insn->instr--;
2068         }
2069
2070         sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
2071         if (!sec)
2072                 return 0;
2073
2074         list_for_each_entry(reloc, &sec->reloc_list, list) {
2075                 if (reloc->sym->type != STT_SECTION) {
2076                         WARN("unexpected relocation symbol type in %s", sec->name);
2077                         return -1;
2078                 }
2079
2080                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2081                 if (!insn) {
2082                         WARN("bad .discard.instr_begin entry");
2083                         return -1;
2084                 }
2085
2086                 insn->instr++;
2087         }
2088
2089         return 0;
2090 }
2091
2092 static int read_intra_function_calls(struct objtool_file *file)
2093 {
2094         struct instruction *insn;
2095         struct section *sec;
2096         struct reloc *reloc;
2097
2098         sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
2099         if (!sec)
2100                 return 0;
2101
2102         list_for_each_entry(reloc, &sec->reloc_list, list) {
2103                 unsigned long dest_off;
2104
2105                 if (reloc->sym->type != STT_SECTION) {
2106                         WARN("unexpected relocation symbol type in %s",
2107                              sec->name);
2108                         return -1;
2109                 }
2110
2111                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2112                 if (!insn) {
2113                         WARN("bad .discard.intra_function_calls entry");
2114                         return -1;
2115                 }
2116
2117                 if (insn->type != INSN_CALL) {
2118                         WARN_FUNC("intra_function_call not a direct call",
2119                                   insn->sec, insn->offset);
2120                         return -1;
2121                 }
2122
2123                 /*
2124                  * Treat intra-function CALLs as JMPs, but with a stack_op.
2125                  * See add_call_destinations(), which strips stack_ops from
2126                  * normal CALLs.
2127                  */
2128                 insn->type = INSN_JUMP_UNCONDITIONAL;
2129
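                     /*
                      * The destination is computed like any relative CALL:
                      * dest_off = offset + len + immediate.  E.g.
                      * (illustrative) a 5-byte call at 0x100 with an immediate
                      * of 0x20 targets 0x125.
                      */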
2130                 dest_off = insn->offset + insn->len + insn->immediate;
2131                 insn->jump_dest = find_insn(file, insn->sec, dest_off);
2132                 if (!insn->jump_dest) {
2133                         WARN_FUNC("can't find call dest at %s+0x%lx",
2134                                   insn->sec, insn->offset,
2135                                   insn->sec->name, dest_off);
2136                         return -1;
2137                 }
2138         }
2139
2140         return 0;
2141 }
2142
2143 /*
2144  * Return true if name matches an instrumentation function, where calls to that
2145  * function from noinstr code can safely be removed, but compilers won't do so.
2146  */
2147 static bool is_profiling_func(const char *name)
2148 {
2149         /*
2150          * Many compilers cannot disable KCOV with a function attribute.
2151          */
2152         if (!strncmp(name, "__sanitizer_cov_", 16))
2153                 return true;
2154
2155         /*
2156          * Some compilers currently do not remove __tsan_func_entry/exit nor
2157          * __tsan_atomic_signal_fence (used for barrier instrumentation) with
2158          * the __no_sanitize_thread attribute, so remove them here.  Once the
2159          * kernel's minimum Clang version is 14.0, this workaround can be removed.
2160          */
2161         if (!strncmp(name, "__tsan_func_", 12) ||
2162             !strcmp(name, "__tsan_atomic_signal_fence"))
2163                 return true;
2164
2165         return false;
2166 }
2167
2168 static int classify_symbols(struct objtool_file *file)
2169 {
2170         struct section *sec;
2171         struct symbol *func;
2172
2173         for_each_sec(file, sec) {
2174                 list_for_each_entry(func, &sec->symbol_list, list) {
2175                         if (func->bind != STB_GLOBAL)
2176                                 continue;
2177
2178                         if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2179                                      strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2180                                 func->static_call_tramp = true;
2181
2182                         if (arch_is_retpoline(func))
2183                                 func->retpoline_thunk = true;
2184
2185                         if (!strcmp(func->name, "__fentry__"))
2186                                 func->fentry = true;
2187
2188                         if (is_profiling_func(func->name))
2189                                 func->profiling_func = true;
2190                 }
2191         }
2192
2193         return 0;
2194 }
2195
2196 static void mark_rodata(struct objtool_file *file)
2197 {
2198         struct section *sec;
2199         bool found = false;
2200
2201         /*
2202          * Search for the following rodata sections, each of which can
2203          * potentially contain jump tables:
2204          *
2205          * - .rodata: can contain GCC switch tables
2206          * - .rodata.<func>: same, if -fdata-sections is being used
2207          * - .rodata..c_jump_table: contains C annotated jump tables
2208          *
2209          * .rodata.str1.* sections are ignored; they don't contain jump tables.
2210          */
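             /*
              * E.g. (illustrative names) ".rodata.foo_func" passes the
              * ".rodata" prefix check below, while ".rodata.str1.8" is
              * rejected by the ".str1." substring check.
              */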
2211         for_each_sec(file, sec) {
2212                 if (!strncmp(sec->name, ".rodata", 7) &&
2213                     !strstr(sec->name, ".str1.")) {
2214                         sec->rodata = true;
2215                         found = true;
2216                 }
2217         }
2218
2219         file->rodata = found;
2220 }
2221
2222 static int decode_sections(struct objtool_file *file)
2223 {
2224         int ret;
2225
2226         mark_rodata(file);
2227
2228         ret = init_pv_ops(file);
2229         if (ret)
2230                 return ret;
2231
2232         ret = decode_instructions(file);
2233         if (ret)
2234                 return ret;
2235
2236         add_ignores(file);
2237         add_uaccess_safe(file);
2238
2239         ret = add_ignore_alternatives(file);
2240         if (ret)
2241                 return ret;
2242
2243         /*
2244          * Must be before read_unwind_hints() since that needs insn->noendbr.
2245          */
2246         ret = read_noendbr_hints(file);
2247         if (ret)
2248                 return ret;
2249
2250         /*
2251          * Must be before add_{jump,call}_destinations().
2252          */
2253         ret = classify_symbols(file);
2254         if (ret)
2255                 return ret;
2256
2257         /*
2258          * Must be before add_jump_destinations(), which depends on 'func'
2259          * being set for alternatives, to enable proper sibling call detection.
2260          */
2261         ret = add_special_section_alts(file);
2262         if (ret)
2263                 return ret;
2264
2265         ret = add_jump_destinations(file);
2266         if (ret)
2267                 return ret;
2268
2269         /*
2270          * Must be before add_call_destinations(); it changes INSN_CALL to
2271          * INSN_JUMP_UNCONDITIONAL.
2272          */
2273         ret = read_intra_function_calls(file);
2274         if (ret)
2275                 return ret;
2276
2277         ret = add_call_destinations(file);
2278         if (ret)
2279                 return ret;
2280
2281         /*
2282          * Must be after add_call_destinations() such that it can override
2283          * dead_end_function() marks.
2284          */
2285         ret = add_dead_ends(file);
2286         if (ret)
2287                 return ret;
2288
2289         ret = add_jump_table_alts(file);
2290         if (ret)
2291                 return ret;
2292
2293         ret = read_unwind_hints(file);
2294         if (ret)
2295                 return ret;
2296
2297         ret = read_retpoline_hints(file);
2298         if (ret)
2299                 return ret;
2300
2301         ret = read_instr_hints(file);
2302         if (ret)
2303                 return ret;
2304
2305         return 0;
2306 }
2307
2308 static bool is_fentry_call(struct instruction *insn)
2309 {
2310         if (insn->type == INSN_CALL &&
2311             insn->call_dest &&
2312             insn->call_dest->fentry)
2313                 return true;
2314
2315         return false;
2316 }
2317
2318 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2319 {
2320         struct cfi_state *cfi = &state->cfi;
2321         int i;
2322
2323         if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2324                 return true;
2325
2326         if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2327                 return true;
2328
2329         if (cfi->stack_size != initial_func_cfi.cfa.offset)
2330                 return true;
2331
2332         for (i = 0; i < CFI_NUM_REGS; i++) {
2333                 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2334                     cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2335                         return true;
2336         }
2337
2338         return false;
2339 }
2340
2341 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2342                                 int expected_offset)
2343 {
2344         return reg->base == CFI_CFA &&
2345                reg->offset == expected_offset;
2346 }
2347
2348 static bool has_valid_stack_frame(struct insn_state *state)
2349 {
2350         struct cfi_state *cfi = &state->cfi;
2351
2352         if (cfi->cfa.base == CFI_BP &&
2353             check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2354             check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2355                 return true;
2356
2357         if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2358                 return true;
2359
2360         return false;
2361 }
2362
2363 static int update_cfi_state_regs(struct instruction *insn,
2364                                   struct cfi_state *cfi,
2365                                   struct stack_op *op)
2366 {
2367         struct cfi_reg *cfa = &cfi->cfa;
2368
2369         if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2370                 return 0;
2371
2372         /* push */
2373         if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2374                 cfa->offset += 8;
2375
2376         /* pop */
2377         if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2378                 cfa->offset -= 8;
2379
2380         /* add immediate to sp */
2381         if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2382             op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2383                 cfa->offset -= op->src.offset;
2384
2385         return 0;
2386 }
2387
2388 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2389 {
2390         if (arch_callee_saved_reg(reg) &&
2391             cfi->regs[reg].base == CFI_UNDEFINED) {
2392                 cfi->regs[reg].base = base;
2393                 cfi->regs[reg].offset = offset;
2394         }
2395 }
2396
2397 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2398 {
2399         cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2400         cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2401 }
2402
2403 /*
2404  * A note about DRAP stack alignment:
2405  *
2406  * GCC has the concept of a DRAP register, which is used to help keep track of
2407  * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
2408  * register.  The typical DRAP pattern is:
2409  *
2410  *   4c 8d 54 24 08             lea    0x8(%rsp),%r10
2411  *   48 83 e4 c0                and    $0xffffffffffffffc0,%rsp
2412  *   41 ff 72 f8                pushq  -0x8(%r10)
2413  *   55                         push   %rbp
2414  *   48 89 e5                   mov    %rsp,%rbp
2415  *                              (more pushes)
2416  *   41 52                      push   %r10
2417  *                              ...
2418  *   41 5a                      pop    %r10
2419  *                              (more pops)
2420  *   5d                         pop    %rbp
2421  *   49 8d 62 f8                lea    -0x8(%r10),%rsp
2422  *   c3                         retq
2423  *
2424  * There are some variations in the epilogues, like:
2425  *
2426  *   5b                         pop    %rbx
2427  *   41 5a                      pop    %r10
2428  *   41 5c                      pop    %r12
2429  *   41 5d                      pop    %r13
2430  *   41 5e                      pop    %r14
2431  *   c9                         leaveq
2432  *   49 8d 62 f8                lea    -0x8(%r10),%rsp
2433  *   c3                         retq
2434  *
2435  * and:
2436  *
2437  *   4c 8b 55 e8                mov    -0x18(%rbp),%r10
2438  *   48 8b 5d e0                mov    -0x20(%rbp),%rbx
2439  *   4c 8b 65 f0                mov    -0x10(%rbp),%r12
2440  *   4c 8b 6d f8                mov    -0x8(%rbp),%r13
2441  *   c9                         leaveq
2442  *   49 8d 62 f8                lea    -0x8(%r10),%rsp
2443  *   c3                         retq
2444  *
2445  * Sometimes r13 is used as the DRAP register, in which case it's saved and
2446  * restored beforehand:
2447  *
2448  *   41 55                      push   %r13
2449  *   4c 8d 6c 24 10             lea    0x10(%rsp),%r13
2450  *   48 83 e4 f0                and    $0xfffffffffffffff0,%rsp
2451  *                              ...
2452  *   49 8d 65 f0                lea    -0x10(%r13),%rsp
2453  *   41 5d                      pop    %r13
2454  *   c3                         retq
2455  */
2456 static int update_cfi_state(struct instruction *insn,
2457                             struct instruction *next_insn,
2458                             struct cfi_state *cfi, struct stack_op *op)
2459 {
2460         struct cfi_reg *cfa = &cfi->cfa;
2461         struct cfi_reg *regs = cfi->regs;
2462
2463         /* stack operations don't make sense with an undefined CFA */
2464         if (cfa->base == CFI_UNDEFINED) {
2465                 if (insn->func) {
2466                         WARN_FUNC("undefined stack state", insn->sec, insn->offset);
2467                         return -1;
2468                 }
2469                 return 0;
2470         }
2471
2472         if (cfi->type == UNWIND_HINT_TYPE_REGS ||
2473             cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2474                 return update_cfi_state_regs(insn, cfi, op);
2475
2476         switch (op->dest.type) {
2477
2478         case OP_DEST_REG:
2479                 switch (op->src.type) {
2480
2481                 case OP_SRC_REG:
2482                         if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2483                             cfa->base == CFI_SP &&
2484                             check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
2485
2486                                 /* mov %rsp, %rbp */
2487                                 cfa->base = op->dest.reg;
2488                                 cfi->bp_scratch = false;
2489                         }
2490
2491                         else if (op->src.reg == CFI_SP &&
2492                                  op->dest.reg == CFI_BP && cfi->drap) {
2493
2494                                 /* drap: mov %rsp, %rbp */
2495                                 regs[CFI_BP].base = CFI_BP;
2496                                 regs[CFI_BP].offset = -cfi->stack_size;
2497                                 cfi->bp_scratch = false;
2498                         }
2499
2500                         else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2501
2502                                 /*
2503                                  * mov %rsp, %reg
2504                                  *
2505                                  * This is needed for the rare case where GCC
2506                                  * does:
2507                                  *
2508                                  *   mov    %rsp, %rax
2509                                  *   ...
2510                                  *   mov    %rax, %rsp
2511                                  */
2512                                 cfi->vals[op->dest.reg].base = CFI_CFA;
2513                                 cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2514                         }
2515
2516                         else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2517                                  (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2518
2519                                 /*
2520                                  * mov %rbp, %rsp
2521                                  *
2522                                  * Restore the original stack pointer (Clang).
2523                                  */
2524                                 cfi->stack_size = -cfi->regs[CFI_BP].offset;
2525                         }
2526
2527                         else if (op->dest.reg == cfa->base) {
2528
2529                                 /* mov %reg, %rsp */
2530                                 if (cfa->base == CFI_SP &&
2531                                     cfi->vals[op->src.reg].base == CFI_CFA) {
2532
2533                                         /*
2534                                          * This is needed for the rare case
2535                                          * where GCC does something dumb like:
2536                                          *
2537                                          *   lea    0x8(%rsp), %rcx
2538                                          *   ...
2539                                          *   mov    %rcx, %rsp
2540                                          */
2541                                         cfa->offset = -cfi->vals[op->src.reg].offset;
2542                                         cfi->stack_size = cfa->offset;
2543
2544                                 } else if (cfa->base == CFI_SP &&
2545                                            cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2546                                            cfi->vals[op->src.reg].offset == cfa->offset) {
2547
2548                                         /*
2549                                          * Stack swizzle:
2550                                          *
2551                                          * 1: mov %rsp, (%[tos])
2552                                          * 2: mov %[tos], %rsp
2553                                          *    ...
2554                                          * 3: pop %rsp
2555                                          *
2556                                          * Where:
2557                                          *
2558                                          * 1 - places a pointer to the previous
2559                                          *     stack at the Top-of-Stack of the
2560                                          *     new stack.
2561                                          *
2562                                          * 2 - switches to the new stack.
2563                                          *
2564                                          * 3 - pops the Top-of-Stack to restore
2565                                          *     the original stack.
2566                                          *
2567                                          * Note: we set base to SP_INDIRECT
2568                                          * here and preserve offset. Therefore
2569                                          * when the unwinder reaches ToS it
2570                                          * will dereference SP and then add the
2571                                          * offset to find the next frame, IOW:
2572                                          * (%rsp) + offset.
2573                                          */
2574                                         cfa->base = CFI_SP_INDIRECT;
2575
2576                                 } else {
2577                                         cfa->base = CFI_UNDEFINED;
2578                                         cfa->offset = 0;
2579                                 }
2580                         }
2581
2582                         else if (op->dest.reg == CFI_SP &&
2583                                  cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2584                                  cfi->vals[op->src.reg].offset == cfa->offset) {
2585
2586                                 /*
2587                                  * The same stack swizzle case 2) as above. But
2588                                  * because we can't change cfa->base, case 3)
2589                                  * will become a regular POP. Pretend we're a
2590                                  * PUSH so things don't go unbalanced.
2591                                  */
2592                                 cfi->stack_size += 8;
2593                         }
2594
2595
2596                         break;
2597
2598                 case OP_SRC_ADD:
2599                         if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2600
2601                                 /* add imm, %rsp */
2602                                 cfi->stack_size -= op->src.offset;
2603                                 if (cfa->base == CFI_SP)
2604                                         cfa->offset -= op->src.offset;
2605                                 break;
2606                         }
2607
2608                         if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2609
2610                                 /* lea disp(%rbp), %rsp */
2611                                 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2612                                 break;
2613                         }
2614
2615                         if (!cfi->drap && op->src.reg == CFI_SP &&
2616                             op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
2617                             check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {
2618
2619                                 /* lea disp(%rsp), %rbp */
2620                                 cfa->base = CFI_BP;
2621                                 cfa->offset -= op->src.offset;
2622                                 cfi->bp_scratch = false;
2623                                 break;
2624                         }
2625
2626                         if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2627
2628                                 /* drap: lea disp(%rsp), %drap */
2629                                 cfi->drap_reg = op->dest.reg;
2630
2631                                 /*
2632                                  * lea disp(%rsp), %reg
2633                                  *
2634                                  * This is needed for the rare case where GCC
2635                                  * does something dumb like:
2636                                  *
2637                                  *   lea    0x8(%rsp), %rcx
2638                                  *   ...
2639                                  *   mov    %rcx, %rsp
2640                                  */
2641                                 cfi->vals[op->dest.reg].base = CFI_CFA;
2642                                 cfi->vals[op->dest.reg].offset =
2643                                         -cfi->stack_size + op->src.offset;
2644
2645                                 break;
2646                         }
2647
2648                         if (cfi->drap && op->dest.reg == CFI_SP &&
2649                             op->src.reg == cfi->drap_reg) {
2650
2651                                  /* drap: lea disp(%drap), %rsp */
2652                                 cfa->base = CFI_SP;
2653                                 cfa->offset = cfi->stack_size = -op->src.offset;
2654                                 cfi->drap_reg = CFI_UNDEFINED;
2655                                 cfi->drap = false;
2656                                 break;
2657                         }
2658
2659                         if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
2660                                 WARN_FUNC("unsupported stack register modification",
2661                                           insn->sec, insn->offset);
2662                                 return -1;
2663                         }
2664
2665                         break;
2666
2667                 case OP_SRC_AND:
2668                         if (op->dest.reg != CFI_SP ||
2669                             (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
2670                             (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
2671                                 WARN_FUNC("unsupported stack pointer realignment",
2672                                           insn->sec, insn->offset);
2673                                 return -1;
2674                         }
2675
2676                         if (cfi->drap_reg != CFI_UNDEFINED) {
2677                                 /* drap: and imm, %rsp */
2678                                 cfa->base = cfi->drap_reg;
2679                                 cfa->offset = cfi->stack_size = 0;
2680                                 cfi->drap = true;
2681                         }
2682
2683                         /*
2684                          * Older versions of GCC (4.8ish) realign the stack
2685                          * without DRAP, using a frame pointer instead.
2686                          */
2687
2688                         break;
2689
2690                 case OP_SRC_POP:
2691                 case OP_SRC_POPF:
2692                         if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
2693
2694                                 /* pop %rsp; # restore from a stack swizzle */
2695                                 cfa->base = CFI_SP;
2696                                 break;
2697                         }
2698
2699                         if (!cfi->drap && op->dest.reg == cfa->base) {
2700
2701                                 /* pop %rbp */
2702                                 cfa->base = CFI_SP;
2703                         }
2704
2705                         if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
2706                             op->dest.reg == cfi->drap_reg &&
2707                             cfi->drap_offset == -cfi->stack_size) {
2708
2709                                 /* drap: pop %drap */
2710                                 cfa->base = cfi->drap_reg;
2711                                 cfa->offset = 0;
2712                                 cfi->drap_offset = -1;
2713
2714                         } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
2715
2716                                 /* pop %reg */
2717                                 restore_reg(cfi, op->dest.reg);
2718                         }
2719
2720                         cfi->stack_size -= 8;
2721                         if (cfa->base == CFI_SP)
2722                                 cfa->offset -= 8;
2723
2724                         break;
2725
2726                 case OP_SRC_REG_INDIRECT:
2727                         if (!cfi->drap && op->dest.reg == cfa->base &&
2728                             op->dest.reg == CFI_BP) {
2729
2730                                 /* mov disp(%rsp), %rbp */
2731                                 cfa->base = CFI_SP;
2732                                 cfa->offset = cfi->stack_size;
2733                         }
2734
2735                         if (cfi->drap && op->src.reg == CFI_BP &&
2736                             op->src.offset == cfi->drap_offset) {
2737
2738                                 /* drap: mov disp(%rbp), %drap */
2739                                 cfa->base = cfi->drap_reg;
2740                                 cfa->offset = 0;
2741                                 cfi->drap_offset = -1;
2742                         }
2743
2744                         if (cfi->drap && op->src.reg == CFI_BP &&
2745                             op->src.offset == regs[op->dest.reg].offset) {
2746
2747                                 /* drap: mov disp(%rbp), %reg */
2748                                 restore_reg(cfi, op->dest.reg);
2749
2750                         } else if (op->src.reg == cfa->base &&
2751                             op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
2752
2753                                 /* mov disp(%rbp), %reg */
2754                                 /* mov disp(%rsp), %reg */
2755                                 restore_reg(cfi, op->dest.reg);
2756
2757                         } else if (op->src.reg == CFI_SP &&
2758                                    op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
2759
2760                                 /* mov disp(%rsp), %reg */
2761                                 restore_reg(cfi, op->dest.reg);
2762                         }
2763
2764                         break;
2765
2766                 default:
2767                         WARN_FUNC("unknown stack-related instruction",
2768                                   insn->sec, insn->offset);
2769                         return -1;
2770                 }
2771
2772                 break;
2773
2774         case OP_DEST_PUSH:
2775         case OP_DEST_PUSHF:
2776                 cfi->stack_size += 8;
2777                 if (cfa->base == CFI_SP)
2778                         cfa->offset += 8;
2779
2780                 if (op->src.type != OP_SRC_REG)
2781                         break;
2782
2783                 if (cfi->drap) {
2784                         if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2785
2786                                 /* drap: push %drap */
2787                                 cfa->base = CFI_BP_INDIRECT;
2788                                 cfa->offset = -cfi->stack_size;
2789
2790                                 /* save drap so we know when to restore it */
2791                                 cfi->drap_offset = -cfi->stack_size;
2792
2793                         } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
2794
2795                                 /* drap: push %rbp */
2796                                 cfi->stack_size = 0;
2797
2798                         } else {
2799
2800                                 /* drap: push %reg */
2801                                 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
2802                         }
2803
2804                 } else {
2805
2806                         /* push %reg */
2807                         save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
2808                 }
2809
2810                 /* detect when asm code uses rbp as a scratch register */
2811                 if (opts.stackval && insn->func && op->src.reg == CFI_BP &&
2812                     cfa->base != CFI_BP)
2813                         cfi->bp_scratch = true;
2814                 break;
2815
2816         case OP_DEST_REG_INDIRECT:
2817
2818                 if (cfi->drap) {
2819                         if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2820
2821                                 /* drap: mov %drap, disp(%rbp) */
2822                                 cfa->base = CFI_BP_INDIRECT;
2823                                 cfa->offset = op->dest.offset;
2824
2825                                 /* save drap offset so we know when to restore it */
2826                                 cfi->drap_offset = op->dest.offset;
2827                         } else {
2828
2829                                 /* drap: mov reg, disp(%rbp) */
2830                                 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
2831                         }
2832
2833                 } else if (op->dest.reg == cfa->base) {
2834
2835                         /* mov reg, disp(%rbp) */
2836                         /* mov reg, disp(%rsp) */
2837                         save_reg(cfi, op->src.reg, CFI_CFA,
2838                                  op->dest.offset - cfi->cfa.offset);
2839
2840                 } else if (op->dest.reg == CFI_SP) {
2841
2842                         /* mov reg, disp(%rsp) */
2843                         save_reg(cfi, op->src.reg, CFI_CFA,
2844                                  op->dest.offset - cfi->stack_size);
2845
2846                 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
2847
2848                         /* mov %rsp, (%reg); # setup a stack swizzle. */
2849                         cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
2850                         cfi->vals[op->dest.reg].offset = cfa->offset;
2851                 }
2852
2853                 break;
2854
2855         case OP_DEST_MEM:
2856                 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
2857                         WARN_FUNC("unknown stack-related memory operation",
2858                                   insn->sec, insn->offset);
2859                         return -1;
2860                 }
2861
2862                 /* pop mem */
2863                 cfi->stack_size -= 8;
2864                 if (cfa->base == CFI_SP)
2865                         cfa->offset -= 8;
2866
2867                 break;
2868
2869         default:
2870                 WARN_FUNC("unknown stack-related instruction",
2871                           insn->sec, insn->offset);
2872                 return -1;
2873         }
2874
2875         return 0;
2876 }
2877
2878 /*
2879  * The stack layouts of alternative instructions can sometimes diverge when
2880  * they have stack modifications.  That's fine as long as the potential stack
2881  * layouts don't conflict at any given potential instruction boundary.
2882  *
2883  * Flatten the CFIs of the different alternative code streams (both original
2884  * and replacement) into a single shared CFI array which can be used to detect
2885  * conflicts and nicely feed a linear array of ORC entries to the unwinder.
2886  */
2887 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
2888 {
2889         struct cfi_state **alt_cfi;
2890         int group_off;
2891
2892         if (!insn->alt_group)
2893                 return 0;
2894
2895         if (!insn->cfi) {
2896                 WARN("CFI missing");
2897                 return -1;
2898         }
2899
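        /*
         * The CFI array is shared by the original group and all of its
         * replacements and is indexed by byte offset from the group's first
         * instruction, so the code streams can be compared position by
         * position.
         */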
2900         alt_cfi = insn->alt_group->cfi;
2901         group_off = insn->offset - insn->alt_group->first_insn->offset;
2902
2903         if (!alt_cfi[group_off]) {
2904                 alt_cfi[group_off] = insn->cfi;
2905         } else {
2906                 if (cficmp(alt_cfi[group_off], insn->cfi)) {
2907                         WARN_FUNC("stack layout conflict in alternatives",
2908                                   insn->sec, insn->offset);
2909                         return -1;
2910                 }
2911         }
2912
2913         return 0;
2914 }
2915
2916 static int handle_insn_ops(struct instruction *insn,
2917                            struct instruction *next_insn,
2918                            struct insn_state *state)
2919 {
2920         struct stack_op *op;
2921
2922         list_for_each_entry(op, &insn->stack_ops, list) {
2923
2924                 if (update_cfi_state(insn, next_insn, &state->cfi, op))
2925                         return 1;
2926
2927                 if (!insn->alt_group)
2928                         continue;
2929
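                /*
                 * uaccess_stack is a small stack of booleans kept in one
                 * integer: PUSHF shifts it left and records the current
                 * uaccess state in bit 0, POPF shifts it right to restore
                 * that state, and the value 1 marks the stack bottom.
                 * E.g. starting empty with uaccess set: PUSHF -> 0b11,
                 * POPF -> uaccess = 1, stack empty again.
                 */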
2930                 if (op->dest.type == OP_DEST_PUSHF) {
2931                         if (!state->uaccess_stack) {
2932                                 state->uaccess_stack = 1;
2933                         } else if (state->uaccess_stack >> 31) {
2934                                 WARN_FUNC("PUSHF stack exhausted",
2935                                           insn->sec, insn->offset);
2936                                 return 1;
2937                         }
2938                         state->uaccess_stack <<= 1;
2939                         state->uaccess_stack  |= state->uaccess;
2940                 }
2941
2942                 if (op->src.type == OP_SRC_POPF) {
2943                         if (state->uaccess_stack) {
2944                                 state->uaccess = state->uaccess_stack & 1;
2945                                 state->uaccess_stack >>= 1;
2946                                 if (state->uaccess_stack == 1)
2947                                         state->uaccess_stack = 0;
2948                         }
2949                 }
2950         }
2951
2952         return 0;
2953 }
2954
2955 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
2956 {
2957         struct cfi_state *cfi1 = insn->cfi;
2958         int i;
2959
2960         if (!cfi1) {
2961                 WARN("CFI missing");
2962                 return false;
2963         }
2964
2965         if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
2966
2967                 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
2968                           insn->sec, insn->offset,
2969                           cfi1->cfa.base, cfi1->cfa.offset,
2970                           cfi2->cfa.base, cfi2->cfa.offset);
2971
2972         } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
2973                 for (i = 0; i < CFI_NUM_REGS; i++) {
2974                         if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
2975                                     sizeof(struct cfi_reg)))
2976                                 continue;
2977
2978                         WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
2979                                   insn->sec, insn->offset,
2980                                   i, cfi1->regs[i].base, cfi1->regs[i].offset,
2981                                   i, cfi2->regs[i].base, cfi2->regs[i].offset);
2982                         break;
2983                 }
2984
2985         } else if (cfi1->type != cfi2->type) {
2986
2987                 WARN_FUNC("stack state mismatch: type1=%d type2=%d",
2988                           insn->sec, insn->offset, cfi1->type, cfi2->type);
2989
2990         } else if (cfi1->drap != cfi2->drap ||
2991                    (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
2992                    (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
2993
2994                 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
2995                           insn->sec, insn->offset,
2996                           cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
2997                           cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
2998
2999         } else
3000                 return true;
3001
3002         return false;
3003 }
3004
3005 static inline bool func_uaccess_safe(struct symbol *func)
3006 {
3007         if (func)
3008                 return func->uaccess_safe;
3009
3010         return false;
3011 }
3012
3013 static inline const char *call_dest_name(struct instruction *insn)
3014 {
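        /* "pv_ops[" + up to 10 decimal digits + "]" + '\0' fits in 19 bytes. */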
3015         static char pvname[19];
3016         struct reloc *rel;
3017         int idx;
3018
3019         if (insn->call_dest)
3020                 return insn->call_dest->name;
3021
3022         rel = insn_reloc(NULL, insn);
3023         if (rel && !strcmp(rel->sym->name, "pv_ops")) {
3024                 idx = (rel->addend / sizeof(void *));
3025                 snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3026                 return pvname;
3027         }
3028
3029         return "{dynamic}";
3030 }
3031
3032 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
3033 {
3034         struct symbol *target;
3035         struct reloc *rel;
3036         int idx;
3037
3038         rel = insn_reloc(file, insn);
3039         if (!rel || strcmp(rel->sym->name, "pv_ops"))
3040                 return false;
3041
3042         idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));
3043
3044         if (file->pv_ops[idx].clean)
3045                 return true;
3046
3047         file->pv_ops[idx].clean = true;
3048
3049         list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
3050                 if (!target->sec->noinstr) {
3051                         WARN("pv_ops[%d]: %s", idx, target->name);
3052                         file->pv_ops[idx].clean = false;
3053                 }
3054         }
3055
3056         return file->pv_ops[idx].clean;
3057 }
3058
3059 static inline bool noinstr_call_dest(struct objtool_file *file,
3060                                      struct instruction *insn,
3061                                      struct symbol *func)
3062 {
3063         /*
3064          * We can't deal with indirect function calls at present;
3065          * assume they're instrumented.
3066          */
3067         if (!func) {
3068                 if (file->pv_ops)
3069                         return pv_call_dest(file, insn);
3070
3071                 return false;
3072         }
3073
3074         /*
3075          * If the symbol is from a noinstr section, we're good.
3076          */
3077         if (func->sec->noinstr)
3078                 return true;
3079
3080         /*
3081          * The __ubsan_handle_*() calls are like WARN(), they only happen when
3082          * something 'BAD' happened. At the risk of taking the machine down,
3083          * let them proceed to get the message out.
3084          */
3085         if (!strncmp(func->name, "__ubsan_handle_", 15))
3086                 return true;
3087
3088         return false;
3089 }
3090
3091 static int validate_call(struct objtool_file *file,
3092                          struct instruction *insn,
3093                          struct insn_state *state)
3094 {
3095         if (state->noinstr && state->instr <= 0 &&
3096             !noinstr_call_dest(file, insn, insn->call_dest)) {
3097                 WARN_FUNC("call to %s() leaves .noinstr.text section",
3098                                 insn->sec, insn->offset, call_dest_name(insn));
3099                 return 1;
3100         }
3101
3102         if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
3103                 WARN_FUNC("call to %s() with UACCESS enabled",
3104                                 insn->sec, insn->offset, call_dest_name(insn));
3105                 return 1;
3106         }
3107
3108         if (state->df) {
3109                 WARN_FUNC("call to %s() with DF set",
3110                                 insn->sec, insn->offset, call_dest_name(insn));
3111                 return 1;
3112         }
3113
3114         return 0;
3115 }
3116
3117 static int validate_sibling_call(struct objtool_file *file,
3118                                  struct instruction *insn,
3119                                  struct insn_state *state)
3120 {
3121         if (has_modified_stack_frame(insn, state)) {
3122                 WARN_FUNC("sibling call from callable instruction with modified stack frame",
3123                                 insn->sec, insn->offset);
3124                 return 1;
3125         }
3126
3127         return validate_call(file, insn, state);
3128 }
3129
3130 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
3131 {
3132         if (state->noinstr && state->instr > 0) {
3133                 WARN_FUNC("return with instrumentation enabled",
3134                           insn->sec, insn->offset);
3135                 return 1;
3136         }
3137
3138         if (state->uaccess && !func_uaccess_safe(func)) {
3139                 WARN_FUNC("return with UACCESS enabled",
3140                           insn->sec, insn->offset);
3141                 return 1;
3142         }
3143
3144         if (!state->uaccess && func_uaccess_safe(func)) {
3145                 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
3146                           insn->sec, insn->offset);
3147                 return 1;
3148         }
3149
3150         if (state->df) {
3151                 WARN_FUNC("return with DF set",
3152                           insn->sec, insn->offset);
3153                 return 1;
3154         }
3155
3156         if (func && has_modified_stack_frame(insn, state)) {
3157                 WARN_FUNC("return with modified stack frame",
3158                           insn->sec, insn->offset);
3159                 return 1;
3160         }
3161
3162         if (state->cfi.bp_scratch) {
3163                 WARN_FUNC("BP used as a scratch register",
3164                           insn->sec, insn->offset);
3165                 return 1;
3166         }
3167
3168         return 0;
3169 }
3170
3171 static struct instruction *next_insn_to_validate(struct objtool_file *file,
3172                                                  struct instruction *insn)
3173 {
3174         struct alt_group *alt_group = insn->alt_group;
3175
3176         /*
3177          * Simulate the fact that alternatives are patched in-place.  When the
3178          * end of a replacement alt_group is reached, redirect objtool flow to
3179          * the end of the original alt_group.
3180          */
3181         if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
3182                 return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3183
3184         return next_insn_same_sec(file, insn);
3185 }
3186
3187 /*
3188  * Follow the branch starting at the given instruction, and recursively follow
3189  * any other branches (jumps).  Meanwhile, track the frame pointer state at
3190  * each instruction and validate all the rules described in
3191  * tools/objtool/Documentation/stack-validation.txt.
3192  */
3193 static int validate_branch(struct objtool_file *file, struct symbol *func,
3194                            struct instruction *insn, struct insn_state state)
3195 {
3196         struct alternative *alt;
3197         struct instruction *next_insn, *prev_insn = NULL;
3198         struct section *sec;
3199         u8 visited;
3200         int ret;
3201
3202         sec = insn->sec;
3203
3204         while (1) {
3205                 next_insn = next_insn_to_validate(file, insn);
3206
3207                 if (func && insn->func && func != insn->func->pfunc) {
3208                         WARN("%s() falls through to next function %s()",
3209                              func->name, insn->func->name);
3210                         return 1;
3211                 }
3212
3213                 if (func && insn->ignore) {
3214                         WARN_FUNC("BUG: why am I validating an ignored function?",
3215                                   sec, insn->offset);
3216                         return 1;
3217                 }
3218
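                /*
                 * Each instruction gets validated once per uaccess state;
                 * the two states are tracked as separate bits in
                 * insn->visited.
                 */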
3219                 visited = 1 << state.uaccess;
3220                 if (insn->visited) {
3221                         if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
3222                                 return 1;
3223
3224                         if (insn->visited & visited)
3225                                 return 0;
3226                 } else {
3227                         nr_insns_visited++;
3228                 }
3229
3230                 if (state.noinstr)
3231                         state.instr += insn->instr;
3232
3233                 if (insn->hint) {
3234                         state.cfi = *insn->cfi;
3235                 } else {
3236                         /* XXX track if we actually changed state.cfi */
3237
3238                         if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
3239                                 insn->cfi = prev_insn->cfi;
3240                                 nr_cfi_reused++;
3241                         } else {
3242                                 insn->cfi = cfi_hash_find_or_add(&state.cfi);
3243                         }
3244                 }
3245
3246                 insn->visited |= visited;
3247
3248                 if (propagate_alt_cfi(file, insn))
3249                         return 1;
3250
3251                 if (!insn->ignore_alts && !list_empty(&insn->alts)) {
3252                         bool skip_orig = false;
3253
3254                         list_for_each_entry(alt, &insn->alts, list) {
3255                                 if (alt->skip_orig)
3256                                         skip_orig = true;
3257
3258                                 ret = validate_branch(file, func, alt->insn, state);
3259                                 if (ret) {
3260                                         if (opts.backtrace)
3261                                                 BT_FUNC("(alt)", insn);
3262                                         return ret;
3263                                 }
3264                         }
3265
3266                         if (skip_orig)
3267                                 return 0;
3268                 }
3269
3270                 if (handle_insn_ops(insn, next_insn, &state))
3271                         return 1;
3272
3273                 switch (insn->type) {
3274
3275                 case INSN_RETURN:
3276                         return validate_return(func, insn, &state);
3277
3278                 case INSN_CALL:
3279                 case INSN_CALL_DYNAMIC:
3280                         ret = validate_call(file, insn, &state);
3281                         if (ret)
3282                                 return ret;
3283
3284                         if (opts.stackval && func && !is_fentry_call(insn) &&
3285                             !has_valid_stack_frame(&state)) {
3286                                 WARN_FUNC("call without frame pointer save/setup",
3287                                           sec, insn->offset);
3288                                 return 1;
3289                         }
3290
3291                         if (insn->dead_end)
3292                                 return 0;
3293
3294                         break;
3295
3296                 case INSN_JUMP_CONDITIONAL:
3297                 case INSN_JUMP_UNCONDITIONAL:
3298                         if (is_sibling_call(insn)) {
3299                                 ret = validate_sibling_call(file, insn, &state);
3300                                 if (ret)
3301                                         return ret;
3302
3303                         } else if (insn->jump_dest) {
3304                                 ret = validate_branch(file, func,
3305                                                       insn->jump_dest, state);
3306                                 if (ret) {
3307                                         if (opts.backtrace)
3308                                                 BT_FUNC("(branch)", insn);
3309                                         return ret;
3310                                 }
3311                         }
3312
3313                         if (insn->type == INSN_JUMP_UNCONDITIONAL)
3314                                 return 0;
3315
3316                         break;
3317
3318                 case INSN_JUMP_DYNAMIC:
3319                 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3320                         if (is_sibling_call(insn)) {
3321                                 ret = validate_sibling_call(file, insn, &state);
3322                                 if (ret)
3323                                         return ret;
3324                         }
3325
3326                         if (insn->type == INSN_JUMP_DYNAMIC)
3327                                 return 0;
3328
3329                         break;
3330
3331                 case INSN_CONTEXT_SWITCH:
3332                         if (func && (!next_insn || !next_insn->hint)) {
3333                                 WARN_FUNC("unsupported instruction in callable function",
3334                                           sec, insn->offset);
3335                                 return 1;
3336                         }
3337                         return 0;
3338
3339                 case INSN_STAC:
3340                         if (state.uaccess) {
3341                                 WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
3342                                 return 1;
3343                         }
3344
3345                         state.uaccess = true;
3346                         break;
3347
3348                 case INSN_CLAC:
3349                         if (!state.uaccess && func) {
3350                                 WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
3351                                 return 1;
3352                         }
3353
3354                         if (func_uaccess_safe(func) && !state.uaccess_stack) {
3355                                 WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
3356                                 return 1;
3357                         }
3358
3359                         state.uaccess = false;
3360                         break;
3361
3362                 case INSN_STD:
3363                         if (state.df) {
3364                                 WARN_FUNC("recursive STD", sec, insn->offset);
3365                                 return 1;
3366                         }
3367
3368                         state.df = true;
3369                         break;
3370
3371                 case INSN_CLD:
3372                         if (!state.df && func) {
3373                                 WARN_FUNC("redundant CLD", sec, insn->offset);
3374                                 return 1;
3375                         }
3376
3377                         state.df = false;
3378                         break;
3379
3380                 default:
3381                         break;
3382                 }
3383
3384                 if (insn->dead_end)
3385                         return 0;
3386
3387                 if (!next_insn) {
3388                         if (state.cfi.cfa.base == CFI_UNDEFINED)
3389                                 return 0;
3390                         WARN("%s: unexpected end of section", sec->name);
3391                         return 1;
3392                 }
3393
3394                 prev_insn = insn;
3395                 insn = next_insn;
3396         }
3397
3398         return 0;
3399 }
3400
3401 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3402 {
3403         struct instruction *insn;
3404         struct insn_state state;
3405         int ret, warnings = 0;
3406
3407         if (!file->hints)
3408                 return 0;
3409
3410         init_insn_state(file, &state, sec);
3411
3412         if (sec) {
3413                 insn = find_insn(file, sec, 0);
3414                 if (!insn)
3415                         return 0;
3416         } else {
3417                 insn = list_first_entry(&file->insn_list, typeof(*insn), list);
3418         }
3419
3420         while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
3421                 if (insn->hint && !insn->visited && !insn->ignore) {
3422                         ret = validate_branch(file, insn->func, insn, state);
3423                         if (ret && opts.backtrace)
3424                                 BT_FUNC("<=== (hint)", insn);
3425                         warnings += ret;
3426                 }
3427
3428                 insn = list_next_entry(insn, list);
3429         }
3430
3431         return warnings;
3432 }
3433
3434 static int validate_retpoline(struct objtool_file *file)
3435 {
3436         struct instruction *insn;
3437         int warnings = 0;
3438
3439         for_each_insn(file, insn) {
3440                 if (insn->type != INSN_JUMP_DYNAMIC &&
3441                     insn->type != INSN_CALL_DYNAMIC)
3442                         continue;
3443
3444                 if (insn->retpoline_safe)
3445                         continue;
3446
3447                 /*
3448                  * .init.text code is run before userspace and thus doesn't
3449                  * strictly need retpolines, except for modules, which are
3450                  * loaded late and very much do need retpolines in their
3451                  * .init.text.
3452                  */
3453                 if (!strcmp(insn->sec->name, ".init.text") && !opts.module)
3454                         continue;
3455
3456                 WARN_FUNC("indirect %s found in RETPOLINE build",
3457                           insn->sec, insn->offset,
3458                           insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
3459
3460                 warnings++;
3461         }
3462
3463         return warnings;
3464 }
3465
3466 static bool is_kasan_insn(struct instruction *insn)
3467 {
3468         return (insn->type == INSN_CALL &&
3469                 !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
3470 }
3471
3472 static bool is_ubsan_insn(struct instruction *insn)
3473 {
3474         return (insn->type == INSN_CALL &&
3475                 !strcmp(insn->call_dest->name,
3476                         "__ubsan_handle_builtin_unreachable"));
3477 }
3478
3479 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
3480 {
3481         int i;
3482         struct instruction *prev_insn;
3483
3484         if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
3485                 return true;
3486
3487         /*
3488          * Ignore alternative replacement instructions.  This can happen
3489          * when a whitelisted function uses one of the ALTERNATIVE macros.
3490          */
3491         if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
3492             !strcmp(insn->sec->name, ".altinstr_aux"))
3493                 return true;
3494
3495         /*
3496          * Whole archive runs might encounter dead code from weak symbols.
3497          * This is where the linker will have dropped the weak symbol in
3498          * favour of a regular symbol but left the code in place.
3499          *
3500          * In this case we'll find a piece of code (a whole function) that is
3501          * not covered by a regular (non-section) symbol. Ignore it.
3502          */
3503         if (opts.link && !insn->func) {
3504                 int size = find_symbol_hole_containing(insn->sec, insn->offset);
3505                 unsigned long end = insn->offset + size;
3506
3507                 if (!size) /* not a hole */
3508                         return false;
3509
3510                 if (size < 0) /* hole until the end */
3511                         return true;
3512
3513                 sec_for_each_insn_continue(file, insn) {
3514                         /*
3515                          * If we reach a visited instruction at or before the
3516                          * end of the hole, ignore the unreachable.
3517                          */
3518                         if (insn->visited)
3519                                 return true;
3520
3521                         if (insn->offset >= end)
3522                                 break;
3523
3524                         /*
3525                          * If this hole jumps to a .cold function, mark that function as ignored too.
3526                          */
3527                         if (insn->jump_dest && insn->jump_dest->func &&
3528                             strstr(insn->jump_dest->func->name, ".cold")) {
3529                                 struct instruction *dest = insn->jump_dest;
3530                                 func_for_each_insn(file, dest->func, dest)
3531                                         dest->ignore = true;
3532                         }
3533                 }
3534
3535                 return false;
3536         }
3537
3538         if (!insn->func)
3539                 return false;
3540
3541         if (insn->func->static_call_tramp)
3542                 return true;
3543
3544         /*
3545          * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
3546          * __builtin_unreachable().  The BUG() macro has an unreachable() after
3547          * the UD2, which causes GCC's undefined trap logic to emit another UD2
3548          * (or occasionally a JMP to UD2).
3549          *
3550          * It may also insert a UD2 after calling a __noreturn function.
3551          */
3552         prev_insn = list_prev_entry(insn, list);
3553         if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
3554             (insn->type == INSN_BUG ||
3555              (insn->type == INSN_JUMP_UNCONDITIONAL &&
3556               insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
3557                 return true;
3558
3559         /*
3560          * Check if this (or a subsequent) instruction is related to
3561          * CONFIG_UBSAN or CONFIG_KASAN.
3562          *
3563          * End the search at 5 instructions to avoid going into the weeds.
3564          */
3565         for (i = 0; i < 5; i++) {
3566
3567                 if (is_kasan_insn(insn) || is_ubsan_insn(insn))
3568                         return true;
3569
3570                 if (insn->type == INSN_JUMP_UNCONDITIONAL) {
3571                         if (insn->jump_dest &&
3572                             insn->jump_dest->func == insn->func) {
3573                                 insn = insn->jump_dest;
3574                                 continue;
3575                         }
3576
3577                         break;
3578                 }
3579
3580                 if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
3581                         break;
3582
3583                 insn = list_next_entry(insn, list);
3584         }
3585
3586         return false;
3587 }
3588
3589 static int validate_symbol(struct objtool_file *file, struct section *sec,
3590                            struct symbol *sym, struct insn_state *state)
3591 {
3592         struct instruction *insn;
3593         int ret;
3594
3595         if (!sym->len) {
3596                 WARN("%s() is missing an ELF size annotation", sym->name);
3597                 return 1;
3598         }
3599
3600         if (sym->pfunc != sym || sym->alias != sym)
3601                 return 0;
3602
3603         insn = find_insn(file, sec, sym->offset);
3604         if (!insn || insn->ignore || insn->visited)
3605                 return 0;
3606
3607         state->uaccess = sym->uaccess_safe;
3608
3609         ret = validate_branch(file, insn->func, insn, *state);
3610         if (ret && opts.backtrace)
3611                 BT_FUNC("<=== (sym)", insn);
3612         return ret;
3613 }
3614
3615 static int validate_section(struct objtool_file *file, struct section *sec)
3616 {
3617         struct insn_state state;
3618         struct symbol *func;
3619         int warnings = 0;
3620
3621         list_for_each_entry(func, &sec->symbol_list, list) {
3622                 if (func->type != STT_FUNC)
3623                         continue;
3624
3625                 init_insn_state(file, &state, sec);
3626                 set_func_state(&state.cfi);
3627
3628                 warnings += validate_symbol(file, sec, func, &state);
3629         }
3630
3631         return warnings;
3632 }
3633
3634 static int validate_noinstr_sections(struct objtool_file *file)
3635 {
3636         struct section *sec;
3637         int warnings = 0;
3638
3639         sec = find_section_by_name(file->elf, ".noinstr.text");
3640         if (sec) {
3641                 warnings += validate_section(file, sec);
3642                 warnings += validate_unwind_hints(file, sec);
3643         }
3644
3645         sec = find_section_by_name(file->elf, ".entry.text");
3646         if (sec) {
3647                 warnings += validate_section(file, sec);
3648                 warnings += validate_unwind_hints(file, sec);
3649         }
3650
3651         return warnings;
3652 }
3653
3654 static int validate_functions(struct objtool_file *file)
3655 {
3656         struct section *sec;
3657         int warnings = 0;
3658
3659         for_each_sec(file, sec) {
3660                 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
3661                         continue;
3662
3663                 warnings += validate_section(file, sec);
3664         }
3665
3666         return warnings;
3667 }
3668
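/*
 * ENDBR instructions sit on the seal list via insn->call_node; dropping one
 * from that list here marks it as used so it won't be sealed later.
 */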
3669 static void mark_endbr_used(struct instruction *insn)
3670 {
3671         if (!list_empty(&insn->call_node))
3672                 list_del_init(&insn->call_node);
3673 }
3674
3675 static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
3676 {
3677         struct instruction *dest;
3678         struct reloc *reloc;
3679         unsigned long off;
3680         int warnings = 0;
3681
3682         /*
3683          * Looking for function pointer load relocations.  Ignore
3684          * direct/indirect branches:
3685          */
3686         switch (insn->type) {
3687         case INSN_CALL:
3688         case INSN_CALL_DYNAMIC:
3689         case INSN_JUMP_CONDITIONAL:
3690         case INSN_JUMP_UNCONDITIONAL:
3691         case INSN_JUMP_DYNAMIC:
3692         case INSN_JUMP_DYNAMIC_CONDITIONAL:
3693         case INSN_RETURN:
3694         case INSN_NOP:
3695                 return 0;
3696         default:
3697                 break;
3698         }
3699
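        /*
         * Walk every relocation that falls within this instruction's bytes;
         * each one is a potential code address being taken.
         */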
3700         for (reloc = insn_reloc(file, insn);
3701              reloc;
3702              reloc = find_reloc_by_dest_range(file->elf, insn->sec,
3703                                               reloc->offset + 1,
3704                                               (insn->offset + insn->len) - (reloc->offset + 1))) {
3705
3706                 /*
3707                  * static_call_update() references the trampoline, which
3708                  * doesn't have (or need) ENDBR.  Skip warning in that case.
3709                  */
3710                 if (reloc->sym->static_call_tramp)
3711                         continue;
3712
3713                 off = reloc->sym->offset;
3714                 if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
3715                         off += arch_dest_reloc_offset(reloc->addend);
3716                 else
3717                         off += reloc->addend;
3718
3719                 dest = find_insn(file, reloc->sym->sec, off);
3720                 if (!dest)
3721                         continue;
3722
3723                 if (dest->type == INSN_ENDBR) {
3724                         mark_endbr_used(dest);
3725                         continue;
3726                 }
3727
3728                 if (dest->func && dest->func == insn->func) {
3729                         /*
3730                          * Anything from->to self is either _THIS_IP_ or
3731                          * IRET-to-self.
3732                          *
3733                          * There is no sane way to annotate _THIS_IP_ since the
3734                          * compiler treats the relocation as a constant and is
3735                          * happy to fold in offsets, skewing any annotation we
3736                          * do, leading to vast amounts of false-positives.
3737                          *
3738                          * There's also compiler generated _THIS_IP_ through
3739                          * KCOV and such which we have no hope of annotating.
3740                          *
3741                          * As such, blanket accept self-references without
3742                          * issue.
3743                          */
3744                         continue;
3745                 }
3746
3747                 if (dest->noendbr)
3748                         continue;
3749
3750                 WARN_FUNC("relocation to !ENDBR: %s",
3751                           insn->sec, insn->offset,
3752                           offstr(dest->sec, dest->offset));
3753
3754                 warnings++;
3755         }
3756
3757         return warnings;
3758 }
3759
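/*
 * A data relocation to a text address is potentially a function pointer being
 * stored, so its target is expected to start with ENDBR.
 */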
3760 static int validate_ibt_data_reloc(struct objtool_file *file,
3761                                    struct reloc *reloc)
3762 {
3763         struct instruction *dest;
3764
3765         dest = find_insn(file, reloc->sym->sec,
3766                          reloc->sym->offset + reloc->addend);
3767         if (!dest)
3768                 return 0;
3769
3770         if (dest->type == INSN_ENDBR) {
3771                 mark_endbr_used(dest);
3772                 return 0;
3773         }
3774
3775         if (dest->noendbr)
3776                 return 0;
3777
3778         WARN_FUNC("data relocation to !ENDBR: %s",
3779                   reloc->sec->base, reloc->offset,
3780                   offstr(dest->sec, dest->offset));
3781
3782         return 1;
3783 }
3784
3785 /*
3786  * Validate IBT rules and remove used ENDBR instructions from the seal list.
3787  * Unused ENDBR instructions are recorded by create_ibt_endbr_seal_sections()
3788  * so they can be sealed (replaced with NOPs) at boot.
3789  */
3790 static int validate_ibt(struct objtool_file *file)
3791 {
3792         struct section *sec;
3793         struct reloc *reloc;
3794         struct instruction *insn;
3795         int warnings = 0;
3796
3797         for_each_insn(file, insn)
3798                 warnings += validate_ibt_insn(file, insn);
3799
3800         for_each_sec(file, sec) {
3801
3802                 /* Already done by validate_ibt_insn() */
3803                 if (sec->sh.sh_flags & SHF_EXECINSTR)
3804                         continue;
3805
3806                 if (!sec->reloc)
3807                         continue;
3808
3809                 /*
3810                  * These sections can reference text addresses, but not with
3811                  * the intent to indirect branch to them.
3812                  */
3813                 if (!strncmp(sec->name, ".discard", 8)                  ||
3814                     !strncmp(sec->name, ".debug", 6)                    ||
3815                     !strcmp(sec->name, ".altinstructions")              ||
3816                     !strcmp(sec->name, ".ibt_endbr_seal")               ||
3817                     !strcmp(sec->name, ".orc_unwind_ip")                ||
3818                     !strcmp(sec->name, ".parainstructions")             ||
3819                     !strcmp(sec->name, ".retpoline_sites")              ||
3820                     !strcmp(sec->name, ".smp_locks")                    ||
3821                     !strcmp(sec->name, ".static_call_sites")            ||
3822                     !strcmp(sec->name, "_error_injection_whitelist")    ||
3823                     !strcmp(sec->name, "_kprobe_blacklist")             ||
3824                     !strcmp(sec->name, "__bug_table")                   ||
3825                     !strcmp(sec->name, "__ex_table")                    ||
3826                     !strcmp(sec->name, "__jump_table")                  ||
3827                     !strcmp(sec->name, "__mcount_loc")                  ||
3828                     !strcmp(sec->name, "__tracepoints"))
3829                         continue;
3830
3831                 list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
3832                         warnings += validate_ibt_data_reloc(file, reloc);
3833         }
3834
3835         return warnings;
3836 }
3837
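/*
 * SLS (straight-line speculation): some CPUs may speculatively execute the
 * instructions following a RET or an indirect JMP, so a RET or indirect JMP
 * is expected to be followed by an INT3 speculation trap.
 */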
3838 static int validate_sls(struct objtool_file *file)
3839 {
3840         struct instruction *insn, *next_insn;
3841         int warnings = 0;
3842
3843         for_each_insn(file, insn) {
3844                 next_insn = next_insn_same_sec(file, insn);
3845
3846                 if (insn->retpoline_safe)
3847                         continue;
3848
3849                 switch (insn->type) {
3850                 case INSN_RETURN:
3851                         if (!next_insn || next_insn->type != INSN_TRAP) {
3852                                 WARN_FUNC("missing int3 after ret",
3853                                           insn->sec, insn->offset);
3854                                 warnings++;
3855                         }
3856
3857                         break;
3858                 case INSN_JUMP_DYNAMIC:
3859                         if (!next_insn || next_insn->type != INSN_TRAP) {
3860                                 WARN_FUNC("missing int3 after indirect jump",
3861                                           insn->sec, insn->offset);
3862                                 warnings++;
3863                         }
3864                         break;
3865                 default:
3866                         break;
3867                 }
3868         }
3869
3870         return warnings;
3871 }
3872
3873 static int validate_reachable_instructions(struct objtool_file *file)
3874 {
3875         struct instruction *insn;
3876
3877         if (file->ignore_unreachables)
3878                 return 0;
3879
3880         for_each_insn(file, insn) {
3881                 if (insn->visited || ignore_unreachable_insn(file, insn))
3882                         continue;
3883
3884                 WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
3885                 return 1;
3886         }
3887
3888         return 0;
3889 }
3890
3891 int check(struct objtool_file *file)
3892 {
3893         int ret, warnings = 0;
3894
3895         arch_initial_func_cfi_state(&initial_func_cfi);
3896         init_cfi_state(&init_cfi);
3897         init_cfi_state(&func_cfi);
3898         set_func_state(&func_cfi);
3899
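        /* Size the CFI hash roughly in proportion to the symbol count. */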
3900         if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
3901                 goto out;
3902
3903         cfi_hash_add(&init_cfi);
3904         cfi_hash_add(&func_cfi);
3905
3906         ret = decode_sections(file);
3907         if (ret < 0)
3908                 goto out;
3909
3910         warnings += ret;
3911
3912         if (list_empty(&file->insn_list))
3913                 goto out;
3914
3915         if (opts.retpoline) {
3916                 ret = validate_retpoline(file);
3917                 if (ret < 0)
3918                         return ret;
3919                 warnings += ret;
3920         }
3921
3922         if (opts.stackval || opts.orc || opts.uaccess) {
3923                 ret = validate_functions(file);
3924                 if (ret < 0)
3925                         goto out;
3926                 warnings += ret;
3927
3928                 ret = validate_unwind_hints(file, NULL);
3929                 if (ret < 0)
3930                         goto out;
3931                 warnings += ret;
3932
3933                 if (!warnings) {
3934                         ret = validate_reachable_instructions(file);
3935                         if (ret < 0)
3936                                 goto out;
3937                         warnings += ret;
3938                 }
3939
3940         } else if (opts.noinstr) {
3941                 ret = validate_noinstr_sections(file);
3942                 if (ret < 0)
3943                         goto out;
3944                 warnings += ret;
3945         }
3946
3947         if (opts.ibt) {
3948                 ret = validate_ibt(file);
3949                 if (ret < 0)
3950                         goto out;
3951                 warnings += ret;
3952         }
3953
3954         if (opts.sls) {
3955                 ret = validate_sls(file);
3956                 if (ret < 0)
3957                         goto out;
3958                 warnings += ret;
3959         }
3960
3961         if (opts.static_call) {
3962                 ret = create_static_call_sections(file);
3963                 if (ret < 0)
3964                         goto out;
3965                 warnings += ret;
3966         }
3967
3968         if (opts.retpoline) {
3969                 ret = create_retpoline_sites_sections(file);
3970                 if (ret < 0)
3971                         goto out;
3972                 warnings += ret;
3973         }
3974
3975         if (opts.mcount) {
3976                 ret = create_mcount_loc_sections(file);
3977                 if (ret < 0)
3978                         goto out;
3979                 warnings += ret;
3980         }
3981
3982         if (opts.ibt) {
3983                 ret = create_ibt_endbr_seal_sections(file);
3984                 if (ret < 0)
3985                         goto out;
3986                 warnings += ret;
3987         }
3988
3989         if (opts.orc && !list_empty(&file->insn_list)) {
3990                 ret = orc_create(file);
3991                 if (ret < 0)
3992                         goto out;
3993                 warnings += ret;
3994         }
3995
3996
3997         if (opts.stats) {
3998                 printf("nr_insns_visited: %ld\n", nr_insns_visited);
3999                 printf("nr_cfi: %ld\n", nr_cfi);
4000                 printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
4001                 printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
4002         }
4003
4004 out:
4005         /*
4006          *  For now, don't fail the kernel build on fatal warnings.  These
4007          *  errors are still fairly common due to the growing matrix of
4008          *  supported toolchains and their recent pace of change.
4009          */
4010         return 0;
4011 }