x86/ibt: Annotate text references
author Peter Zijlstra <peterz@infradead.org>
Tue, 8 Mar 2022 15:30:40 +0000 (16:30 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Tue, 15 Mar 2022 09:32:40 +0000 (10:32 +0100)
Annotate away some of the generic code references. These are cases
where we take the address of a symbol for exception handling or for
return addresses (e.g. context switch).
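
For reference, ANNOTATE_NOENDBR (introduced earlier in this series)
drops the annotated address into a .discard.noendbr section that
objtool consumes, so deliberate !ENDBR text references can be told
apart from mistakes. A rough sketch of the C-string flavour, modelled
on include/linux/objtool.h of this era (shown for context, not part
of this patch; exact spelling may differ):

  #define ANNOTATE_NOENDBR				\
	"986: \n\t"					\
	".pushsection .discard.noendbr\n\t"		\
	".quad 986b\n\t"				\
	".popsection\n\t"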

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154318.877758523@infradead.org
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/kernel/alternative.c
arch/x86/kernel/head_64.S
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/relocate_kernel_64.S
arch/x86/lib/error-inject.c
arch/x86/lib/retpoline.S

index 50b6118..d76f14f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -277,6 +277,7 @@ SYM_FUNC_END(__switch_to_asm)
 .pushsection .text, "ax"
 SYM_CODE_START(ret_from_fork)
        UNWIND_HINT_EMPTY
+       ANNOTATE_NOENDBR // copy_thread
        movq    %rax, %rdi
        call    schedule_tail                   /* rdi: 'prev' task parameter */
 
@@ -569,6 +570,7 @@ __irqentry_text_start:
        .align 16
        .globl __irqentry_text_end
 __irqentry_text_end:
+       ANNOTATE_NOENDBR
 
 SYM_CODE_START_LOCAL(common_interrupt_return)
 SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
@@ -650,6 +652,7 @@ SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
 #endif
 
 SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
+       ANNOTATE_NOENDBR // exc_double_fault
        /*
         * This may fault.  Non-paranoid faults on return to userspace are
         * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
@@ -744,6 +747,7 @@ SYM_FUNC_START(asm_load_gs_index)
        FRAME_BEGIN
        swapgs
 .Lgs_change:
+       ANNOTATE_NOENDBR // error_entry
        movl    %edi, %gs
 2:     ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
        swapgs
@@ -1322,6 +1326,7 @@ first_nmi:
 #endif
 
 repeat_nmi:
+       ANNOTATE_NOENDBR // this code
        /*
         * If there was a nested NMI, the first NMI's iret will return
         * here. But NMIs are still enabled and we can take another
@@ -1350,6 +1355,7 @@ repeat_nmi:
        .endr
        subq    $(5*8), %rsp
 end_repeat_nmi:
+       ANNOTATE_NOENDBR // this code
 
        /*
         * Everything below this point can be preempted by a nested NMI.
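
The comments name the consumers of these addresses. For example the
"// exc_double_fault" one: the #DF handler compares the faulting IP
against native_irq_return_iret to detect a failed IRET. A simplified
sketch of that check from arch/x86/kernel/traps.c (context only, not
part of this patch; is_failed_iret() is a hypothetical wrapper for
illustration):

  extern unsigned char native_irq_return_iret[];

  /* exc_double_fault(), simplified: was this a fault on the IRET? */
  static bool is_failed_iret(struct pt_regs *regs)
  {
	return regs->ip == (unsigned long)native_irq_return_iret;
  }

Similarly, .Lgs_change is matched by the error_entry fixup path in
this same file.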
index 35a0e69..74208a1 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -148,6 +148,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
        popfq
        jmp     .Lsysenter_flags_fixed
 SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
+       ANNOTATE_NOENDBR // is_sysenter_singlestep
 SYM_CODE_END(entry_SYSENTER_compat)
 
 /*
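
Here the address-taking consumer is is_sysenter_singlestep() in
arch/x86/kernel/traps.c, which range-checks the trapping IP against
the compat SYSENTER region; roughly (simplified sketch of the
IA32_EMULATION flavour from memory of that era's code, not part of
this patch):

  static bool is_sysenter_singlestep(struct pt_regs *regs)
  {
	/* Is the trapping IP inside the SYSENTER entry region? */
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
	       (unsigned long)__end_entry_SYSENTER_compat -
	       (unsigned long)entry_SYSENTER_compat;
  }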
index 14d1003..954d39c 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -713,6 +713,7 @@ asm (
 "      .pushsection    .init.text, \"ax\", @progbits\n"
 "      .type           int3_magic, @function\n"
 "int3_magic:\n"
+       ANNOTATE_NOENDBR
 "      movl    $1, (%" _ASM_ARG1 ")\n"
        ASM_RET
 "      .size           int3_magic, .-int3_magic\n"
@@ -724,16 +725,19 @@ extern void int3_selftest_ip(void); /* defined in asm below */
 static int __init
 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
 {
+       unsigned long selftest = (unsigned long)&int3_selftest_ip;
        struct die_args *args = data;
        struct pt_regs *regs = args->regs;
 
+       OPTIMIZER_HIDE_VAR(selftest);
+
        if (!regs || user_mode(regs))
                return NOTIFY_DONE;
 
        if (val != DIE_INT3)
                return NOTIFY_DONE;
 
-       if (regs->ip - INT3_INSN_SIZE != (unsigned long)&int3_selftest_ip)
+       if (regs->ip - INT3_INSN_SIZE != selftest)
                return NOTIFY_DONE;
 
        int3_emulate_call(regs, (unsigned long)&int3_magic);
@@ -757,7 +761,9 @@ static noinline void __init int3_selftest(void)
         * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
         * notifier above will emulate CALL for us.
         */
-       asm volatile ("int3_selftest_ip: int3; nop; nop; nop; nop\n\t"
+       asm volatile ("int3_selftest_ip:\n\t"
+                     ANNOTATE_NOENDBR
+                     "    int3; nop; nop; nop; nop\n\t"
                      : ASM_CALL_CONSTRAINT
                      : __ASM_SEL_RAW(a, D) (&val)
                      : "memory");
index 9b6fa76..462cc1e 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -99,6 +99,7 @@ SYM_CODE_END(startup_64)
 
 SYM_CODE_START(secondary_startup_64)
        UNWIND_HINT_EMPTY
+       ANNOTATE_NOENDBR
        /*
         * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
         * and someone has loaded a mapped page table.
@@ -127,6 +128,7 @@ SYM_CODE_START(secondary_startup_64)
         */
 SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
        UNWIND_HINT_EMPTY
+       ANNOTATE_NOENDBR
 
        /*
         * Retrieve the modifier (SME encryption mask if SME is active) to be
@@ -192,6 +194,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
        jmp     *%rax
 1:
        UNWIND_HINT_EMPTY
+       ANNOTATE_NOENDBR // above
 
        /*
         * We must switch to a new descriptor in kernel space for the GDT
@@ -299,6 +302,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
        pushq   %rax            # target address in negative space
        lretq
 .Lafter_lret:
+       ANNOTATE_NOENDBR
 SYM_CODE_END(secondary_startup_64)
 
 #include "verify_cpu.S"
index 9ea0e3e..8ef933c 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1033,6 +1033,7 @@ asm(
        ".type __kretprobe_trampoline, @function\n"
        "__kretprobe_trampoline:\n"
 #ifdef CONFIG_X86_64
+       ANNOTATE_NOENDBR
        /* Push a fake return address to tell the unwinder it's a kretprobe. */
        "       pushq $__kretprobe_trampoline\n"
        UNWIND_HINT_FUNC
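
__kretprobe_trampoline is never the target of an indirect branch; its
address is planted as a fake return address, so it is only reached by
RET, which IBT does not track. The consumer in this same file looks
roughly like this (context only, not part of this patch):

  void arch_prepare_kretprobe(struct kretprobe_instance *ri,
			      struct pt_regs *regs)
  {
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *)*sara;
	ri->fp = sara;

	/* Replace the return address with the trampoline's. */
	*sara = (unsigned long)&__kretprobe_trampoline;
  }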
index 5b65f6e..c1d8626 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -42,6 +42,7 @@
        .code64
 SYM_CODE_START_NOALIGN(relocate_kernel)
        UNWIND_HINT_EMPTY
+       ANNOTATE_NOENDBR
        /*
         * %rdi indirection_page
         * %rsi page_list
@@ -223,6 +224,7 @@ SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
        UNWIND_HINT_EMPTY
+       ANNOTATE_NOENDBR // RET target, above
        movq    RSP(%r8), %rsp
        movq    CR4(%r8), %rax
        movq    %rax, %cr4
index 5208970..1e3de07 100644
--- a/arch/x86/lib/error-inject.c
+++ b/arch/x86/lib/error-inject.c
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <linux/error-injection.h>
 #include <linux/kprobes.h>
+#include <linux/objtool.h>
 
 asmlinkage void just_return_func(void);
 
@@ -11,6 +12,7 @@ asm(
        ".type just_return_func, @function\n"
        ".globl just_return_func\n"
        "just_return_func:\n"
+               ANNOTATE_NOENDBR
                ASM_RET
        ".size just_return_func, .-just_return_func\n"
 );
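
just_return_func's address is only ever written into regs->ip by the
error-injection override in this same file, i.e. the label is reached
via exception return rather than an indirect branch:

  void override_function_with_return(struct pt_regs *regs)
  {
	regs->ip = (unsigned long)&just_return_func;
  }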
index afbdda5..5f87bab 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -55,6 +55,7 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 
        .align RETPOLINE_THUNK_SIZE
 SYM_CODE_START(__x86_indirect_thunk_array)
+       ANNOTATE_NOENDBR // apply_retpolines
 
 #define GEN(reg) THUNK reg
 #include <asm/GEN-for-each-reg.h>
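
The "// apply_retpolines" comment refers to the patching code in
arch/x86/kernel/alternative.c, which takes the array's address to
turn a "call __x86_indirect_thunk_<reg>" back into a register number.
Schematic sketch of that arithmetic (thunk_reg() is a hypothetical
helper for illustration; the real code differs):

  extern u8 __x86_indirect_thunk_array[];

  /* Recover <reg> from the offset of the call target into the array. */
  static int thunk_reg(void *addr, struct insn *insn)
  {
	void *target = addr + insn->length + insn->immediate.value;

	return (target - (void *)__x86_indirect_thunk_array) /
	       RETPOLINE_THUNK_SIZE;
  }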