KVM: SVM: move more vmentry code to assembly
author Paolo Bonzini <pbonzini@redhat.com>
Mon, 13 Apr 2020 07:17:58 +0000 (03:17 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 14 Apr 2020 08:21:21 +0000 (04:21 -0400)
Manipulate IF around vmload/vmsave to remove the confusing usage of
local_irq_enable where interrupts are actually disabled via GIF.
Also stuff the RSB immediately after VM-Exit, before the first RET
can execute, to avoid Spectre-v2 attacks.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/nospec-branch.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/vmenter.S
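
The key subtlety is the interplay between GIF and RFLAGS.IF: the whole
entry/exit path runs under clgi(), and with GIF=0 the processor holds
off interrupts no matter what RFLAGS.IF says, which is what made the
old local_irq_enable() call so confusing. A minimal sketch of the
bracket this patch produces (simplified pseudo-assembly, not the
literal code):

    clgi                  /* GIF=0: interrupts, NMIs, SMIs all held off */
    sti                   /* IF=1, but still masked because GIF=0 */
    vmload %rax
    vmrun  %rax           /* VMRUN sets GIF for the guest; #VMEXIT clears it */
    vmsave %rax
    cli                   /* IF=0 again before GIF is reopened */
    stgi                  /* GIF=1: pending events can now be delivered */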

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 07e95dc..7e9a281 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -237,27 +237,6 @@ enum ssb_mitigation {
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
-/*
- * On VMEXIT we must ensure that no RSB predictions learned in the guest
- * can be followed in the host, by overwriting the RSB completely. Both
- * retpoline and IBRS mitigations for Spectre v2 need this; only on future
- * CPUs with IBRS_ALL *might* it be avoided.
- */
-static inline void vmexit_fill_RSB(void)
-{
-#ifdef CONFIG_RETPOLINE
-       unsigned long loops;
-
-       asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
-                     ALTERNATIVE("jmp 910f",
-                                 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
-                                 X86_FEATURE_RETPOLINE)
-                     "910:"
-                     : "=r" (loops), ASM_CALL_CONSTRAINT
-                     : : "memory" );
-#endif
-}
-
 static __always_inline
 void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 2be5bba..117bb0b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3330,13 +3330,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         */
        x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
-       local_irq_enable();
-
        __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
 
-       /* Eliminate branch target predictions from guest mode */
-       vmexit_fill_RSB();
-
 #ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
@@ -3366,8 +3361,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
        reload_tss(vcpu);
 
-       local_irq_disable();
-
        x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
        vcpu->arch.cr2 = svm->vmcb->save.cr2;
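
For reference, the surrounding control flow of svm_vcpu_run() after
this change, trimmed down to the calls that matter here (a sketch, not
the full function body):

    clgi();                      /* GIF=0 for the entire entry/exit section */
    ...
    x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
    __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
    /* sti/vmload/vmrun/vmsave/cli and the RSB fill all happen inside */
    x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
    ...
    stgi();                      /* GIF=1: pending interrupts are delivered */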
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index fa1af90..723887e 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -3,6 +3,7 @@
 #include <asm/asm.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
+#include <asm/nospec-branch.h>
 
 #define WORD_SIZE (BITS_PER_LONG / 8)
 
@@ -78,6 +79,7 @@ SYM_FUNC_START(__svm_vcpu_run)
        pop %_ASM_AX
 
        /* Enter guest mode */
+       sti
 1:     vmload %_ASM_AX
        jmp 3f
 2:     cmpb $0, kvm_rebooting
@@ -99,6 +101,13 @@ SYM_FUNC_START(__svm_vcpu_run)
        ud2
        _ASM_EXTABLE(5b, 6b)
 7:
+       cli
+
+#ifdef CONFIG_RETPOLINE
+       /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+       FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+#endif
+
        /* "POP" @regs to RAX. */
        pop %_ASM_AX
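
Putting the two vmenter.S hunks together, the guest-entry bracket of
__svm_vcpu_run() now reads approximately as follows; the vmrun step
and its kvm_rebooting fixup live in the context elided between the
hunks:

        /* Enter guest mode */
        sti
    1:  vmload %_ASM_AX
        jmp 3f
    2:  cmpb $0, kvm_rebooting
        jne 3f
        ud2
        _ASM_EXTABLE(1b, 2b)

    3:  vmrun %_ASM_AX
        jmp 5f
    4:  cmpb $0, kvm_rebooting
        jne 5f
        ud2
        _ASM_EXTABLE(3b, 4b)

    5:  vmsave %_ASM_AX
        jmp 7f
    6:  cmpb $0, kvm_rebooting
        jne 7f
        ud2
        _ASM_EXTABLE(5b, 6b)
    7:  cli

    #ifdef CONFIG_RETPOLINE
        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
    #endif

Doing the cli and the RSB fill here, before the function's first ret,
is what lets the C code drop both local_irq_disable() and
vmexit_fill_RSB().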