KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
[linux-2.6-microblaze.git] / arch/x86/include/asm/nospec-branch.h
index ce1acb5..ccde87e 100644
        .popsection
 .endm
 
+/*
+ * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
+ * for the purposes of objtool's RETBleed validation.
+ */
+#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE
+
+/*
+ * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END; this should
+ * eventually become its own annotation.
+ */
+.macro ANNOTATE_UNRET_END
+#ifdef CONFIG_DEBUG_ENTRY
+       ANNOTATE_RETPOLINE_SAFE
+       nop
+#endif
+.endm
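
For context (how the new annotation is meant to be consumed, not part of the hunk above): a RET that must deliberately stay a bare 'ret', such as the one inside the untraining sequence itself, is preceded by ANNOTATE_UNRET_SAFE so the RETBleed validation does not complain about it. A minimal illustrative sketch:

	ANNOTATE_UNRET_SAFE
	ret
	int3	/* speculation trap */
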
+
 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above, manually.
  */
 .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
-#ifdef CONFIG_RETPOLINE
        ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
        __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
 .Lskip_rsb_\@:
-#endif
 .endm
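
With the CONFIG_RETPOLINE guard removed, the RSB-stuffing sequence is emitted whenever the selected feature bit is enabled, independent of retpoline builds, which is what makes it usable on the VM-exit path. An illustrative call site (the scratch register and feature flag are examples only, not taken from this patch):

	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
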
 
 /*
  * return thunk isn't mapped into the userspace tables (then again, AMD
  * typically has NO_MELTDOWN).
  *
- * Doesn't clobber any registers but does require a stable stack.
+ * While zen_untrain_ret() doesn't clobber anything but does require a stack,
+ * entry_ibpb() will clobber AX, CX, DX.
  *
  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
  * where we have a stack but before any RET instruction.
  */
 .macro UNTRAIN_RET
 #ifdef CONFIG_RETPOLINE
-       ALTERNATIVE "", "call zen_untrain_ret", X86_FEATURE_UNRET
+       ANNOTATE_UNRET_END
+       ALTERNATIVE_2 "",                                               \
+                     "call zen_untrain_ret", X86_FEATURE_UNRET,        \
+                     "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
 #endif
 .endm
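
Placement is the important part: UNTRAIN_RET may now clobber AX, CX and DX through entry_ibpb(), so it has to run after the general-purpose registers have been saved, once the kernel CR3 and a kernel stack are live, and before the first RET on that path. A simplified, hypothetical entry-path sketch (macro names as used by the x86 entry code):

	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp	/* switch to kernel page tables */
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	PUSH_AND_CLEAR_REGS			/* pt_regs saved; AX/CX/DX may be clobbered */
	UNTRAIN_RET				/* before any RET executes */
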
 
@@ -141,6 +160,7 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
 
 extern void __x86_return_thunk(void);
 extern void zen_untrain_ret(void);
+extern void entry_ibpb(void);
 
 #ifdef CONFIG_RETPOLINE
 
@@ -254,6 +274,7 @@ static inline void indirect_branch_prediction_barrier(void)
 
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
+extern u64 x86_spec_ctrl_current;
 extern void write_spec_ctrl_current(u64 val, bool force);
 extern u64 spec_ctrl_current(void);
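
x86_spec_ctrl_base remains the boot-time baseline, while x86_spec_ctrl_current is meant to track the value most recently programmed into MSR_IA32_SPEC_CTRL, with spec_ctrl_current() returning that tracked value. The real definitions live in arch/x86/kernel/cpu/bugs.c; purely as a sketch of the intended semantics (not the actual implementation):

u64 x86_spec_ctrl_current;

u64 spec_ctrl_current(void)
{
	return x86_spec_ctrl_current;
}

void write_spec_ctrl_current(u64 val, bool force)
{
	if (!force && x86_spec_ctrl_current == val)
		return;

	x86_spec_ctrl_current = val;
	wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
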
 
@@ -265,18 +286,16 @@ extern u64 spec_ctrl_current(void);
  */
 #define firmware_restrict_branch_speculation_start()                   \
 do {                                                                   \
-       u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;                  \
-                                                                       \
        preempt_disable();                                              \
-       alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL,                       \
+                             spec_ctrl_current() | SPEC_CTRL_IBRS,     \
                              X86_FEATURE_USE_IBRS_FW);                 \
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()                     \
 do {                                                                   \
-       u64 val = x86_spec_ctrl_base;                                   \
-                                                                       \
-       alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL,                       \
+                             spec_ctrl_current(),                      \
                              X86_FEATURE_USE_IBRS_FW);                 \
        preempt_enable();                                               \
 } while (0)
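
By re-reading spec_ctrl_current() instead of the boot-time x86_spec_ctrl_base, the end() macro no longer downgrades SPEC_CTRL when bits such as kernel IBRS have been set after boot. The pair brackets a firmware call with preemption disabled for the duration; a hedged usage sketch (the callee name is hypothetical):

	firmware_restrict_branch_speculation_start();
	status = efi_call_firmware();	/* hypothetical firmware call */
	firmware_restrict_branch_speculation_end();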