x86/bugs: Read SPEC_CTRL MSR during boot and re-use reserved bits
Author:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
AuthorDate: Thu, 26 Apr 2018 02:04:18 +0000 (22:04 -0400)
Commit:     Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 3 May 2018 11:55:47 +0000 (13:55 +0200)
The 336996-Speculative-Execution-Side-Channel-Mitigations.pdf document refers
to all the other bits of the SPEC_CTRL MSR as reserved. The Intel SDM glossary
defines reserved as implementation specific, i.e. unknown.

As such, at bootup this must be taken into account and proper masking applied
for the bits in use.
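
To make the scheme concrete, here is a minimal sketch of the approach
(illustrative only, mirroring the patch below rather than being part of it;
the names spec_ctrl_base_example, example_capture_spec_ctrl and
example_enable_ibrs are made up): read the MSR once at boot so that
firmware-set reserved bits are captured, then OR the mitigation bits on top
of that preserved base for every later write.

  #include <linux/init.h>
  #include <linux/cache.h>
  #include <asm/msr.h>
  #include <asm/cpufeature.h>
  #include <asm/nospec-branch.h>

  /* Boot-time snapshot of SPEC_CTRL, including any reserved bits. */
  static u64 __ro_after_init spec_ctrl_base_example;

  static void __init example_capture_spec_ctrl(void)
  {
          /* One boot-time read preserves reserved bits of unknown meaning. */
          if (boot_cpu_has(X86_FEATURE_IBRS))
                  rdmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl_base_example);
  }

  static void example_enable_ibrs(void)
  {
          /* Mitigation bits are OR-ed on top of the preserved base. */
          wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl_base_example | SPEC_CTRL_IBRS);
  }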

A copy of this document is available at
https://bugzilla.kernel.org/show_bug.cgi?id=199511

[ tglx: Made x86_spec_ctrl_base __ro_after_init ]

Suggested-by: Jon Masters <jcm@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs.c

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 870acfc..9ec3d4d 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -217,6 +217,17 @@ enum spectre_v2_mitigation {
        SPECTRE_V2_IBRS,
 };
 
+/*
+ * The Intel specification for the SPEC_CTRL MSR requires that we
+ * preserve any already set reserved bits at boot time (e.g. for
+ * future additions that this kernel is not currently aware of).
+ * We then set any additional mitigation bits that we want
+ * ourselves and always use this as the base for SPEC_CTRL.
+ * We also use this when handling guest entry/exit as below.
+ */
+extern void x86_spec_ctrl_set(u64);
+extern u64 x86_spec_ctrl_get_default(void);
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
@@ -254,8 +265,9 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 
 static inline void indirect_branch_prediction_barrier(void)
 {
-       alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
-                             X86_FEATURE_USE_IBPB);
+       u64 val = PRED_CMD_IBPB;
+
+       alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
 }
 
 /*
@@ -266,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void)
  */
 #define firmware_restrict_branch_speculation_start()                   \
 do {                                                                   \
+       u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS;         \
+                                                                       \
        preempt_disable();                                              \
-       alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,       \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()                     \
 do {                                                                   \
-       alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,                    \
+       u64 val = x86_spec_ctrl_get_default();                          \
+                                                                       \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
        preempt_enable();                                               \
 } while (0)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ad613f7..6ed84f5 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
 
 static void __init spectre_v2_select_mitigation(void);
 
+/*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+static u64 __ro_after_init x86_spec_ctrl_base;
+
 void __init check_bugs(void)
 {
        identify_boot_cpu();
@@ -37,6 +43,13 @@ void __init check_bugs(void)
                print_cpu_info(&boot_cpu_data);
        }
 
+       /*
+        * Read the SPEC_CTRL MSR to account for reserved bits which may
+        * have unknown values.
+        */
+       if (boot_cpu_has(X86_FEATURE_IBRS))
+               rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();
 
@@ -95,6 +108,21 @@ static const char *spectre_v2_strings[] = {
 
 static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
 
+void x86_spec_ctrl_set(u64 val)
+{
+       if (val & ~SPEC_CTRL_IBRS)
+               WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
+       else
+               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
+
+u64 x86_spec_ctrl_get_default(void)
+{
+       return x86_spec_ctrl_base;
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
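
The comment added to nospec-branch.h above mentions that the preserved base
value is also meant to be used on guest entry/exit; that code is outside this
excerpt. Purely as a hypothetical sketch, assuming a hypervisor that lets a
guest run with its own SPEC_CTRL value, the two exported helpers could be used
like this (example_guest_entry, example_guest_exit and the guest_spec_ctrl
parameter are invented for illustration):

  #include <asm/msr.h>
  #include <asm/nospec-branch.h>

  static void example_guest_entry(u64 guest_spec_ctrl)
  {
          /* Switch to the guest's value only when it differs from the host base. */
          if (guest_spec_ctrl != x86_spec_ctrl_get_default())
                  wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
  }

  static void example_guest_exit(u64 guest_spec_ctrl)
  {
          /* Restore the host base, preserved reserved bits included. */
          if (guest_spec_ctrl != x86_spec_ctrl_get_default())
                  x86_spec_ctrl_set(0);
  }

Skipping the MSR write when the guest never diverged from the host value
avoids an expensive serializing operation on the common path.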