diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d078092..8be042d 100644
@@ -21,6 +21,7 @@
 #include <linux/smp.h>
 #include <linux/io.h>
 #include <linux/syscore_ops.h>
+#include <linux/pgtable.h>
 
 #include <asm/stackprotector.h>
 #include <asm/perf_event.h>
@@ -35,7 +36,6 @@
 #include <asm/vsyscall.h>
 #include <linux/topology.h>
 #include <linux/cpumask.h>
-#include <asm/pgtable.h>
 #include <linux/atomic.h>
 #include <asm/proto.h>
 #include <asm/setup.h>
@@ -387,7 +387,30 @@ set_register:
                          bits_missing);
        }
 }
-EXPORT_SYMBOL(native_write_cr4);
+#if IS_MODULE(CONFIG_LKDTM)
+EXPORT_SYMBOL_GPL(native_write_cr4);
+#endif
+
+void cr4_update_irqsoff(unsigned long set, unsigned long clear)
+{
+       unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
+
+       lockdep_assert_irqs_disabled();
+
+       newval = (cr4 & ~clear) | set;
+       if (newval != cr4) {
+               this_cpu_write(cpu_tlbstate.cr4, newval);
+               __write_cr4(newval);
+       }
+}
+EXPORT_SYMBOL(cr4_update_irqsoff);
+
+/* Read the CR4 shadow. */
+unsigned long cr4_read_shadow(void)
+{
+       return this_cpu_read(cpu_tlbstate.cr4);
+}
+EXPORT_SYMBOL_GPL(cr4_read_shadow);
 
 void cr4_init(void)
 {
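
For reference, most callers are not expected to invoke cr4_update_irqsoff() directly; thin set/clear wrappers layer on top of it (wrappers of this shape live in arch/x86/include/asm/tlbflush.h upstream). A minimal sketch of that usage pattern, not the literal header contents:

	/*
	 * Sketch only: set/clear helpers built on cr4_update_irqsoff().
	 * The caller must already have interrupts disabled.
	 */
	static inline void cr4_set_bits_irqsoff(unsigned long mask)
	{
		cr4_update_irqsoff(mask, 0);	/* set @mask, clear nothing */
	}

	static inline void cr4_clear_bits_irqsoff(unsigned long mask)
	{
		cr4_update_irqsoff(0, mask);	/* clear @mask, set nothing */
	}
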
@@ -1050,9 +1073,30 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        {}
 };
 
-static bool __init cpu_matches(unsigned long which)
+#define VULNBL_INTEL_STEPPINGS(model, steppings, issues)                  \
+       X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,             \
+                                           INTEL_FAM6_##model, steppings, \
+                                           X86_FEATURE_ANY, issues)
+
+#define SRBDS          BIT(0)
+
+static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+       VULNBL_INTEL_STEPPINGS(IVYBRIDGE,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(HASWELL,         X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(HASWELL_L,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(HASWELL_G,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(BROADWELL_G,     X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(BROADWELL,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPINGS(0x0, 0xC),        SRBDS),
+       VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPINGS(0x0, 0xD),        SRBDS),
+       {}
+};
+
+static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
 {
-       const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
+       const struct x86_cpu_id *m = x86_match_cpu(table);
 
        return m && !!(m->driver_data & which);
 }
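
For context, x86_match_cpu() returns the first table entry whose vendor/family/model/stepping (and feature) match the running CPU, and driver_data carries the per-entry flag bits that cpu_matches() tests; passing the table in lets the same helper serve both the whitelist and the new blacklist. A hedged sketch of roughly what one blacklist entry above expands to (field layout abridged, not the literal macro output):

	/* Sketch: approximate expansion of VULNBL_INTEL_STEPPINGS(SKYLAKE, ...). */
	static const struct x86_cpu_id example_entry = {
		.vendor		= X86_VENDOR_INTEL,
		.family		= 6,
		.model		= INTEL_FAM6_SKYLAKE,
		.steppings	= X86_STEPPING_ANY,	/* match all steppings */
		.feature	= X86_FEATURE_ANY,	/* no feature requirement */
		.driver_data	= SRBDS,		/* flag tested by cpu_matches() */
	};
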
@@ -1072,31 +1116,34 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
        u64 ia32_cap = x86_read_arch_cap_msr();
 
        /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
-       if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+       if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
+           !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
                setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
 
-       if (cpu_matches(NO_SPECULATION))
+       if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
                return;
 
        setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
 
-       if (!cpu_matches(NO_SPECTRE_V2))
+       if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
                setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
-       if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
+       if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
+           !(ia32_cap & ARCH_CAP_SSB_NO) &&
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
        if (ia32_cap & ARCH_CAP_IBRS_ALL)
                setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 
-       if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+       if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+           !(ia32_cap & ARCH_CAP_MDS_NO)) {
                setup_force_cpu_bug(X86_BUG_MDS);
-               if (cpu_matches(MSBDS_ONLY))
+               if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
                        setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
        }
 
-       if (!cpu_matches(NO_SWAPGS))
+       if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
                setup_force_cpu_bug(X86_BUG_SWAPGS);
 
        /*
@@ -1114,7 +1161,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
             (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
                setup_force_cpu_bug(X86_BUG_TAA);
 
-       if (cpu_matches(NO_MELTDOWN))
+       /*
+        * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
+        * in the vulnerability blacklist.
+        */
+       if ((cpu_has(c, X86_FEATURE_RDRAND) ||
+            cpu_has(c, X86_FEATURE_RDSEED)) &&
+           cpu_matches(cpu_vuln_blacklist, SRBDS))
+               setup_force_cpu_bug(X86_BUG_SRBDS);
+
+       if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
                return;
 
        /* Rogue Data Cache Load? No! */
@@ -1123,7 +1179,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 
        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 
-       if (cpu_matches(NO_L1TF))
+       if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
                return;
 
        setup_force_cpu_bug(X86_BUG_L1TF);
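
Once X86_BUG_SRBDS is forced above, later code only needs to test the bug bit; mitigation selection itself lives elsewhere (arch/x86/kernel/cpu/bugs.c upstream). A minimal consumer-side sketch, hypothetical call site:

	/* Sketch: mitigation/reporting code keys off the bug bit set above. */
	if (boot_cpu_has_bug(X86_BUG_SRBDS))
		pr_info("CPU affected by SRBDS, selecting mitigation\n");
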
@@ -1551,6 +1607,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
        mtrr_ap_init();
        validate_apic_and_package_id(c);
        x86_spec_ctrl_setup_ap();
+       update_srbds_msr();
 }
 
 static __init int setup_noclflush(char *arg)
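
The update_srbds_msr() call added to identify_secondary_cpu() re-applies the SRBDS mitigation state as each AP comes up, so the secondary CPUs end up programmed the same way as the boot CPU. A condensed sketch of what that helper does upstream (abridged from arch/x86/kernel/cpu/bugs.c, not the exact body):

	/* Sketch: program RNGDS_MITG_DIS according to the chosen SRBDS mode. */
	void update_srbds_msr(void)
	{
		u64 mcu_ctrl;

		if (!boot_cpu_has_bug(X86_BUG_SRBDS))
			return;

		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		if (srbds_mitigation == SRBDS_MITIGATION_FULL)
			mcu_ctrl &= ~RNGDS_MITG_DIS;	/* enable the RNG mitigation */
		else
			mcu_ctrl |= RNGDS_MITG_DIS;	/* leave RDRAND/RDSEED unthrottled */
		wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
	}
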