Merge tag 'for-linus-4.19-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2322d0c..84dee5a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -661,33 +661,36 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
                tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 }
 
-void detect_ht(struct cpuinfo_x86 *c)
+int detect_ht_early(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
-       int index_msb, core_bits;
-       static bool printed;
 
        if (!cpu_has(c, X86_FEATURE_HT))
-               return;
+               return -1;
 
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
-               goto out;
+               return -1;
 
        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
-               return;
+               return -1;
 
        cpuid(1, &eax, &ebx, &ecx, &edx);
 
        smp_num_siblings = (ebx & 0xff0000) >> 16;
-
-       if (smp_num_siblings == 1) {
+       if (smp_num_siblings == 1)
                pr_info_once("CPU0: Hyper-Threading is disabled\n");
-               goto out;
-       }
+#endif
+       return 0;
+}
 
-       if (smp_num_siblings <= 1)
-               goto out;
+void detect_ht(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+       int index_msb, core_bits;
+
+       if (detect_ht_early(c) < 0)
+               return;
 
        index_msb = get_count_order(smp_num_siblings);
        c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
@@ -700,15 +703,6 @@ void detect_ht(struct cpuinfo_x86 *c)
 
        c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
                                       ((1 << core_bits) - 1);
-
-out:
-       if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
-               pr_info("CPU: Physical Processor ID: %d\n",
-                       c->phys_proc_id);
-               pr_info("CPU: Processor Core ID: %d\n",
-                       c->cpu_core_id);
-               printed = 1;
-       }
 #endif
 }
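The two hunks above split HT detection in half: detect_ht_early() now does only the feature checks plus the CPUID leaf 1 sibling-count read, returning -1 when no usable hyper-threading information exists, while detect_ht() keeps the phys_proc_id/cpu_core_id derivation. The point of the split is that smp_num_siblings becomes available to early boot code, before the full topology pass. A minimal sketch of an early caller, assuming the 4.19-era API; the function name is hypothetical, and the real call sites (the vendor early-init paths) live outside this file:

    /*
     * Sketch: an early vendor-init path can populate smp_num_siblings
     * long before detect_ht() runs.  Only detect_ht_early(),
     * smp_num_siblings and pr_info_once() are real kernel symbols here.
     */
    static void early_init_vendor_sketch(struct cpuinfo_x86 *c)
    {
            /*
             * A negative return means no HT info; smp_num_siblings then
             * keeps its default of 1, a safe "SMT off" answer for early
             * mitigation decisions.
             */
            if (detect_ht_early(c) < 0)
                    return;

            if (smp_num_siblings > 1)
                    pr_info_once("SMT siblings known early: %d\n",
                                 smp_num_siblings);
    }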
 
@@ -987,6 +981,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
        {}
 };
 
+static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+       /* in addition to cpu_no_speculation */
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT1     },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT2     },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_MERRIFIELD      },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_MOOREFIELD      },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT        },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_DENVERTON       },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GEMINI_LAKE     },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
+       {}
+};
+
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
        u64 ia32_cap = 0;
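cpu_no_l1tf follows the standard x86_cpu_id match-table idiom: positional { vendor, family, model } initializers, terminated by an all-zero entry. x86_match_cpu() walks such a table and returns the first entry matching the running CPU, or NULL. The same idiom in isolation, with a hypothetical table name and bug:

    #include <asm/cpu_device_id.h>
    #include <asm/intel-family.h>

    /* Hypothetical "not affected" list in the cpu_no_l1tf style. */
    static const __initconst struct x86_cpu_id no_bug_foo[] = {
            { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
            {}      /* all-zero entry terminates the table */
    };

    static bool __init cpu_has_bug_foo(void)
    {
            /* x86_match_cpu() returns the matching entry or NULL. */
            return !x86_match_cpu(no_bug_foo);
    }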
@@ -1005,6 +1014,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
+       if (ia32_cap & ARCH_CAP_IBRS_ALL)
+               setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
        if (x86_match_cpu(cpu_no_meltdown))
                return;
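ARCH_CAP_IBRS_ALL is a bit in the IA32_ARCH_CAPABILITIES MSR, which this function reads into ia32_cap (guarded by X86_FEATURE_ARCH_CAPABILITIES) a few lines earlier. Forcing X86_FEATURE_IBRS_ENHANCED records, boot-wide, that IBRS semantics stay permanently in effect on this CPU, so later mitigation code can prefer enhanced IBRS over retpolines. Capability and bug bits share one bitmap and are queried the same way; a sketch of the consumer side, under a hypothetical function name — this is not the kernel's actual spectre_v2 selection logic:

    /* Sketch: querying bits that were forced during early boot. */
    static void report_forced_bits_sketch(void)
    {
            if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
                    pr_info("enhanced IBRS supported\n");

            if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                    pr_info("speculative store bypass present\n");
    }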
 
@@ -1013,6 +1025,29 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                return;
 
        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+       if (x86_match_cpu(cpu_no_l1tf))
+               return;
+
+       setup_force_cpu_bug(X86_BUG_L1TF);
+}
+
+/*
+ * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
+ * unfortunately, that's not true in practice because of early VIA
+ * chips and (more importantly) broken virtualizers that are not easy
+ * to detect. In the latter case it doesn't even *fail* reliably, so
+ * probing for it doesn't even work. Disable it completely on 32-bit
+ * unless we can find a reliable way to detect all the broken cases.
+ * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
+ */
+static void detect_nopl(void)
+{
+#ifdef CONFIG_X86_32
+       setup_clear_cpu_cap(X86_FEATURE_NOPL);
+#else
+       setup_force_cpu_cap(X86_FEATURE_NOPL);
+#endif
 }
 
 /*
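Besides forcing X86_BUG_L1TF for every CPU not listed in cpu_no_speculation or cpu_no_l1tf, this hunk re-adds detect_nopl() in a boot-time form (the old per-CPU form is deleted further down). Dropping the cpuinfo_x86 argument is deliberate: setup_force_cpu_cap()/setup_clear_cpu_cap() act on boot_cpu_data and on the global forced/cleared masks that are replayed onto every CPU during bring-up, whereas the old set_cpu_cap()/clear_cpu_cap() touched only one CPU's copy of the bits. A sketch of the distinction, under a hypothetical function name:

    static void cap_setters_sketch(struct cpuinfo_x86 *c)
    {
            /* Affects only this CPU's capability bits. */
            set_cpu_cap(c, X86_FEATURE_NOPL);

            /*
             * Affects boot_cpu_data immediately and is re-applied to
             * every secondary CPU, so it belongs in early boot code.
             */
            setup_force_cpu_cap(X86_FEATURE_NOPL);
    }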
@@ -1089,6 +1124,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
         */
        if (!pgtable_l5_enabled())
                setup_clear_cpu_cap(X86_FEATURE_LA57);
+
+       detect_nopl();
 }
 
 void __init early_cpu_init(void)
@@ -1124,24 +1161,6 @@ void __init early_cpu_init(void)
        early_identify_cpu(&boot_cpu_data);
 }
 
-/*
- * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
- * unfortunately, that's not true in practice because of early VIA
- * chips and (more importantly) broken virtualizers that are not easy
- * to detect. In the latter case it doesn't even *fail* reliably, so
- * probing for it doesn't even work. Disable it completely on 32-bit
- * unless we can find a reliable way to detect all the broken cases.
- * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
- */
-static void detect_nopl(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_32
-       clear_cpu_cap(c, X86_FEATURE_NOPL);
-#else
-       set_cpu_cap(c, X86_FEATURE_NOPL);
-#endif
-}
-
 static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
@@ -1204,8 +1223,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
 
        get_model_name(c); /* Default name */
 
-       detect_nopl(c);
-
        detect_null_seg_behavior(c);
 
        /*
@@ -1804,11 +1821,12 @@ void cpu_init(void)
        enter_lazy_tlb(&init_mm, curr);
 
        /*
-        * Initialize the TSS.  Don't bother initializing sp0, as the initial
-        * task never enters user mode.
+        * Initialize the TSS.  sp0 points to the entry trampoline stack
+        * regardless of what task is running.
         */
        set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
+       load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
 
        load_mm_ldt(&init_mm);
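The added load_sp0() points TSS.sp0 at the top of this CPU's entry trampoline stack, matching the updated comment: sp0 is now the same no matter which task runs. cpu_entry_stack(cpu) returns a struct entry_stack *, so the "+ 1" advances the pointer by sizeof(struct entry_stack) and yields the address one past the stack's last byte — exactly the initial value a downward-growing stack wants. The arithmetic spelled out, assuming the 4.19-era definitions:

    /* Sketch: the pointer arithmetic behind the load_sp0() argument. */
    struct entry_stack *es = cpu_entry_stack(cpu);
    unsigned long top = (unsigned long)(es + 1);
                        /* == (unsigned long)es + sizeof(*es) */
    load_sp0(top);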