// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/intel-family.h>
#include <asm/microcode_intel.h>
#include <asm/hwcap2.h>
#include <asm/cpu_device_id.h>
#include <asm/cmdline.h>
#include <asm/traps.h>

#include <linux/topology.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif
enum split_lock_detect_state {
	sld_off = 0,
	sld_warn,
	sld_fatal,
};

/*
 * Default to sld_off because most systems do not support split lock detection.
 * split_lock_setup() will switch this to sld_warn on systems that support
 * split lock detect, unless there is a command line override.
 */
static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
static u64 msr_test_ctrl_cache __ro_after_init;
/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own caches. However, there
 * exist CPU models in which having conflicting memory types still leads to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_model) {
	case INTEL_FAM6_CORE_YONAH:
	case INTEL_FAM6_CORE2_MEROM:
	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}
static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT feature cannot be detected without
	 * cpu model and family comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}
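
/*
 * Note (editorial addition): user space can then detect the feature via
 * getauxval(AT_HWCAP2) & HWCAP2_RING3MWAIT, since ELF_HWCAP2 is what the
 * loader publishes as AT_HWCAP2.
 */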
/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u8 model;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_FAM6_KABYLAKE, 0x0B, 0x80 },
	{ INTEL_FAM6_KABYLAKE, 0x0A, 0x80 },
	{ INTEL_FAM6_KABYLAKE, 0x09, 0x80 },
	{ INTEL_FAM6_KABYLAKE_L, 0x0A, 0x80 },
	{ INTEL_FAM6_KABYLAKE_L, 0x09, 0x80 },
	{ INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
	{ INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
	{ INTEL_FAM6_BROADWELL, 0x04, 0x28 },
	{ INTEL_FAM6_BROADWELL_G, 0x01, 0x1b },
	{ INTEL_FAM6_BROADWELL_D, 0x02, 0x14 },
	{ INTEL_FAM6_BROADWELL_D, 0x03, 0x07000011 },
	{ INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
	{ INTEL_FAM6_HASWELL_L, 0x01, 0x21 },
	{ INTEL_FAM6_HASWELL_G, 0x01, 0x18 },
	{ INTEL_FAM6_HASWELL, 0x03, 0x23 },
	{ INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
	{ INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
	{ INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
	/* Observed in the wild */
	{ INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
	{ INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
};
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that they are running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	if (c->x86 != 6)
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_model == spectre_bad_microcodes[i].model &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}
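
/*
 * Example (editorial, values from the table above): a KABYLAKE part at
 * stepping 0x0B running microcode revision 0x7c is reported bad
 * (0x7c <= 0x80), while the same part on revision 0x84 is not.
 */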
static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}
	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif
	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3 &&
	    (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}
	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case INTEL_FAM6_ATOM_SALTWELL_MID:
		case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		case INTEL_FAM6_ATOM_SILVERMONT_MID:
		case INTEL_FAM6_ATOM_AIRMONT_NP:
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Pentium M:
	 *
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 *
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);
	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 * the TLB when any changes are made to any of the page table entries.
	 * The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}
	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set, EBX[23:16] contains the number of
		 * APIC IDs which are reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
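		/*
		 * Worked example (editorial): with 16 reserved APIC IDs per
		 * package, get_count_order(16) == 4, i.e. the core/thread ID
		 * occupies the low 4 bits of the APIC ID.
		 */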
	}

	check_memory_type_self_snoop_errata(c);

	/*
	 * Get the number of SMT siblings early from the extended topology
	 * leaf, if available. Otherwise try the legacy SMT detection.
	 */
	if (detect_extended_topology_early(c) < 0)
		detect_ht_early(c);
}
#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50.
 *
 * This is called before we do cpu ident work.
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}
static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Only check secondary CPUs, i.e. calls from identify_secondary_cpu() */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}
static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);
static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif
	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3.
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);
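	/*
	 * Editorial note on the check above: family, model and stepping are
	 * packed into a single value, so 0x633 encodes family 6, model 3,
	 * stepping 3; anything that packs below that lacks a working SEP.
	 */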

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}
	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);
#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}
#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128	0

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */

#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
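
/*
 * Illustrative sketch (editorial, not part of the original file): decoding
 * a hypothetical IA32_TME_ACTIVATE value with the helpers above. The sample
 * value is made up purely to exercise each bit field.
 */
static void __maybe_unused tme_activate_decode_example(void)
{
	/* locked | enabled | policy 0 | 6 KeyID bits | AES-XTS-128 alg bit */
	u64 val = 0x1 | 0x2 | (6ULL << 32) | (1ULL << 48);

	WARN_ON(!TME_ACTIVATE_LOCKED(val));		/* bit 0 */
	WARN_ON(!TME_ACTIVATE_ENABLED(val));		/* bit 1 */
	WARN_ON(TME_ACTIVATE_POLICY(val) !=
		TME_ACTIVATE_POLICY_AES_XTS_128);	/* bits 7:4 */
	WARN_ON(TME_ACTIVATE_KEYID_BITS(val) != 6);	/* bits 35:32 */
	WARN_ON(!(TME_ACTIVATE_CRYPTO_ALGS(val) &
		  TME_ACTIVATE_CRYPTO_AES_XTS_128));	/* bits 63:48 */
}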
/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED		0
#define MKTME_DISABLED		1
#define MKTME_UNINITIALIZED	2
static int mktme_status = MKTME_UNINITIALIZED;
static void detect_tme(struct cpuinfo_x86 *c)
{
	u64 tme_activate, tme_policy, tme_crypto_algs;
	int keyid_bits = 0, nr_keyids = 0;
	static u64 tme_activate_cpu0 = 0;

	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (mktme_status != MKTME_UNINITIALIZED) {
		if (tme_activate != tme_activate_cpu0) {
			/* Broken BIOS? */
			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
			pr_err_once("x86/tme: MKTME is not usable\n");
			mktme_status = MKTME_DISABLED;

			/* Proceed. We may need to exclude bits from x86_phys_bits. */
		}
	} else {
		tme_activate_cpu0 = tme_activate;
	}

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		mktme_status = MKTME_DISABLED;
		return;
	}

	if (mktme_status != MKTME_UNINITIALIZED)
		goto detect_keyid_bits;

	pr_info("x86/tme: enabled by BIOS\n");
	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);

	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
		       tme_crypto_algs);
		mktme_status = MKTME_DISABLED;
	}

detect_keyid_bits:
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	nr_keyids = (1UL << keyid_bits) - 1;
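	/*
	 * Worked example (editorial): keyid_bits == 6 gives
	 * (1 << 6) - 1 == 63 usable KeyIDs; KeyID 0 is reserved for TME
	 * itself.
	 */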
	if (nr_keyids) {
		pr_info_once("x86/mktme: enabled by BIOS\n");
		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
	} else {
		pr_info_once("x86/mktme: disabled by BIOS\n");
	}

	if (mktme_status == MKTME_UNINITIALIZED) {
		/* MKTME is usable */
		mktme_status = MKTME_ENABLED;
	}

	/*
	 * KeyID bits effectively lower the number of physical address
	 * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
	 */
	c->x86_phys_bits -= keyid_bits;
}
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}
static void split_lock_init(void);

static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned int eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1 << 11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1 << 12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
	    c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme(c);

	init_intel_misc_features(c);

	if (tsx_ctrl_state == TSX_CTRL_ENABLE)
		tsx_enable();
	if (tsx_ctrl_state == TSX_CTRL_DISABLE)
		tsx_disable();

	split_lock_init();
}
#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag.
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif
#define TLB_INST_4K		0x01
#define TLB_INST_4M		0x02
#define TLB_INST_2M_4M		0x03

#define TLB_INST_ALL		0x05
#define TLB_INST_1G		0x06

#define TLB_DATA_4K		0x11
#define TLB_DATA_4M		0x12
#define TLB_DATA_2M_4M		0x13
#define TLB_DATA_4K_4M		0x14

#define TLB_DATA_1G		0x16

#define TLB_DATA0_4K		0x21
#define TLB_DATA0_4M		0x22
#define TLB_DATA0_2M_4M		0x23

#define STLB_4K			0x41
#define STLB_4K_2M		0x42
static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
	{ 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
	{ 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
	{ 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M, 16, " TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};
static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;
	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}
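
/*
 * Worked example (editorial, hypothetical leaf 0x2 output): if EAX reads
 * 0x665B5001, AL == 0x01 requests a single iteration; byte 0 is skipped
 * and the descriptor bytes 0x50, 0x5b and 0x66 are looked up. 0x50 and
 * 0x5b match intel_tlb_table[] above, while 0x66 (a cache descriptor,
 * tlb_type 0) is ignored.
 */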
static const struct cpu_dev intel_cpu_dev = {
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init   = early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);
#undef pr_fmt
#define pr_fmt(fmt) "x86/split lock detection: " fmt

static const struct {
	const char			*option;
	enum split_lock_detect_state	state;
} sld_options[] __initconst = {
	{ "off",	sld_off   },
	{ "warn",	sld_warn  },
	{ "fatal",	sld_fatal },
};
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
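
/*
 * Usage example (editorial): with "split_lock_detect=warn" on the command
 * line, cmdline_find_option() below copies "warn" into arg and returns 4,
 * so match_option(arg, 4, "warn") compares all four bytes and succeeds,
 * while match_option(arg, 4, "warnx") fails the length check.
 */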
static bool split_lock_verify_msr(bool on)
{
	u64 ctrl, tmp;

	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
		return false;
	if (on)
		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	else
		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
		return false;
	rdmsrl(MSR_TEST_CTRL, tmp);
	return ctrl == tmp;
}
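
/*
 * Editorial note: the read-back in split_lock_verify_msr() catches parts
 * where the MSR write does not fault but the split-lock-detect bit still
 * fails to stick; in that case ctrl != tmp and the feature is treated as
 * unavailable.
 */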
static void __init split_lock_setup(void)
{
	enum split_lock_detect_state state = sld_warn;
	char arg[20];
	int i, ret;

	if (!split_lock_verify_msr(false)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
				  arg, sizeof(arg));
	if (ret >= 0) {
		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
			if (match_option(arg, ret, sld_options[i].option)) {
				state = sld_options[i].state;
				break;
			}
		}
	}

	switch (state) {
	case sld_off:
		pr_info("disabled\n");
		return;
	case sld_warn:
		pr_info("warning about user-space split_locks\n");
		break;
	case sld_fatal:
		pr_info("sending SIGBUS on user-space split_locks\n");
		break;
	}

	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	if (!split_lock_verify_msr(true)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	sld_state = state;
	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}
/*
 * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
 * is not implemented as one thread could undo the setting of the other
 * thread immediately after dropping the lock anyway.
 */
static void sld_update_msr(bool on)
{
	u64 test_ctrl_val = msr_test_ctrl_cache;

	if (on)
		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}

static void split_lock_init(void)
{
	split_lock_verify_msr(sld_state != sld_off);
}
static void split_lock_warn(unsigned long ip)
{
	pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
			    current->comm, current->pid, ip);

	/*
	 * Disable the split lock detection for this task so it can make
	 * progress and set TIF_SLD so the detection is re-enabled via
	 * switch_to_sld() when the task is scheduled out.
	 */
	sld_update_msr(false);
	set_tsk_thread_flag(current, TIF_SLD);
}
bool handle_guest_split_lock(unsigned long ip)
{
	if (sld_state == sld_warn) {
		split_lock_warn(ip);
		return true;
	}

	pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
		     current->comm, current->pid,
		     sld_state == sld_fatal ? "fatal" : "bogus", ip);

	current->thread.error_code = 0;
	current->thread.trap_nr = X86_TRAP_AC;
	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
	return false;
}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);
bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
		return false;
	split_lock_warn(regs->ip);
	return true;
}
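
/*
 * Editorial note: if user space set EFLAGS.AC itself, the #AC is a genuine
 * alignment check rather than a split-lock trap, so it is not suppressed
 * above and the caller delivers SIGBUS, just as in sld_fatal mode.
 */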
/*
 * This function is called only when switching between tasks with
 * different split-lock detection modes. It sets the MSR for the
 * mode of the new task. This is right most of the time, but since
 * the MSR is shared by hyperthreads on a physical core there can
 * be glitches when the two threads need different modes.
 */
void switch_to_sld(unsigned long tifn)
{
	sld_update_msr(!(tifn & _TIF_SLD));
}
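
/*
 * Example (editorial): switching from a task that tripped the detector
 * (TIF_SLD set, detection off) to one with the flag clear passes a tifn
 * without _TIF_SLD, so sld_update_msr(true) re-arms detection.
 */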
#define SPLIT_LOCK_CPU(model) {X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY}

/*
 * The following processors have the split lock detection feature. But
 * since they don't have the IA32_CORE_CAPABILITIES MSR, the feature cannot
 * be enumerated. Enable it by family and model matching on these
 * processors.
 */
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
	SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_X),
	SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_L),
	{}
};
void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_core_caps = 0;

	if (c->x86_vendor != X86_VENDOR_INTEL)
		return;

	if (cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) {
		/* Enumerate features reported in IA32_CORE_CAPABILITIES MSR. */
		rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
	} else if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		/* Enumerate split lock detection by family and model. */
		if (x86_match_cpu(split_lock_cpu_ids))
			ia32_core_caps |= MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT;
	}

	if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
		split_lock_setup();
}