// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>

#include <linux/pgtable.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/semaphore.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpuhotplug.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode_intel.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>
#include <asm/cpu_device_id.h>
#include <asm/cmdline.h>
#include <asm/traps.h>
#include <asm/resctrl.h>
#include <asm/numa.h>
#include <asm/thermal.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif
enum split_lock_detect_state {
	sld_off = 0,
	sld_warn,
	sld_fatal,
	sld_ratelimit,
};

/*
 * Default to sld_off because most systems do not support split lock detection.
 * sld_state_setup() will switch this to sld_warn on systems that support
 * split lock/bus lock detect, unless there is a command line override.
 */
static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
static u64 msr_test_ctrl_cache __ro_after_init;
/*
 * With a name like MSR_TEST_CTL it should go without saying, but don't touch
 * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it
 * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
 */
static bool cpu_model_supports_sld __ro_after_init;
/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own caches. However, there
 * exist CPU models in which having conflicting memory types still leads to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_model) {
	case INTEL_FAM6_CORE_YONAH:
	case INTEL_FAM6_CORE2_MEROM:
	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}
static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);
static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT feature cannot be detected without
	 * cpu model and family comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}
/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u8 model;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_FAM6_KABYLAKE,		0x0B,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x09,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x09,	0x80 },
	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
	{ INTEL_FAM6_BROADWELL,		0x04,	0x28 },
	{ INTEL_FAM6_BROADWELL_G,	0x01,	0x1b },
	{ INTEL_FAM6_BROADWELL_D,	0x02,	0x14 },
	{ INTEL_FAM6_BROADWELL_D,	0x03,	0x07000011 },
	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_FAM6_HASWELL_L,		0x01,	0x21 },
	{ INTEL_FAM6_HASWELL_G,		0x01,	0x18 },
	{ INTEL_FAM6_HASWELL,		0x03,	0x23 },
	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
};
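
/*
 * A table hit means: same model and stepping, and the currently loaded
 * microcode revision is at or below the listed one. Only older-or-equal
 * revisions are treated as broken; see bad_spectre_microcode() below.
 */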
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that they are running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	if (c->x86 != 6)
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_model == spectre_bad_microcodes[i].model &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}
int intel_cpu_collect_info(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig = { 0 };
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = x86_family(eax);
	model  = x86_model(eax);

	if (model >= 5 || family > 6) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}
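	/*
	 * The platform ID lives in bits 52:50 of MSR_IA32_PLATFORM_ID, i.e.
	 * bits 20:18 of the high dword read above. csig.pf is kept as a
	 * one-hot mask so it can be tested directly against the pf bitmask
	 * carried in microcode headers (see intel_find_matching_signature()).
	 */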
	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(intel_cpu_collect_info);
/*
 * Returns 1 if an update has been found, 0 otherwise.
 */
int intel_find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(intel_find_matching_signature);
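
/*
 * Illustrative caller flow only (the real loaders live in
 * arch/x86/kernel/cpu/microcode/intel.c); "mc" is assumed to point at a
 * candidate microcode blob:
 *
 *	struct ucode_cpu_info uci;
 *
 *	intel_cpu_collect_info(&uci);
 *	if (intel_find_matching_signature(mc, uci.cpu_sig.sig, uci.cpu_sig.pf))
 *		;	// this blob applies to the running CPU
 */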
/**
 * intel_microcode_sanity_check() - Sanity check microcode file.
 * @mc: Pointer to the microcode file contents.
 * @print_err: Display failure reason if true, silent if false.
 * @hdr_type: Type of file, i.e. normal microcode file or In Field Scan file.
 *            Validate if the microcode header type matches with the type
 *            specified here.
 *
 * Validate certain header fields and verify if computed checksum matches
 * with the one specified in the header.
 *
 * Return: 0 if the file passes all the checks, -EINVAL if any of the checks
 * fail.
 */
int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != hdr_type) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format. Header type %d\n",
			       mc_header->hdrver);
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if (ext_table_size < EXT_HEADER_SIZE ||
		    ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}
	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;
	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;
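		/*
		 * Each extended signature carries a cksum chosen so that
		 * substituting its (sig, pf, cksum) triple for the header's
		 * leaves the whole-file dword sum at zero. The full sum was
		 * already verified to be zero above, so it is sufficient to
		 * check that the difference of the two triples is zero.
		 */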
		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);
static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();
	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}
	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}
#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif
	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3 &&
	    (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;
	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}
	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case INTEL_FAM6_ATOM_SALTWELL_MID:
		case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		case INTEL_FAM6_ATOM_SILVERMONT_MID:
		case INTEL_FAM6_ATOM_AIRMONT_NP:
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}
	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);
	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}
	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}
	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set, EBX[23:16] contains the number of
		 * apicids which are reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
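		/*
		 * Example: a package reserving 16 APIC IDs reports
		 * EBX[23:16] == 16 and get_count_order(16) == 4, i.e. the
		 * package ID starts four bits into the APIC ID.
		 */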
	}

	check_memory_type_self_snoop_errata(c);
	/*
	 * Get the number of SMT siblings early from the extended topology
	 * leaf, if available. Otherwise try the legacy SMT detection.
	 */
	if (detect_extended_topology_early(c) < 0)
		detect_ht_early(c);
}
static void bsp_init_intel(struct cpuinfo_x86 *c)
{
	resctrl_cpu_detect(c);
}
#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}
static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}
static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);
static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif
	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3. The packed value below is (family << 8 |
	 * model << 4 | stepping), so 0x633 decodes to family 6, model 3,
	 * stepping 3.
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);
	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}
	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}
	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update"). Here 0x520 packs (family << 8 | model << 4),
	 * i.e. family 5, model 2.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);
#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}
#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128	0

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */

#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
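
/*
 * Worked example (hypothetical MSR value): tme_activate = 0x0001000300000003
 * decodes with the helpers above as LOCKED = 1, ENABLED = 1, POLICY = 0
 * (AES-XTS-128), KEYID_BITS = 3 (so 2^3 - 1 = 7 MKTME KeyIDs) and
 * CRYPTO_ALGS = 0x0001 (AES-XTS-128 supported).
 */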
/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED			0
#define MKTME_DISABLED			1
#define MKTME_UNINITIALIZED		2
static int mktme_status = MKTME_UNINITIALIZED;
static void detect_tme(struct cpuinfo_x86 *c)
{
	u64 tme_activate, tme_policy, tme_crypto_algs;
	int keyid_bits = 0, nr_keyids = 0;
	static u64 tme_activate_cpu0 = 0;

	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (mktme_status != MKTME_UNINITIALIZED) {
		if (tme_activate != tme_activate_cpu0) {
			/* Broken BIOS? */
			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
			pr_err_once("x86/tme: MKTME is not usable\n");
			mktme_status = MKTME_DISABLED;

			/* Proceed. We may need to exclude bits from x86_phys_bits. */
		}
	} else {
		tme_activate_cpu0 = tme_activate;
	}
	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		mktme_status = MKTME_DISABLED;
		return;
	}

	if (mktme_status != MKTME_UNINITIALIZED)
		goto detect_keyid_bits;

	pr_info("x86/tme: enabled by BIOS\n");
	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);

	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
		       tme_crypto_algs);
		mktme_status = MKTME_DISABLED;
	}
detect_keyid_bits:
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	nr_keyids = (1UL << keyid_bits) - 1;
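	/*
	 * KeyID 0 is reserved for TME itself, which is why N KeyID bits
	 * yield 2^N - 1 KeyIDs usable by MKTME.
	 */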
	if (nr_keyids) {
		pr_info_once("x86/mktme: enabled by BIOS\n");
		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
	} else {
		pr_info_once("x86/mktme: disabled by BIOS\n");
	}
	if (mktme_status == MKTME_UNINITIALIZED) {
		/* MKTME is usable */
		mktme_status = MKTME_ENABLED;
	}

	/*
	 * KeyID bits effectively lower the number of physical address
	 * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
	 */
	c->x86_phys_bits -= keyid_bits;
}
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}
static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}
static void split_lock_init(void);
static void bus_lock_init(void);

static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);
	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}
	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}
	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
	    (c->x86_model == INTEL_FAM6_ATOM_GOLDMONT))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif
	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme(c);

	init_intel_misc_features(c);

	split_lock_init();
	bus_lock_init();

	intel_init_thermal(c);
}
#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif
#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42
static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, full associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, full associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};
static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;
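	/*
	 * The low byte of EAX for CPUID leaf 2 is the number of times the
	 * leaf must be queried to obtain all descriptors; it has been 1 on
	 * every CPU implementing the leaf so far, but the loop below follows
	 * the architected protocol anyway.
	 */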
	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}
static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init   = early_init_intel,
	.c_bsp_init	= bsp_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);
#undef pr_fmt
#define pr_fmt(fmt) "x86/split lock detection: " fmt

static const struct {
	const char			*option;
	enum split_lock_detect_state	state;
} sld_options[] __initconst = {
	{ "off",	sld_off   },
	{ "warn",	sld_warn  },
	{ "fatal",	sld_fatal },
	{ "ratelimit:",	sld_ratelimit },
};
static struct ratelimit_state bld_ratelimit;

static unsigned int sysctl_sld_mitigate = 1;
static DEFINE_SEMAPHORE(buslock_sem, 1);
#ifdef CONFIG_PROC_SYSCTL
static struct ctl_table sld_sysctls[] = {
	{
		.procname       = "split_lock_mitigate",
		.data           = &sysctl_sld_mitigate,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = SYSCTL_ONE,
	},
	{}
};

static int __init sld_mitigate_sysctl_init(void)
{
	register_sysctl_init("kernel", sld_sysctls);
	return 0;
}

late_initcall(sld_mitigate_sysctl_init);
#endif
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt), ratelimit;

	if (strncmp(arg, opt, len))
		return false;

	/*
	 * Min ratelimit is 1 bus lock/sec.
	 * Max ratelimit is 1000 bus locks/sec.
	 */
	if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 &&
	    ratelimit > 0 && ratelimit <= 1000) {
		ratelimit_state_init(&bld_ratelimit, HZ, ratelimit);
		ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE);
		return true;
	}

	return len == arglen;
}
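
/*
 * Example: booting with "split_lock_detect=ratelimit:5" selects
 * sld_ratelimit and initializes bld_ratelimit to allow at most five bus
 * locks per second system wide. A rate outside 1..1000 falls through to
 * the plain prefix-length comparison and the option does not match, so
 * the default state is kept.
 */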
static bool split_lock_verify_msr(bool on)
{
	u64 ctrl, tmp;

	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
		return false;

	if (on)
		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	else
		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
		return false;

	rdmsrl(MSR_TEST_CTRL, tmp);
	return ctrl == tmp;
}
static void __init sld_state_setup(void)
{
	enum split_lock_detect_state state = sld_warn;
	char arg[20];
	int i, ret;

	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
				  arg, sizeof(arg));
	if (ret >= 0) {
		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
			if (match_option(arg, ret, sld_options[i].option)) {
				state = sld_options[i].state;
				break;
			}
		}
	}

	sld_state = state;
}
static void __init __split_lock_setup(void)
{
	if (!split_lock_verify_msr(false)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	if (!split_lock_verify_msr(true)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	/* Restore the MSR to its cached value. */
	wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}
/*
 * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
 * is not implemented as one thread could undo the setting of the other
 * thread immediately after dropping the lock anyway.
 */
static void sld_update_msr(bool on)
{
	u64 test_ctrl_val = msr_test_ctrl_cache;

	if (on)
		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}
static void split_lock_init(void)
{
	/*
	 * #DB for bus lock handles ratelimit and #AC for split lock is
	 * disabled.
	 */
	if (sld_state == sld_ratelimit) {
		split_lock_verify_msr(false);
		return;
	}

	if (cpu_model_supports_sld)
		split_lock_verify_msr(sld_state != sld_off);
}
static void __split_lock_reenable_unlock(struct work_struct *work)
{
	sld_update_msr(true);
	up(&buslock_sem);
}

static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock);

static void __split_lock_reenable(struct work_struct *work)
{
	sld_update_msr(true);
}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);
/*
 * If a CPU goes offline with pending delayed work to re-enable split lock
 * detection then the delayed work will be executed on some other CPU. That
 * handles releasing the buslock_sem, but because it executes on a
 * different CPU probably won't re-enable split lock detection. This is a
 * problem on HT systems since the sibling CPU on the same core may then be
 * left running with split lock detection disabled.
 *
 * Unconditionally re-enable detection here.
 */
static int splitlock_cpu_offline(unsigned int cpu)
{
	sld_update_msr(true);

	return 0;
}
static void split_lock_warn(unsigned long ip)
{
	struct delayed_work *work;
	int cpu;

	if (!current->reported_split_lock)
		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, ip);
	current->reported_split_lock = 1;

	if (sysctl_sld_mitigate) {
		/*
		 * misery factor #1:
		 * sleep 10ms before trying to execute split lock.
		 */
		if (msleep_interruptible(10) > 0)
			return;
		/*
		 * Misery factor #2:
		 * only allow one buslocked disabled core at a time.
		 */
		if (down_interruptible(&buslock_sem) == -EINTR)
			return;
		work = &sl_reenable_unlock;
	} else {
		work = &sl_reenable;
	}

	cpu = get_cpu();
	schedule_delayed_work_on(cpu, work, 2);

	/* Disable split lock detection on this CPU to make progress */
	sld_update_msr(false);
	put_cpu();
}
bool handle_guest_split_lock(unsigned long ip)
{
	if (sld_state == sld_warn) {
		split_lock_warn(ip);
		return true;
	}

	pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
		     current->comm, current->pid,
		     sld_state == sld_fatal ? "fatal" : "bogus", ip);

	current->thread.error_code = 0;
	current->thread.trap_nr = X86_TRAP_AC;
	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
	return false;
}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);
static void bus_lock_init(void)
{
	u64 val;

	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, val);

	if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    (sld_state == sld_warn || sld_state == sld_fatal)) ||
	    sld_state == sld_off) {
		/*
		 * Warn and fatal are handled by #AC for split lock if #AC for
		 * split lock is supported.
		 */
		val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
	} else {
		val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
	}

	wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
}
bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
		return false;

	split_lock_warn(regs->ip);
	return true;
}
void handle_bus_lock(struct pt_regs *regs)
{
	switch (sld_state) {
	case sld_off:
		break;
	case sld_ratelimit:
		/* Enforce no more than bld_ratelimit bus locks/sec. */
		while (!__ratelimit(&bld_ratelimit))
			msleep(20);
		/* Warn on the bus lock. */
		fallthrough;
	case sld_warn:
		pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, regs->ip);
		break;
	case sld_fatal:
		force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
		break;
	}
}
/*
 * CPU models that are known to have the per-core split-lock detection
 * feature even though they do not enumerate IA32_CORE_CAPABILITIES.
 */
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,	0),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,	0),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,	0),
	{}
};
static void __init split_lock_setup(struct cpuinfo_x86 *c)
{
	const struct x86_cpu_id *m;
	u64 ia32_core_caps;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	/* Check for CPUs that have support but do not enumerate it: */
	m = x86_match_cpu(split_lock_cpu_ids);
	if (m)
		goto supported;

	if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
		return;

	/*
	 * Not all bits in MSR_IA32_CORE_CAPS are architectural, but
	 * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is. All CPUs that set
	 * it have split lock detection.
	 */
	rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
	if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
		goto supported;

	/* CPU is not in the model list and does not have the MSR bit: */
	return;

supported:
	cpu_model_supports_sld = true;
	__split_lock_setup();
}
static void sld_state_show(void)
{
	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
		return;

	switch (sld_state) {
	case sld_off:
		pr_info("disabled\n");
		break;
	case sld_warn:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
			if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					      "x86/splitlock", NULL, splitlock_cpu_offline) < 0)
				pr_warn("No splitlock CPU offline handler\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: warning on user-space bus_locks\n");
		}
		break;
	case sld_fatal:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n",
				boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ?
				" from non-WB" : "");
		}
		break;
	case sld_ratelimit:
		if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
			pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst);
		break;
	}
}
void __init sld_setup(struct cpuinfo_x86 *c)
{
	split_lock_setup(c);
	sld_state_setup();
	sld_state_show();
}
#define X86_HYBRID_CPU_TYPE_ID_SHIFT	24

/**
 * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU
 *
 * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in
 * a hybrid processor. If the processor is not hybrid, returns 0.
 */
u8 get_this_hybrid_cpu_type(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		return 0;

	return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
}
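
/*
 * On current hybrid parts, CPUID.1AH:EAX[31:24] reports 0x20 for an Atom
 * (E-core) and 0x40 for a Core (P-core); future processors may add new
 * core types, so callers should not treat these values as exhaustive.
 */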