1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1994 Linus Torvalds
4 *
5 * Cyrix stuff, June 1998 by:
6 * - Rafael R. Reilova (moved everything from head.S),
7 * <rreilova@ececs.uc.edu>
8 * - Channing Corn (tests & fixes),
9 * - Andrew D. Balsa (code cleanup).
10 */
11 #include <linux/init.h>
12 #include <linux/utsname.h>
13 #include <linux/cpu.h>
14 #include <linux/module.h>
15 #include <linux/nospec.h>
16 #include <linux/prctl.h>
17 #include <linux/sched/smt.h>
18 #include <linux/pgtable.h>
19 #include <linux/bpf.h>
21 #include <asm/spec-ctrl.h>
22 #include <asm/cmdline.h>
24 #include <asm/processor.h>
25 #include <asm/processor-flags.h>
26 #include <asm/fpu/api.h>
29 #include <asm/paravirt.h>
30 #include <asm/alternative.h>
31 #include <asm/set_memory.h>
32 #include <asm/intel-family.h>
33 #include <asm/e820/api.h>
34 #include <asm/hypervisor.h>
35 #include <asm/tlbflush.h>
39 static void __init spectre_v1_select_mitigation(void);
40 static void __init spectre_v2_select_mitigation(void);
41 static void __init ssb_select_mitigation(void);
42 static void __init l1tf_select_mitigation(void);
43 static void __init mds_select_mitigation(void);
44 static void __init md_clear_update_mitigation(void);
45 static void __init md_clear_select_mitigation(void);
46 static void __init taa_select_mitigation(void);
47 static void __init mmio_select_mitigation(void);
48 static void __init srbds_select_mitigation(void);
49 static void __init l1d_flush_select_mitigation(void);
51 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
52 u64 x86_spec_ctrl_base;
53 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
54 static DEFINE_MUTEX(spec_ctrl_mutex);
56 /*
57 * The vendor and possibly platform specific bits which can be modified in
58 * x86_spec_ctrl_base.
59 */
60 static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
62 /*
63 * AMD specific MSR info for Speculative Store Bypass control.
64 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
65 */
66 u64 __ro_after_init x86_amd_ls_cfg_base;
67 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
69 /* Control conditional STIBP in switch_to() */
70 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
71 /* Control conditional IBPB in switch_mm() */
72 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
73 /* Control unconditional IBPB in switch_mm() */
74 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
76 /* Control MDS CPU buffer clear before returning to user space */
77 DEFINE_STATIC_KEY_FALSE(mds_user_clear);
78 EXPORT_SYMBOL_GPL(mds_user_clear);
79 /* Control MDS CPU buffer clear before idling (halt, mwait) */
80 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
81 EXPORT_SYMBOL_GPL(mds_idle_clear);
83 /*
84 * Controls whether l1d flush based mitigations are enabled,
85 * based on hw features and admin setting via boot parameter
86 * l1d_flush.
87 */
88 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
90 /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
91 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
92 EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
94 void __init check_bugs(void)
95 {
96 identify_boot_cpu();
98 /*
99 * identify_boot_cpu() initialized SMT support information, let the
100 * core code know.
101 */
102 cpu_smt_check_topology();
104 if (!IS_ENABLED(CONFIG_SMP)) {
105 pr_info("CPU: ");
106 print_cpu_info(&boot_cpu_data);
107 }
109 /*
110 * Read the SPEC_CTRL MSR to account for reserved bits which may
111 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
112 * init code as it is not enumerated and depends on the family.
113 */
114 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
115 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
117 /* Allow STIBP in MSR_SPEC_CTRL if supported */
118 if (boot_cpu_has(X86_FEATURE_STIBP))
119 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
121 /* Select the proper CPU mitigations before patching alternatives: */
122 spectre_v1_select_mitigation();
123 spectre_v2_select_mitigation();
124 ssb_select_mitigation();
125 l1tf_select_mitigation();
126 md_clear_select_mitigation();
127 srbds_select_mitigation();
128 l1d_flush_select_mitigation();
130 arch_smt_update();
132 #ifdef CONFIG_X86_32
133 /*
134 * Check whether we are able to run this kernel safely on SMP.
135 *
136 * - i386 is no longer supported.
137 * - In order to run on anything without a TSC, we need to be
138 * compiled for a i486.
139 */
140 if (boot_cpu_data.x86 < 4)
141 panic("Kernel requires i486+ for 'invlpg' and other features");
143 init_utsname()->machine[1] =
144 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
145 alternative_instructions();
147 fpu__init_check_bugs();
148 #else /* CONFIG_X86_64 */
149 alternative_instructions();
151 /*
152 * Make sure the first 2MB area is not mapped by huge pages
153 * There are typically fixed size MTRRs in there and overlapping
154 * MTRRs into large pages causes slow downs.
155 *
156 * Right now we don't do that with gbpages because there seems
157 * very little benefit for that case.
158 */
159 if (!direct_gbpages)
160 set_memory_4k((unsigned long)__va(0), 1);
161 #endif
162 }
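/* Reconcile host and guest views of the speculation control MSRs around VM entry/exit; setguest == true writes the guest view, false restores the host view. */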
164 void
165 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
166 {
167 u64 msrval, guestval, hostval = x86_spec_ctrl_base;
168 struct thread_info *ti = current_thread_info();
170 /* Is MSR_SPEC_CTRL implemented ? */
171 if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
172 /*
173 * Restrict guest_spec_ctrl to supported values. Clear the
174 * modifiable bits in the host base value and or the
175 * modifiable bits from the guest value.
176 */
177 guestval = hostval & ~x86_spec_ctrl_mask;
178 guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
180 /* SSBD controlled in MSR_SPEC_CTRL */
181 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
182 static_cpu_has(X86_FEATURE_AMD_SSBD))
183 hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
185 /* Conditional STIBP enabled? */
186 if (static_branch_unlikely(&switch_to_cond_stibp))
187 hostval |= stibp_tif_to_spec_ctrl(ti->flags);
189 if (hostval != guestval) {
190 msrval = setguest ? guestval : hostval;
191 wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
192 }
193 }
195 /*
196 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
197 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
198 */
199 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
200 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
201 return;
203 /*
204 * If the host has SSBD mitigation enabled, force it in the host's
205 * virtual MSR value. If it's not permanently enabled, evaluate
206 * current's TIF_SSBD thread flag.
207 */
208 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
209 hostval = SPEC_CTRL_SSBD;
210 else
211 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
213 /* Sanitize the guest value */
214 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
216 if (hostval != guestval) {
217 unsigned long tif;
219 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
220 ssbd_spec_ctrl_to_tif(hostval);
222 speculation_ctrl_update(tif);
223 }
224 }
225 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
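/* Engage SSBD on AMD CPUs without SPEC_CTRL based SSBD: use the virtualized MSR when available, otherwise the family-specific LS_CFG bit. */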
227 static void x86_amd_ssb_disable(void)
229 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
231 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
232 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
233 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
234 wrmsrl(MSR_AMD64_LS_CFG, msrval);
238 #define pr_fmt(fmt) "MDS: " fmt
240 /* Default mitigation for MDS-affected CPUs */
241 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
242 static bool mds_nosmt __ro_after_init = false;
244 static const char * const mds_strings[] = {
245 [MDS_MITIGATION_OFF] = "Vulnerable",
246 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
247 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
250 static void __init mds_select_mitigation(void)
251 {
252 if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
253 mds_mitigation = MDS_MITIGATION_OFF;
254 return;
255 }
257 if (mds_mitigation == MDS_MITIGATION_FULL) {
258 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
259 mds_mitigation = MDS_MITIGATION_VMWERV;
261 static_branch_enable(&mds_user_clear);
263 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
264 (mds_nosmt || cpu_mitigations_auto_nosmt()))
265 cpu_smt_disable(false);
269 static int __init mds_cmdline(char *str)
271 if (!boot_cpu_has_bug(X86_BUG_MDS))
277 if (!strcmp(str, "off"))
278 mds_mitigation = MDS_MITIGATION_OFF;
279 else if (!strcmp(str, "full"))
280 mds_mitigation = MDS_MITIGATION_FULL;
281 else if (!strcmp(str, "full,nosmt")) {
282 mds_mitigation = MDS_MITIGATION_FULL;
288 early_param("mds", mds_cmdline);
291 #define pr_fmt(fmt) "TAA: " fmt
293 enum taa_mitigations {
294 TAA_MITIGATION_OFF,
295 TAA_MITIGATION_UCODE_NEEDED,
296 TAA_MITIGATION_VERW,
297 TAA_MITIGATION_TSX_DISABLED,
298 };
300 /* Default mitigation for TAA-affected CPUs */
301 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
302 static bool taa_nosmt __ro_after_init;
304 static const char * const taa_strings[] = {
305 [TAA_MITIGATION_OFF] = "Vulnerable",
306 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
307 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
308 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
309 };
311 static void __init taa_select_mitigation(void)
312 {
313 u64 ia32_cap;
315 if (!boot_cpu_has_bug(X86_BUG_TAA)) {
316 taa_mitigation = TAA_MITIGATION_OFF;
320 /* TSX previously disabled by tsx=off */
321 if (!boot_cpu_has(X86_FEATURE_RTM)) {
322 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
326 if (cpu_mitigations_off()) {
327 taa_mitigation = TAA_MITIGATION_OFF;
328 return;
329 }
331 /*
332 * TAA mitigation via VERW is turned off if both
333 * tsx_async_abort=off and mds=off are specified.
334 */
335 if (taa_mitigation == TAA_MITIGATION_OFF &&
336 mds_mitigation == MDS_MITIGATION_OFF)
337 return;
339 if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
340 taa_mitigation = TAA_MITIGATION_VERW;
341 else
342 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
344 /*
345 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
346 * A microcode update fixes this behavior to clear CPU buffers. It also
347 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
348 * ARCH_CAP_TSX_CTRL_MSR bit.
349 *
350 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
351 * update is required.
352 */
353 ia32_cap = x86_read_arch_cap_msr();
354 if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
355 !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
356 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
358 /*
359 * TSX is enabled, select alternate mitigation for TAA which is
360 * the same as MDS. Enable MDS static branch to clear CPU buffers.
361 *
362 * For guests that can't determine whether the correct microcode is
363 * present on host, enable the mitigation for UCODE_NEEDED as well.
364 */
365 static_branch_enable(&mds_user_clear);
367 if (taa_nosmt || cpu_mitigations_auto_nosmt())
368 cpu_smt_disable(false);
371 static int __init tsx_async_abort_parse_cmdline(char *str)
373 if (!boot_cpu_has_bug(X86_BUG_TAA))
379 if (!strcmp(str, "off")) {
380 taa_mitigation = TAA_MITIGATION_OFF;
381 } else if (!strcmp(str, "full")) {
382 taa_mitigation = TAA_MITIGATION_VERW;
383 } else if (!strcmp(str, "full,nosmt")) {
384 taa_mitigation = TAA_MITIGATION_VERW;
390 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
393 #define pr_fmt(fmt) "MMIO Stale Data: " fmt
395 enum mmio_mitigations {
396 MMIO_MITIGATION_OFF,
397 MMIO_MITIGATION_UCODE_NEEDED,
398 MMIO_MITIGATION_VERW,
399 };
401 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
402 static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
403 static bool mmio_nosmt __ro_after_init = false;
405 static const char * const mmio_strings[] = {
406 [MMIO_MITIGATION_OFF] = "Vulnerable",
407 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
408 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
409 };
411 static void __init mmio_select_mitigation(void)
412 {
413 u64 ia32_cap;
415 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
416 cpu_mitigations_off()) {
417 mmio_mitigation = MMIO_MITIGATION_OFF;
421 if (mmio_mitigation == MMIO_MITIGATION_OFF)
424 ia32_cap = x86_read_arch_cap_msr();
426 /*
427 * Enable CPU buffer clear mitigation for host and VMM, if also affected
428 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
429 */
430 if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
431 boot_cpu_has(X86_FEATURE_RTM)))
432 static_branch_enable(&mds_user_clear);
433 else
434 static_branch_enable(&mmio_stale_data_clear);
436 /*
437 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
438 * be propagated to uncore buffers, clearing the Fill buffers on idle
439 * is required irrespective of SMT state.
440 */
441 if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
442 static_branch_enable(&mds_idle_clear);
444 /*
445 * Check if the system has the right microcode.
446 *
447 * CPU Fill buffer clear mitigation is enumerated by either an explicit
448 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
449 * affected systems.
450 */
451 if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
452 (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
453 boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
454 !(ia32_cap & ARCH_CAP_MDS_NO)))
455 mmio_mitigation = MMIO_MITIGATION_VERW;
456 else
457 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
459 if (mmio_nosmt || cpu_mitigations_auto_nosmt())
460 cpu_smt_disable(false);
463 static int __init mmio_stale_data_parse_cmdline(char *str)
465 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
471 if (!strcmp(str, "off")) {
472 mmio_mitigation = MMIO_MITIGATION_OFF;
473 } else if (!strcmp(str, "full")) {
474 mmio_mitigation = MMIO_MITIGATION_VERW;
475 } else if (!strcmp(str, "full,nosmt")) {
476 mmio_mitigation = MMIO_MITIGATION_VERW;
482 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
485 #define pr_fmt(fmt) "" fmt
487 static void __init md_clear_update_mitigation(void)
488 {
489 if (cpu_mitigations_off())
490 return;
492 if (!static_key_enabled(&mds_user_clear))
493 goto out;
495 /*
496 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
497 * mitigation, if necessary.
498 */
499 if (mds_mitigation == MDS_MITIGATION_OFF &&
500 boot_cpu_has_bug(X86_BUG_MDS)) {
501 mds_mitigation = MDS_MITIGATION_FULL;
502 mds_select_mitigation();
504 if (taa_mitigation == TAA_MITIGATION_OFF &&
505 boot_cpu_has_bug(X86_BUG_TAA)) {
506 taa_mitigation = TAA_MITIGATION_VERW;
507 taa_select_mitigation();
509 if (mmio_mitigation == MMIO_MITIGATION_OFF &&
510 boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
511 mmio_mitigation = MMIO_MITIGATION_VERW;
512 mmio_select_mitigation();
513 }
514 out:
515 if (boot_cpu_has_bug(X86_BUG_MDS))
516 pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
517 if (boot_cpu_has_bug(X86_BUG_TAA))
518 pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
519 if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
520 pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
523 static void __init md_clear_select_mitigation(void)
525 mds_select_mitigation();
526 taa_select_mitigation();
527 mmio_select_mitigation();
529 /*
530 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
531 * and print their mitigation after MDS, TAA and MMIO Stale Data
532 * mitigation selection is done.
533 */
534 md_clear_update_mitigation();
538 #define pr_fmt(fmt) "SRBDS: " fmt
540 enum srbds_mitigations {
541 SRBDS_MITIGATION_OFF,
542 SRBDS_MITIGATION_UCODE_NEEDED,
543 SRBDS_MITIGATION_FULL,
544 SRBDS_MITIGATION_TSX_OFF,
545 SRBDS_MITIGATION_HYPERVISOR,
548 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
550 static const char * const srbds_strings[] = {
551 [SRBDS_MITIGATION_OFF] = "Vulnerable",
552 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
553 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
554 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
555 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
558 static bool srbds_off;
560 void update_srbds_msr(void)
561 {
562 u64 mcu_ctrl;
564 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
565 return;
567 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
568 return;
570 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
571 return;
573 /*
574 * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
575 * being disabled and it hasn't received the SRBDS MSR microcode.
576 */
577 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
578 return;
580 rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
582 switch (srbds_mitigation) {
583 case SRBDS_MITIGATION_OFF:
584 case SRBDS_MITIGATION_TSX_OFF:
585 mcu_ctrl |= RNGDS_MITG_DIS;
586 break;
587 case SRBDS_MITIGATION_FULL:
588 mcu_ctrl &= ~RNGDS_MITG_DIS;
589 break;
590 default:
591 break;
592 }
594 wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
595 }
597 static void __init srbds_select_mitigation(void)
598 {
599 u64 ia32_cap;
601 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
602 return;
604 /*
605 * Check to see if this is one of the MDS_NO systems supporting TSX that
606 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
607 * by Processor MMIO Stale Data vulnerability.
608 */
609 ia32_cap = x86_read_arch_cap_msr();
610 if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
611 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
612 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
613 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
614 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
615 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
616 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
617 else if (cpu_mitigations_off() || srbds_off)
618 srbds_mitigation = SRBDS_MITIGATION_OFF;
621 pr_info("%s\n", srbds_strings[srbds_mitigation]);
624 static int __init srbds_parse_cmdline(char *str)
629 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
632 srbds_off = !strcmp(str, "off");
635 early_param("srbds", srbds_parse_cmdline);
638 #define pr_fmt(fmt) "L1D Flush : " fmt
640 enum l1d_flush_mitigations {
641 L1D_FLUSH_OFF = 0,
642 L1D_FLUSH_ON,
643 };
645 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
647 static void __init l1d_flush_select_mitigation(void)
649 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
652 static_branch_enable(&switch_mm_cond_l1d_flush);
653 pr_info("Conditional flush on switch_mm() enabled\n");
656 static int __init l1d_flush_parse_cmdline(char *str)
658 if (!strcmp(str, "on"))
659 l1d_flush_mitigation = L1D_FLUSH_ON;
663 early_param("l1d_flush", l1d_flush_parse_cmdline);
666 #define pr_fmt(fmt) "Spectre V1 : " fmt
668 enum spectre_v1_mitigation {
669 SPECTRE_V1_MITIGATION_NONE,
670 SPECTRE_V1_MITIGATION_AUTO,
673 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
674 SPECTRE_V1_MITIGATION_AUTO;
676 static const char * const spectre_v1_strings[] = {
677 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
678 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
679 };
681 /*
682 * Does SMAP provide full mitigation against speculative kernel access to
683 * user pointers?
684 */
685 static bool smap_works_speculatively(void)
686 {
687 if (!boot_cpu_has(X86_FEATURE_SMAP))
688 return false;
690 /*
691 * On CPUs which are vulnerable to Meltdown, SMAP does not
692 * prevent speculative access to user data in the L1 cache.
693 * Consider SMAP to be non-functional as a mitigation on these
694 * CPUs.
695 */
696 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
697 return false;
699 return true;
700 }
702 static void __init spectre_v1_select_mitigation(void)
704 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
705 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
709 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
710 /*
711 * With Spectre v1, a user can speculatively control either
712 * path of a conditional swapgs with a user-controlled GS
713 * value. The mitigation is to add lfences to both code paths.
714 *
715 * If FSGSBASE is enabled, the user can put a kernel address in
716 * GS, in which case SMAP provides no protection.
717 *
718 * If FSGSBASE is disabled, the user can only put a user space
719 * address in GS. That makes an attack harder, but still
720 * possible if there's no SMAP protection.
721 */
722 if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
723 !smap_works_speculatively()) {
724 /*
725 * Mitigation can be provided from SWAPGS itself or
726 * PTI as the CR3 write in the Meltdown mitigation
727 * is serializing.
728 *
729 * If neither is there, mitigate with an LFENCE to
730 * stop speculation through swapgs.
731 */
732 if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
733 !boot_cpu_has(X86_FEATURE_PTI))
734 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
736 /*
737 * Enable lfences in the kernel entry (non-swapgs)
738 * paths, to prevent user entry from speculatively
739 * skipping swapgs.
740 */
741 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
745 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
748 static int __init nospectre_v1_cmdline(char *str)
750 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
753 early_param("nospectre_v1", nospectre_v1_cmdline);
756 #define pr_fmt(fmt) "Spectre V2 : " fmt
758 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
759 SPECTRE_V2_NONE;
761 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
762 SPECTRE_V2_USER_NONE;
763 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
764 SPECTRE_V2_USER_NONE;
766 #ifdef CONFIG_RETPOLINE
767 static bool spectre_v2_bad_module;
769 bool retpoline_module_ok(bool has_retpoline)
770 {
771 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
772 return true;
774 pr_err("System may be vulnerable to spectre v2\n");
775 spectre_v2_bad_module = true;
776 return false;
777 }
779 static inline const char *spectre_v2_module_string(void)
780 {
781 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
782 }
783 #else
784 static inline const char *spectre_v2_module_string(void) { return ""; }
785 #endif
787 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
788 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
789 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
791 #ifdef CONFIG_BPF_SYSCALL
792 void unpriv_ebpf_notify(int new_state)
793 {
794 if (new_state)
795 return;
797 /* Unprivileged eBPF is enabled */
799 switch (spectre_v2_enabled) {
800 case SPECTRE_V2_EIBRS:
801 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
802 break;
803 case SPECTRE_V2_EIBRS_LFENCE:
804 if (sched_smt_active())
805 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
806 break;
807 default:
808 break;
809 }
810 }
811 #endif
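/* Compare a length-delimited command line token against a known option string. */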
813 static inline bool match_option(const char *arg, int arglen, const char *opt)
815 int len = strlen(opt);
817 return len == arglen && !strncmp(arg, opt, len);
820 /* The kernel command line selection for spectre v2 */
821 enum spectre_v2_mitigation_cmd {
822 SPECTRE_V2_CMD_NONE,
823 SPECTRE_V2_CMD_AUTO,
824 SPECTRE_V2_CMD_FORCE,
825 SPECTRE_V2_CMD_RETPOLINE,
826 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
827 SPECTRE_V2_CMD_RETPOLINE_LFENCE,
828 SPECTRE_V2_CMD_EIBRS,
829 SPECTRE_V2_CMD_EIBRS_RETPOLINE,
830 SPECTRE_V2_CMD_EIBRS_LFENCE,
833 enum spectre_v2_user_cmd {
834 SPECTRE_V2_USER_CMD_NONE,
835 SPECTRE_V2_USER_CMD_AUTO,
836 SPECTRE_V2_USER_CMD_FORCE,
837 SPECTRE_V2_USER_CMD_PRCTL,
838 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
839 SPECTRE_V2_USER_CMD_SECCOMP,
840 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
843 static const char * const spectre_v2_user_strings[] = {
844 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
845 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
846 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
847 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
848 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
849 };
851 static const struct {
852 const char *option;
853 enum spectre_v2_user_cmd cmd;
854 bool secure;
855 } v2_user_options[] __initconst = {
856 { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
857 { "off", SPECTRE_V2_USER_CMD_NONE, false },
858 { "on", SPECTRE_V2_USER_CMD_FORCE, true },
859 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
860 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
861 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
862 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
865 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
867 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
868 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
869 }
871 static enum spectre_v2_user_cmd __init
872 spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
873 {
874 char arg[20];
875 int ret, i;
877 switch (v2_cmd) {
878 case SPECTRE_V2_CMD_NONE:
879 return SPECTRE_V2_USER_CMD_NONE;
880 case SPECTRE_V2_CMD_FORCE:
881 return SPECTRE_V2_USER_CMD_FORCE;
882 default:
883 break;
884 }
886 ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
887 arg, sizeof(arg));
888 if (ret < 0)
889 return SPECTRE_V2_USER_CMD_AUTO;
891 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
892 if (match_option(arg, ret, v2_user_options[i].option)) {
893 spec_v2_user_print_cond(v2_user_options[i].option,
894 v2_user_options[i].secure);
895 return v2_user_options[i].cmd;
899 pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
900 return SPECTRE_V2_USER_CMD_AUTO;
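/* True for any of the enhanced IBRS based spectre_v2 modes. */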
903 static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
905 return (mode == SPECTRE_V2_EIBRS ||
906 mode == SPECTRE_V2_EIBRS_RETPOLINE ||
907 mode == SPECTRE_V2_EIBRS_LFENCE);
908 }
910 static void __init
911 spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
912 {
913 enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
914 bool smt_possible = IS_ENABLED(CONFIG_SMP);
915 enum spectre_v2_user_cmd cmd;
917 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
918 return;
920 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
921 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
922 smt_possible = false;
924 cmd = spectre_v2_parse_user_cmdline(v2_cmd);
926 case SPECTRE_V2_USER_CMD_NONE:
928 case SPECTRE_V2_USER_CMD_FORCE:
929 mode = SPECTRE_V2_USER_STRICT;
931 case SPECTRE_V2_USER_CMD_AUTO:
932 case SPECTRE_V2_USER_CMD_PRCTL:
933 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
934 mode = SPECTRE_V2_USER_PRCTL;
936 case SPECTRE_V2_USER_CMD_SECCOMP:
937 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
938 if (IS_ENABLED(CONFIG_SECCOMP))
939 mode = SPECTRE_V2_USER_SECCOMP;
941 mode = SPECTRE_V2_USER_PRCTL;
945 /* Initialize Indirect Branch Prediction Barrier */
946 if (boot_cpu_has(X86_FEATURE_IBPB)) {
947 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
949 spectre_v2_user_ibpb = mode;
951 case SPECTRE_V2_USER_CMD_FORCE:
952 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
953 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
954 static_branch_enable(&switch_mm_always_ibpb);
955 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
957 case SPECTRE_V2_USER_CMD_PRCTL:
958 case SPECTRE_V2_USER_CMD_AUTO:
959 case SPECTRE_V2_USER_CMD_SECCOMP:
960 static_branch_enable(&switch_mm_cond_ibpb);
966 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
967 static_key_enabled(&switch_mm_always_ibpb) ?
968 "always-on" : "conditional");
969 }
971 /*
972 * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
973 * required.
974 */
975 if (!boot_cpu_has(X86_FEATURE_STIBP) ||
976 !smt_possible ||
977 spectre_v2_in_eibrs_mode(spectre_v2_enabled))
978 return;
980 /*
981 * At this point, an STIBP mode other than "off" has been set.
982 * If STIBP support is not being forced, check if STIBP always-on
983 * is preferred.
984 */
985 if (mode != SPECTRE_V2_USER_STRICT &&
986 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
987 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
989 spectre_v2_user_stibp = mode;
992 pr_info("%s\n", spectre_v2_user_strings[mode]);
995 static const char * const spectre_v2_strings[] = {
996 [SPECTRE_V2_NONE] = "Vulnerable",
997 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
998 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
999 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
1000 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
1001 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
1002 };
1004 static const struct {
1005 const char *option;
1006 enum spectre_v2_mitigation_cmd cmd;
1007 bool secure;
1008 } mitigation_options[] __initconst = {
1009 { "off", SPECTRE_V2_CMD_NONE, false },
1010 { "on", SPECTRE_V2_CMD_FORCE, true },
1011 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
1012 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
1013 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
1014 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
1015 { "eibrs", SPECTRE_V2_CMD_EIBRS, false },
1016 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false },
1017 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false },
1018 { "auto", SPECTRE_V2_CMD_AUTO, false },
1021 static void __init spec_v2_print_cond(const char *reason, bool secure)
1023 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1024 pr_info("%s selected on command line.\n", reason);
1027 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1029 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
1033 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
1034 cpu_mitigations_off())
1035 return SPECTRE_V2_CMD_NONE;
1037 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1039 return SPECTRE_V2_CMD_AUTO;
1041 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
1042 if (!match_option(arg, ret, mitigation_options[i].option))
1044 cmd = mitigation_options[i].cmd;
1048 if (i >= ARRAY_SIZE(mitigation_options)) {
1049 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1050 return SPECTRE_V2_CMD_AUTO;
1053 if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
1054 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1055 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
1056 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1057 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1058 !IS_ENABLED(CONFIG_RETPOLINE)) {
1059 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1060 mitigation_options[i].option);
1061 return SPECTRE_V2_CMD_AUTO;
1064 if ((cmd == SPECTRE_V2_CMD_EIBRS ||
1065 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1066 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1067 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1068 pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
1069 mitigation_options[i].option);
1070 return SPECTRE_V2_CMD_AUTO;
1073 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1074 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
1075 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1076 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
1077 mitigation_options[i].option);
1078 return SPECTRE_V2_CMD_AUTO;
1081 spec_v2_print_cond(mitigation_options[i].option,
1082 mitigation_options[i].secure);
1086 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1088 if (!IS_ENABLED(CONFIG_RETPOLINE)) {
1089 pr_err("Kernel not compiled with retpoline; no mitigation available!");
1090 return SPECTRE_V2_NONE;
1093 return SPECTRE_V2_RETPOLINE;
1096 static void __init spectre_v2_select_mitigation(void)
1098 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
1099 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
1101 /*
1102 * If the CPU is not affected and the command line mode is NONE or AUTO
1103 * then nothing to do.
1104 */
1105 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
1106 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
1107 return;
1109 switch (cmd) {
1110 case SPECTRE_V2_CMD_NONE:
1111 return;
1113 case SPECTRE_V2_CMD_FORCE:
1114 case SPECTRE_V2_CMD_AUTO:
1115 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1116 mode = SPECTRE_V2_EIBRS;
1120 mode = spectre_v2_select_retpoline();
1123 case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
1124 pr_err(SPECTRE_V2_LFENCE_MSG);
1125 mode = SPECTRE_V2_LFENCE;
1128 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
1129 mode = SPECTRE_V2_RETPOLINE;
1132 case SPECTRE_V2_CMD_RETPOLINE:
1133 mode = spectre_v2_select_retpoline();
1136 case SPECTRE_V2_CMD_EIBRS:
1137 mode = SPECTRE_V2_EIBRS;
1140 case SPECTRE_V2_CMD_EIBRS_LFENCE:
1141 mode = SPECTRE_V2_EIBRS_LFENCE;
1144 case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
1145 mode = SPECTRE_V2_EIBRS_RETPOLINE;
1149 if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
1150 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1152 if (spectre_v2_in_eibrs_mode(mode)) {
1153 /* Force it so VMEXIT will restore correctly */
1154 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
1155 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1159 case SPECTRE_V2_NONE:
1160 case SPECTRE_V2_EIBRS:
1163 case SPECTRE_V2_LFENCE:
1164 case SPECTRE_V2_EIBRS_LFENCE:
1165 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
1168 case SPECTRE_V2_RETPOLINE:
1169 case SPECTRE_V2_EIBRS_RETPOLINE:
1170 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
1174 spectre_v2_enabled = mode;
1175 pr_info("%s\n", spectre_v2_strings[mode]);
1177 /*
1178 * If spectre v2 protection has been enabled, unconditionally fill
1179 * RSB during a context switch; this protects against two independent
1180 * issues:
1181 *
1182 * - RSB underflow (and switch to BTB) on Skylake+
1183 * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
1184 */
1185 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
1186 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
1188 /*
1189 * Retpoline means the kernel is safe because it has no indirect
1190 * branches. Enhanced IBRS protects firmware too, so, enable restricted
1191 * speculation around firmware calls only when Enhanced IBRS isn't
1192 * supported.
1193 *
1194 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
1195 * the user might select retpoline on the kernel command line and if
1196 * the CPU supports Enhanced IBRS, kernel might unintentionally not
1197 * enable IBRS around firmware calls.
1198 */
1199 if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
1200 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
1201 pr_info("Enabling Restricted Speculation for firmware calls\n");
1204 /* Set up IBPB and STIBP depending on the general spectre V2 command */
1205 spectre_v2_user_select_mitigation(cmd);
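/* Write the current x86_spec_ctrl_base value on the CPU this runs on. */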
1208 static void update_stibp_msr(void * __unused)
1210 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1213 /* Update x86_spec_ctrl_base in case SMT state changed. */
1214 static void update_stibp_strict(void)
1216 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
1218 if (sched_smt_active())
1219 mask |= SPEC_CTRL_STIBP;
1221 if (mask == x86_spec_ctrl_base)
1224 pr_info("Update user space SMT mitigation: STIBP %s\n",
1225 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
1226 x86_spec_ctrl_base = mask;
1227 on_each_cpu(update_stibp_msr, NULL, 1);
1230 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
1231 static void update_indir_branch_cond(void)
1233 if (sched_smt_active())
1234 static_branch_enable(&switch_to_cond_stibp);
1236 static_branch_disable(&switch_to_cond_stibp);
1240 #define pr_fmt(fmt) fmt
1242 /* Update the static key controlling the MDS CPU buffer clear in idle */
1243 static void update_mds_branch_idle(void)
1245 u64 ia32_cap = x86_read_arch_cap_msr();
1247 /*
1248 * Enable the idle clearing if SMT is active on CPUs which are
1249 * affected only by MSBDS and not any other MDS variant.
1250 *
1251 * The other variants cannot be mitigated when SMT is enabled, so
1252 * clearing the buffers on idle just to prevent the Store Buffer
1253 * repartitioning leak would be a window dressing exercise.
1254 */
1255 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1256 return;
1258 if (sched_smt_active()) {
1259 static_branch_enable(&mds_idle_clear);
1260 } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
1261 (ia32_cap & ARCH_CAP_FBSDP_NO)) {
1262 static_branch_disable(&mds_idle_clear);
1266 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
1267 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
1268 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
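/* Re-evaluate the SMT-dependent mitigation state (STIBP, MDS/TAA/MMIO warnings and idle clearing) when SMT is toggled. */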
1270 void cpu_bugs_smt_update(void)
1272 mutex_lock(&spec_ctrl_mutex);
1274 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
1275 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
1276 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1278 switch (spectre_v2_user_stibp) {
1279 case SPECTRE_V2_USER_NONE:
1281 case SPECTRE_V2_USER_STRICT:
1282 case SPECTRE_V2_USER_STRICT_PREFERRED:
1283 update_stibp_strict();
1285 case SPECTRE_V2_USER_PRCTL:
1286 case SPECTRE_V2_USER_SECCOMP:
1287 update_indir_branch_cond();
1291 switch (mds_mitigation) {
1292 case MDS_MITIGATION_FULL:
1293 case MDS_MITIGATION_VMWERV:
1294 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
1295 pr_warn_once(MDS_MSG_SMT);
1296 update_mds_branch_idle();
1298 case MDS_MITIGATION_OFF:
1302 switch (taa_mitigation) {
1303 case TAA_MITIGATION_VERW:
1304 case TAA_MITIGATION_UCODE_NEEDED:
1305 if (sched_smt_active())
1306 pr_warn_once(TAA_MSG_SMT);
1308 case TAA_MITIGATION_TSX_DISABLED:
1309 case TAA_MITIGATION_OFF:
1313 switch (mmio_mitigation) {
1314 case MMIO_MITIGATION_VERW:
1315 case MMIO_MITIGATION_UCODE_NEEDED:
1316 if (sched_smt_active())
1317 pr_warn_once(MMIO_MSG_SMT);
1319 case MMIO_MITIGATION_OFF:
1323 mutex_unlock(&spec_ctrl_mutex);
1327 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
1329 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1331 /* The kernel command line selection */
1332 enum ssb_mitigation_cmd {
1333 SPEC_STORE_BYPASS_CMD_NONE,
1334 SPEC_STORE_BYPASS_CMD_AUTO,
1335 SPEC_STORE_BYPASS_CMD_ON,
1336 SPEC_STORE_BYPASS_CMD_PRCTL,
1337 SPEC_STORE_BYPASS_CMD_SECCOMP,
1340 static const char * const ssb_strings[] = {
1341 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
1342 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
1343 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
1344 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1345 };
1347 static const struct {
1348 const char *option;
1349 enum ssb_mitigation_cmd cmd;
1350 } ssb_mitigation_options[] __initconst = {
1351 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
1352 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
1353 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
1354 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
1355 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
1358 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1360 enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1364 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
1365 cpu_mitigations_off()) {
1366 return SPEC_STORE_BYPASS_CMD_NONE;
1368 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1371 return SPEC_STORE_BYPASS_CMD_AUTO;
1373 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1374 if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1377 cmd = ssb_mitigation_options[i].cmd;
1381 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1382 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1383 return SPEC_STORE_BYPASS_CMD_AUTO;
1390 static enum ssb_mitigation __init __ssb_select_mitigation(void)
1391 {
1392 enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1393 enum ssb_mitigation_cmd cmd;
1395 if (!boot_cpu_has(X86_FEATURE_SSBD))
1396 return mode;
1398 cmd = ssb_parse_cmdline();
1399 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1400 (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1401 cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1402 return mode;
1404 switch (cmd) {
1405 case SPEC_STORE_BYPASS_CMD_SECCOMP:
1406 /*
1407 * Choose prctl+seccomp as the default mode if seccomp is
1408 * enabled.
1409 */
1410 if (IS_ENABLED(CONFIG_SECCOMP))
1411 mode = SPEC_STORE_BYPASS_SECCOMP;
1412 else
1413 mode = SPEC_STORE_BYPASS_PRCTL;
1414 break;
1415 case SPEC_STORE_BYPASS_CMD_ON:
1416 mode = SPEC_STORE_BYPASS_DISABLE;
1418 case SPEC_STORE_BYPASS_CMD_AUTO:
1419 case SPEC_STORE_BYPASS_CMD_PRCTL:
1420 mode = SPEC_STORE_BYPASS_PRCTL;
1422 case SPEC_STORE_BYPASS_CMD_NONE:
1423 break;
1424 }
1426 /*
1427 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
1428 * bit in the mask to allow guests to use the mitigation even in the
1429 * case where the host does not enable it.
1430 */
1431 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
1432 static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1433 x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
1434 }
1436 /*
1437 * We have three CPU feature flags that are in play here:
1438 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
1439 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
1440 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
1441 */
1442 if (mode == SPEC_STORE_BYPASS_DISABLE) {
1443 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1444 /*
1445 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
1446 * use a completely different MSR and bit dependent on family.
1447 */
1448 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
1449 !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1450 x86_amd_ssb_disable();
1451 } else {
1452 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1453 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1454 }
1455 }
1457 return mode;
1458 }
1460 static void ssb_select_mitigation(void)
1462 ssb_mode = __ssb_select_mitigation();
1464 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1465 pr_info("%s\n", ssb_strings[ssb_mode]);
1469 #define pr_fmt(fmt) "Speculation prctl: " fmt
1471 static void task_update_spec_tif(struct task_struct *tsk)
1473 /* Force the update of the real TIF bits */
1474 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
1476 /*
1477 * Immediately update the speculation control MSRs for the current
1478 * task, but for a non-current task delay setting the CPU
1479 * mitigation until it is scheduled next.
1480 *
1481 * This can only happen for SECCOMP mitigation. For PRCTL it's
1482 * always the current task.
1483 */
1484 if (tsk == current)
1485 speculation_ctrl_update_current();
1486 }
1488 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
1491 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
1495 case PR_SPEC_ENABLE:
1496 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
1498 case PR_SPEC_DISABLE:
1499 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
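/* prctl handler for PR_SPEC_STORE_BYPASS; only honoured when SSB is in prctl or seccomp mode. */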
1506 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
1508 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
1509 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
1513 case PR_SPEC_ENABLE:
1514 /* If speculation is force disabled, enable is not allowed */
1515 if (task_spec_ssb_force_disable(task))
1517 task_clear_spec_ssb_disable(task);
1518 task_clear_spec_ssb_noexec(task);
1519 task_update_spec_tif(task);
1521 case PR_SPEC_DISABLE:
1522 task_set_spec_ssb_disable(task);
1523 task_clear_spec_ssb_noexec(task);
1524 task_update_spec_tif(task);
1526 case PR_SPEC_FORCE_DISABLE:
1527 task_set_spec_ssb_disable(task);
1528 task_set_spec_ssb_force_disable(task);
1529 task_clear_spec_ssb_noexec(task);
1530 task_update_spec_tif(task);
1532 case PR_SPEC_DISABLE_NOEXEC:
1533 if (task_spec_ssb_force_disable(task))
1535 task_set_spec_ssb_disable(task);
1536 task_set_spec_ssb_noexec(task);
1537 task_update_spec_tif(task);
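/* True if either IBPB or STIBP is under per-task (prctl/seccomp) control. */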
1545 static bool is_spec_ib_user_controlled(void)
1547 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
1548 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1549 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1550 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
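/* prctl handler for PR_SPEC_INDIRECT_BRANCH: adjust the task's STIBP/IBPB flags. */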
1553 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
1556 case PR_SPEC_ENABLE:
1557 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1558 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1559 return 0;
1561 /*
1562 * With strict mode for both IBPB and STIBP, the instruction
1563 * code paths avoid checking this task flag and instead,
1564 * unconditionally run the instruction. However, STIBP and IBPB
1565 * are independent and either can be set to conditionally
1566 * enabled regardless of the mode of the other.
1567 *
1568 * If either is set to conditional, allow the task flag to be
1569 * updated, unless it was force-disabled by a previous prctl
1570 * call. Currently, this is possible on an AMD CPU which has the
1571 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
1572 * kernel is booted with 'spectre_v2_user=seccomp', then
1573 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
1574 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
1575 */
1576 if (!is_spec_ib_user_controlled() ||
1577 task_spec_ib_force_disable(task))
1580 task_clear_spec_ib_disable(task);
1581 task_update_spec_tif(task);
1583 case PR_SPEC_DISABLE:
1584 case PR_SPEC_FORCE_DISABLE:
1585 /*
1586 * Indirect branch speculation is always allowed when
1587 * mitigation is force disabled.
1588 */
1589 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1590 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1593 if (!is_spec_ib_user_controlled())
1596 task_set_spec_ib_disable(task);
1597 if (ctrl == PR_SPEC_FORCE_DISABLE)
1598 task_set_spec_ib_force_disable(task);
1599 task_update_spec_tif(task);
1607 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1611 case PR_SPEC_STORE_BYPASS:
1612 return ssb_prctl_set(task, ctrl);
1613 case PR_SPEC_INDIRECT_BRANCH:
1614 return ib_prctl_set(task, ctrl);
1615 case PR_SPEC_L1D_FLUSH:
1616 return l1d_flush_prctl_set(task, ctrl);
1622 #ifdef CONFIG_SECCOMP
1623 void arch_seccomp_spec_mitigate(struct task_struct *task)
1625 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
1626 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1627 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1628 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
1629 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1633 static int l1d_flush_prctl_get(struct task_struct *task)
1635 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
1636 return PR_SPEC_FORCE_DISABLE;
1638 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
1639 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1641 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1644 static int ssb_prctl_get(struct task_struct *task)
1647 case SPEC_STORE_BYPASS_DISABLE:
1648 return PR_SPEC_DISABLE;
1649 case SPEC_STORE_BYPASS_SECCOMP:
1650 case SPEC_STORE_BYPASS_PRCTL:
1651 if (task_spec_ssb_force_disable(task))
1652 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1653 if (task_spec_ssb_noexec(task))
1654 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
1655 if (task_spec_ssb_disable(task))
1656 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1657 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1659 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1660 return PR_SPEC_ENABLE;
1661 return PR_SPEC_NOT_AFFECTED;
1665 static int ib_prctl_get(struct task_struct *task)
1667 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1668 return PR_SPEC_NOT_AFFECTED;
1670 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1671 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1672 return PR_SPEC_ENABLE;
1673 else if (is_spec_ib_user_controlled()) {
1674 if (task_spec_ib_force_disable(task))
1675 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1676 if (task_spec_ib_disable(task))
1677 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1678 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1679 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
1680 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
1681 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
1682 return PR_SPEC_DISABLE;
1684 return PR_SPEC_NOT_AFFECTED;
1687 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
1690 case PR_SPEC_STORE_BYPASS:
1691 return ssb_prctl_get(task);
1692 case PR_SPEC_INDIRECT_BRANCH:
1693 return ib_prctl_get(task);
1694 case PR_SPEC_L1D_FLUSH:
1695 return l1d_flush_prctl_get(task);
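/* Apply the boot CPU's SPEC_CTRL value and AMD SSBD setting on a newly brought up CPU. */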
1701 void x86_spec_ctrl_setup_ap(void)
1703 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
1704 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1706 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
1707 x86_amd_ssb_disable();
1710 bool itlb_multihit_kvm_mitigation;
1711 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
1714 #define pr_fmt(fmt) "L1TF: " fmt
1716 /* Default mitigation for L1TF-affected CPUs */
1717 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
1718 #if IS_ENABLED(CONFIG_KVM_INTEL)
1719 EXPORT_SYMBOL_GPL(l1tf_mitigation);
1721 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
1722 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
1724 /*
1725 * These CPUs all support 44bits physical address space internally in the
1726 * cache but CPUID can report a smaller number of physical address bits.
1727 *
1728 * The L1TF mitigation uses the top most address bit for the inversion of
1729 * non present PTEs. When the installed memory reaches into the top most
1730 * address bit due to memory holes, which has been observed on machines
1731 * which report 36bits physical address bits and have 32G RAM installed,
1732 * then the mitigation range check in l1tf_select_mitigation() triggers.
1733 * This is a false positive because the mitigation is still possible due to
1734 * the fact that the cache uses 44bit internally. Use the cache bits
1735 * instead of the reported physical bits and adjust them on the affected
1736 * machines to 44bit if the reported bits are less than 44.
1737 */
1738 static void override_cache_bits(struct cpuinfo_x86 *c)
1739 {
1740 if (c->x86 != 6)
1741 return;
1743 switch (c->x86_model) {
1744 case INTEL_FAM6_NEHALEM:
1745 case INTEL_FAM6_WESTMERE:
1746 case INTEL_FAM6_SANDYBRIDGE:
1747 case INTEL_FAM6_IVYBRIDGE:
1748 case INTEL_FAM6_HASWELL:
1749 case INTEL_FAM6_HASWELL_L:
1750 case INTEL_FAM6_HASWELL_G:
1751 case INTEL_FAM6_BROADWELL:
1752 case INTEL_FAM6_BROADWELL_G:
1753 case INTEL_FAM6_SKYLAKE_L:
1754 case INTEL_FAM6_SKYLAKE:
1755 case INTEL_FAM6_KABYLAKE_L:
1756 case INTEL_FAM6_KABYLAKE:
1757 if (c->x86_cache_bits < 44)
1758 c->x86_cache_bits = 44;
1759 break;
1760 }
1761 }
1763 static void __init l1tf_select_mitigation(void)
1764 {
1765 u64 half_pa;
1767 if (!boot_cpu_has_bug(X86_BUG_L1TF))
1768 return;
1770 if (cpu_mitigations_off())
1771 l1tf_mitigation = L1TF_MITIGATION_OFF;
1772 else if (cpu_mitigations_auto_nosmt())
1773 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1775 override_cache_bits(&boot_cpu_data);
1777 switch (l1tf_mitigation) {
1778 case L1TF_MITIGATION_OFF:
1779 case L1TF_MITIGATION_FLUSH_NOWARN:
1780 case L1TF_MITIGATION_FLUSH:
1781 break;
1782 case L1TF_MITIGATION_FLUSH_NOSMT:
1783 case L1TF_MITIGATION_FULL:
1784 cpu_smt_disable(false);
1785 break;
1786 case L1TF_MITIGATION_FULL_FORCE:
1787 cpu_smt_disable(true);
1788 break;
1789 }
1791 #if CONFIG_PGTABLE_LEVELS == 2
1792 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n")
1793 return;
1794 #endif
1796 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
1797 if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
1798 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
1799 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
1800 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
1801 half_pa);
1802 pr_info("However, doing so will make a part of your RAM unusable.\n");
1803 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
1804 return;
1805 }
1807 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
1810 static int __init l1tf_cmdline(char *str)
1812 if (!boot_cpu_has_bug(X86_BUG_L1TF))
1818 if (!strcmp(str, "off"))
1819 l1tf_mitigation = L1TF_MITIGATION_OFF;
1820 else if (!strcmp(str, "flush,nowarn"))
1821 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
1822 else if (!strcmp(str, "flush"))
1823 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
1824 else if (!strcmp(str, "flush,nosmt"))
1825 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1826 else if (!strcmp(str, "full"))
1827 l1tf_mitigation = L1TF_MITIGATION_FULL;
1828 else if (!strcmp(str, "full,force"))
1829 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
1833 early_param("l1tf", l1tf_cmdline);
1836 #define pr_fmt(fmt) fmt
1840 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
1842 #if IS_ENABLED(CONFIG_KVM_INTEL)
1843 static const char * const l1tf_vmx_states[] = {
1844 [VMENTER_L1D_FLUSH_AUTO] = "auto",
1845 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
1846 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
1847 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
1848 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
1849 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
1852 static ssize_t l1tf_show_state(char *buf)
1854 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
1855 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
1857 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
1858 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
1859 sched_smt_active())) {
1860 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
1861 l1tf_vmx_states[l1tf_vmx_mitigation]);
1864 return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
1865 l1tf_vmx_states[l1tf_vmx_mitigation],
1866 sched_smt_active() ? "vulnerable" : "disabled");
1869 static ssize_t itlb_multihit_show_state(char *buf)
1871 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
1872 !boot_cpu_has(X86_FEATURE_VMX))
1873 return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
1874 else if (!(cr4_read_shadow() & X86_CR4_VMXE))
1875 return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
1876 else if (itlb_multihit_kvm_mitigation)
1877 return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
1879 return sprintf(buf, "KVM: Vulnerable\n");
1882 static ssize_t l1tf_show_state(char *buf)
1884 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
1887 static ssize_t itlb_multihit_show_state(char *buf)
1889 return sprintf(buf, "Processor vulnerable\n");
1893 static ssize_t mds_show_state(char *buf)
1895 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1896 return sprintf(buf, "%s; SMT Host state unknown\n",
1897 mds_strings[mds_mitigation]);
1900 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
1901 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
1902 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
1903 sched_smt_active() ? "mitigated" : "disabled"));
1906 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
1907 sched_smt_active() ? "vulnerable" : "disabled");
1910 static ssize_t tsx_async_abort_show_state(char *buf)
1912 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
1913 (taa_mitigation == TAA_MITIGATION_OFF))
1914 return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
1916 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1917 return sprintf(buf, "%s; SMT Host state unknown\n",
1918 taa_strings[taa_mitigation]);
1921 return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
1922 sched_smt_active() ? "vulnerable" : "disabled");
1925 static ssize_t mmio_stale_data_show_state(char *buf)
1927 if (mmio_mitigation == MMIO_MITIGATION_OFF)
1928 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
1930 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1931 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
1932 mmio_strings[mmio_mitigation]);
1935 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
1936 sched_smt_active() ? "vulnerable" : "disabled");
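/* Build the ", STIBP: ..." fragment of the spectre_v2 sysfs string. */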
1939 static char *stibp_state(void)
1940 {
1941 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
1942 return "";
1944 switch (spectre_v2_user_stibp) {
1945 case SPECTRE_V2_USER_NONE:
1946 return ", STIBP: disabled";
1947 case SPECTRE_V2_USER_STRICT:
1948 return ", STIBP: forced";
1949 case SPECTRE_V2_USER_STRICT_PREFERRED:
1950 return ", STIBP: always-on";
1951 case SPECTRE_V2_USER_PRCTL:
1952 case SPECTRE_V2_USER_SECCOMP:
1953 if (static_key_enabled(&switch_to_cond_stibp))
1954 return ", STIBP: conditional";
1959 static char *ibpb_state(void)
1961 if (boot_cpu_has(X86_FEATURE_IBPB)) {
1962 if (static_key_enabled(&switch_mm_always_ibpb))
1963 return ", IBPB: always-on";
1964 if (static_key_enabled(&switch_mm_cond_ibpb))
1965 return ", IBPB: conditional";
1966 return ", IBPB: disabled";
1971 static ssize_t spectre_v2_show_state(char *buf)
1973 if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
1974 return sprintf(buf, "Vulnerable: LFENCE\n");
1976 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
1977 return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
1979 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
1980 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
1981 return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
1983 return sprintf(buf, "%s%s%s%s%s%s\n",
1984 spectre_v2_strings[spectre_v2_enabled],
1985 ibpb_state(),
1986 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1987 stibp_state(),
1988 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
1989 spectre_v2_module_string());
1990 }
1992 static ssize_t srbds_show_state(char *buf)
1994 return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
1997 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
1998 char *buf, unsigned int bug)
2000 if (!boot_cpu_has_bug(bug))
2001 return sprintf(buf, "Not affected\n");
2003 switch (bug) {
2004 case X86_BUG_CPU_MELTDOWN:
2005 if (boot_cpu_has(X86_FEATURE_PTI))
2006 return sprintf(buf, "Mitigation: PTI\n");
2008 if (hypervisor_is_type(X86_HYPER_XEN_PV))
2009 return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
2013 case X86_BUG_SPECTRE_V1:
2014 return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
2016 case X86_BUG_SPECTRE_V2:
2017 return spectre_v2_show_state(buf);
2019 case X86_BUG_SPEC_STORE_BYPASS:
2020 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
2022 case X86_BUG_L1TF:
2023 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
2024 return l1tf_show_state(buf);
2025 break;
2027 case X86_BUG_MDS:
2028 return mds_show_state(buf);
2030 case X86_BUG_TAA:
2031 return tsx_async_abort_show_state(buf);
2033 case X86_BUG_ITLB_MULTIHIT:
2034 return itlb_multihit_show_state(buf);
2036 case X86_BUG_SRBDS:
2037 return srbds_show_state(buf);
2039 case X86_BUG_MMIO_STALE_DATA:
2040 return mmio_stale_data_show_state(buf);
2046 return sprintf(buf, "Vulnerable\n");
2049 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
2051 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
2054 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
2056 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
2059 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
2061 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
2064 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
2066 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
2069 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
2071 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
2074 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
2076 return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
2079 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
2081 return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
2084 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
2086 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
2089 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
2091 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
2094 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
2095 {
2096 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
2097 }