// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>

#include "cpu.h"

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

	mds_select_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and OR in the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
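
/*
 * Calling convention (illustrative, not a definitive reference): a
 * hypervisor is expected to call x86_virt_spec_ctrl() with the guest's
 * values and setguest=true before entering the guest, and again with
 * setguest=false after the VMEXIT, roughly:
 *
 *	x86_virt_spec_ctrl(svm->spec_ctrl, svm->virt_spec_ctrl, true);
 *	... run the guest ...
 *	x86_virt_spec_ctrl(svm->spec_ctrl, svm->virt_spec_ctrl, false);
 *
 * The svm-> fields above refer to KVM's SVM vCPU state; the actual call
 * sites live in the KVM code, not here.
 */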

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}

	pr_info("%s\n", mds_strings[mds_mitigation]);
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
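
/*
 * For example, booting with "mds=full,nosmt" selects the full buffer
 * clearing mitigation and additionally disables SMT, while "mds=off"
 * leaves the CPU vulnerable. The accepted values mirror the strcmp()
 * checks in mds_cmdline() above.
 */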

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}
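
/*
 * For example, "spectre_v2_user=seccomp,ibpb" maps to
 * SPECTRE_V2_USER_CMD_SECCOMP_IBPB via the v2_user_options table above,
 * while an explicit "spectre_v2=off" or "spectre_v2=on" wins outright:
 * the switch on v2_cmd returns before the spectre_v2_user= option is
 * even consulted.
 */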

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/* If enhanced IBRS is enabled no STIBP required */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	/*
	 * If SMT is not possible or STIBP is not available clear the STIBP
	 * mode.
	 */
	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
		mode = SPECTRE_V2_USER_NONE;
set_mode:
	spectre_v2_user = mode;
	/* Only print the STIBP mode when SMT possible */
	if (smt_possible)
		pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}
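
/*
 * For example, booting with "spectre_v2=retpoline,generic" forces the
 * compiler-based retpoline even on CPUs where the AMD/LFENCE variant
 * would otherwise be chosen, and "nospectre_v2" is equivalent to
 * "spectre_v2=off". Both knobs are parsed by spectre_v2_parse_cmdline().
 */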

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_IBRS_ENHANCED;
			/* Force it so VMEXIT will restore correctly */
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			goto specv2_set_mode;
		}
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = SPECTRE_V2_RETPOLINE_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = SPECTRE_V2_RETPOLINE_GENERIC;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

specv2_set_mode:
	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);
}

static void update_stibp_msr(void * __unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active())
		static_branch_enable(&mds_idle_clear);
	else
		static_branch_disable(&mds_idle_clear);
}
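
/*
 * The mds_idle_clear key flipped above is consumed on the idle entry
 * paths (mds_idle_clear_cpu_buffers(), checked ahead of HLT/MWAIT), so a
 * change here takes effect the next time a sibling CPU goes idle.
 */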

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"

void arch_smt_update(void)
{
	/* Enhanced IBRS implies STIBP. No update required. */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	mutex_lock(&spec_ctrl_mutex);

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[]  __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}
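
/*
 * For example, "spec_store_bypass_disable=prctl" leaves speculative store
 * bypass enabled by default but lets individual tasks opt out via
 * prctl(PR_SET_SPECULATION_CTRL, ...), whereas "...=on" disables it
 * system-wide.
 */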

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
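
/*
 * From user space this is reached via the speculation control prctl(2),
 * e.g. (illustrative):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * which disables speculative store bypass for the calling task when the
 * mitigation mode is prctl or seccomp.
 */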

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return 0;
		/*
		 * Indirect branch speculation is always disabled in strict
		 * mode.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
			return -EPERM;
		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return -EPERM;
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
			return 0;
		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return PR_SPEC_DISABLE;
	default:
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	default:
		return -ENODEV;
	}
}
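
/*
 * The *_get() helpers back the read side of the same prctl(2) interface,
 * e.g. (illustrative):
 *
 *	ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
 *		    0, 0, 0);
 *
 * The returned value is a combination of PR_SPEC_* flags such as
 * PR_SPEC_PRCTL | PR_SPEC_ENABLE.
 */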

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}
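
/*
 * Note: x86_spec_ctrl_setup_ap() runs on secondary CPU bringup (called
 * from identify_secondary_cpu()) so that APs inherit the SPEC_CTRL base
 * value and SSBD setting the boot CPU selected in check_bugs().
 */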

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support a 44 bit physical address space internally in the
 * cache, but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * reporting 36 physical address bits with 32G RAM installed, the
 * mitigation range check in l1tf_select_mitigation() triggers. This is a
 * false positive because the mitigation is still possible since the cache
 * uses 44 bits internally. Use the cache bits instead of the reported
 * physical bits and adjust them on the affected machines to 44 bits if
 * the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
			e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);
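
/*
 * For example, "l1tf=full,force" unconditionally disables SMT and keeps
 * the L1D flush enabled with no runtime override, while plain
 * "l1tf=flush" (the default) keeps SMT and relies on PTE inversion plus
 * conditional L1D flushing.
 */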

#undef pr_fmt
#define pr_fmt(fmt) fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

static ssize_t mds_show_state(char *buf)
{
	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
				sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static char *stibp_state(void)
{
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return "";

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return ", STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}
	return "";
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       ibpb_state(),
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       stibp_state(),
			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}
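
/*
 * The strings assembled above are exposed through sysfs, e.g.
 * (illustrative output, line wrapped here for readability):
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
 *	Mitigation: Full generic retpoline, IBPB: conditional, IBRS_FW,
 *	STIBP: conditional, RSB filling
 *
 * Each cpu_show_*() wrapper below maps one vulnerabilities file onto
 * cpu_show_common().
 */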

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}
#endif