4b202e460e6d1b620c962468368eef4a102e45e0
[linux-2.6-microblaze.git] / arch / arm64 / kernel / proton-pack.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
4  * detailed at:
5  *
6  *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
7  *
8  * This code was originally written hastily under an awful lot of stress and so
9  * aspects of it are somewhat hacky. Unfortunately, changing anything in here
10  * instantly makes me feel ill. Thanks, Jann. Thann.
11  *
12  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
13  * Copyright (C) 2020 Google LLC
14  *
15  * "If there's something strange in your neighbourhood, who you gonna call?"
16  *
17  * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
18  */
19
20 #include <linux/arm-smccc.h>
21 #include <linux/cpu.h>
22 #include <linux/device.h>
23 #include <linux/nospec.h>
24 #include <linux/prctl.h>
25 #include <linux/sched/task_stack.h>
26
27 #include <asm/insn.h>
28 #include <asm/spectre.h>
29 #include <asm/traps.h>
30
/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 *
 * Monotonically advance *oldp towards @new with a lock-free cmpxchg loop;
 * the relative "severity" ordering comes from the enum mitigation_state
 * values declared in <asm/spectre.h>, and downgrades are silently ignored.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		/* Never move backwards (or sideways). */
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
50
51 /*
52  * Spectre v1.
53  *
54  * The kernel can't protect userspace for this one: it's each person for
55  * themselves. Advertise what we're doing and be done with it.
56  */
/* sysfs "spectre_v1" handler: the advice never varies, so just emit it. */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	const char *msg = "Mitigation: __user pointer sanitization\n";

	return sprintf(buf, "%s", msg);
}
62
63 /*
64  * Spectre v2.
65  *
66  * This one sucks. A CPU is either:
67  *
68  * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
69  * - Mitigated in hardware and listed in our "safe list".
70  * - Mitigated in software by firmware.
71  * - Mitigated in software by a CPU-specific dance in the kernel and a
72  *   firmware call at EL2.
73  * - Vulnerable.
74  *
75  * It's not unlikely for different CPUs in a big.LITTLE system to fall into
76  * different camps.
77  */
/* System-wide Spectre-v2 status, advanced only by update_mitigation_state(). */
static enum mitigation_state spectre_v2_state;

/* Set when "nospectre_v2" appears on the kernel command line. */
static bool __read_mostly __nospectre_v2;
/* "nospectre_v2" takes no argument; its mere presence disables the mitigation. */
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);
87
88 static bool spectre_v2_mitigations_off(void)
89 {
90         bool ret = __nospectre_v2 || cpu_mitigations_off();
91
92         if (ret)
93                 pr_info_once("spectre-v2 mitigation disabled by command line option\n");
94
95         return ret;
96 }
97
98 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
99                             char *buf)
100 {
101         switch (spectre_v2_state) {
102         case SPECTRE_UNAFFECTED:
103                 return sprintf(buf, "Not affected\n");
104         case SPECTRE_MITIGATED:
105                 return sprintf(buf, "Mitigation: Branch predictor hardening\n");
106         case SPECTRE_VULNERABLE:
107                 fallthrough;
108         default:
109                 return sprintf(buf, "Vulnerable\n");
110         }
111 }
112
/*
 * Determine whether the local CPU is immune to Spectre-v2 in hardware.
 * Reads local CPUID registers, so must run on the CPU in question.
 */
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	/* CPUs known to be unaffected despite not advertising CSV2. */
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}
138
139 static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
140 {
141         int ret;
142         struct arm_smccc_res res;
143
144         arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
145                              ARM_SMCCC_ARCH_WORKAROUND_1, &res);
146
147         ret = res.a0;
148         switch (ret) {
149         case SMCCC_RET_SUCCESS:
150                 return SPECTRE_MITIGATED;
151         case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
152                 return SPECTRE_UNAFFECTED;
153         default:
154                 fallthrough;
155         case SMCCC_RET_NOT_SUPPORTED:
156                 return SPECTRE_VULNERABLE;
157         }
158 }
159
160 bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
161 {
162         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
163
164         if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
165                 return false;
166
167         if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
168                 return false;
169
170         return true;
171 }
172
/* Per-cpu hardening state; written by install_bp_hardening_cb() below. */
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

/* Report the system-wide Spectre-v2 mitigation status. */
enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}
179
180 #ifdef CONFIG_KVM
181 #include <asm/cacheflush.h>
182 #include <asm/kvm_asm.h>
183
/* Index of the most recently allocated EL2 vector slot; -1 means none yet. */
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

/*
 * Copy the [hyp_vecs_start, hyp_vecs_end) sequence into each 0x80-byte
 * vector entry of the given 2K slot in __bp_harden_hyp_vecs (the sequence
 * must fit within 0x80 bytes — not checked here), then flush the I-cache
 * so the freshly written instructions are visible to instruction fetch.
 */
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	/* Slots are SZ_2K apart; write through the kernel linear alias. */
	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
197
/*
 * Record @fn as this CPU's branch-predictor hardening callback and, when
 * EL2 is available, point this CPU at a hyp vector slot populated with the
 * SMCCC WORKAROUND_1 sequence.
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;
	const char *hyp_vecs_start = __smccc_workaround_1_smc;
	const char *hyp_vecs_end = __smccc_workaround_1_smc +
				   __SMCCC_WORKAROUND_1_SMC_SZ;

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available()) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	/* bp_lock serialises slot allocation between concurrent callers. */
	raw_spin_lock(&bp_lock);
	/* Share the slot of any CPU that already installed this callback. */
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		/* First user of @fn: claim a fresh slot and populate it. */
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
233 #else
/* Without KVM there are no hyp vectors to manage; just record the callback. */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
238 #endif  /* CONFIG_KVM */
239
/* Invoke the firmware Spectre-v2 workaround via the SMC conduit. */
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* As above, but via HVC for systems where the conduit is the hypervisor. */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
249
/*
 * Qualcomm-specific software mitigation: execute sixteen harmless
 * "bl . + 4" calls to stuff the CPU's return-address predictor with benign
 * entries, saving and restoring the real link register (x30) around them.
 */
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov       %0, x30         \n"
		     ".rept     16              \n"
		     "bl        . + 4           \n"
		     ".endr                     \n"
		     "mov       x30, %0         \n"
		     : "=&r" (tmp));
}
261
262 static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
263 {
264         u32 midr = read_cpuid_id();
265         if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
266             ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
267                 return NULL;
268
269         return qcom_link_stack_sanitisation;
270 }
271
/*
 * Enable the firmware (ARCH_WORKAROUND_1) Spectre-v2 mitigation on this
 * CPU, unless firmware doesn't offer it or the user disabled mitigations.
 */
static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	/* Pick the callback matching how we talk to the firmware. */
	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		/* No usable conduit, so no way to reach the firmware. */
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}
305
/*
 * cpufeature enable hook: pick the best available Spectre-v2 mitigation for
 * this CPU (hardware first, then firmware) and fold the result into the
 * system-wide state. Needs preemption disabled so the per-cpu writes done
 * by the firmware path target the right CPU.
 */
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}
318
319 /*
320  * Spectre v4.
321  *
322  * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
323  * either:
324  *
325  * - Mitigated in hardware and listed in our "safe list".
326  * - Mitigated in hardware via PSTATE.SSBS.
327  * - Mitigated in software by firmware (sometimes referred to as SSBD).
328  *
329  * Wait, that doesn't sound so bad, does it? Keep reading...
330  *
331  * A major source of headaches is that the software mitigation is enabled both
332  * on a per-task basis, but can also be forced on for the kernel, necessitating
333  * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
334  * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
335  * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
336  * so you can have systems that have both firmware and SSBS mitigations. This
337  * means we actually have to reject late onlining of CPUs with mitigations if
338  * all of the currently onlined CPUs are safelisted, as the mitigation tends to
339  * be opt-in for userspace. Yes, really, the cure is worse than the disease.
340  *
341  * The only good part is that if the firmware mitigation is present, then it is
342  * present for all CPUs, meaning we don't have to worry about late onlining of a
343  * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
344  *
345  * Give me a VAX-11/780 any day of the week...
346  */
/* System-wide Spectre-v4 status, advanced only by update_mitigation_state(). */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/* Mitigation policies selectable with the "ssbd=" command-line option. */
enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,	/* "kernel" */
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,	/* "force-on" */
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,	/* "force-off" */
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

/* Table mapping "ssbd=" argument strings (matched by prefix) to policies. */
static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
368 static int __init parse_spectre_v4_param(char *str)
369 {
370         int i;
371
372         if (!str || !str[0])
373                 return -EINVAL;
374
375         for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
376                 const struct spectre_v4_param *param = &spectre_v4_params[i];
377
378                 if (strncmp(str, param->str, strlen(param->str)))
379                         continue;
380
381                 __spectre_v4_policy = param->policy;
382                 return 0;
383         }
384
385         return -EINVAL;
386 }
387 early_param("ssbd", parse_spectre_v4_param);
388
389 /*
390  * Because this was all written in a rush by people working in different silos,
391  * we've ended up with multiple command line options to control the same thing.
392  * Wrap these up in some helpers, which prefer disabling the mitigation if faced
393  * with contradictory parameters. The mitigation is always either "off",
394  * "dynamic" or "on".
395  */
396 static bool spectre_v4_mitigations_off(void)
397 {
398         bool ret = cpu_mitigations_off() ||
399                    __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
400
401         if (ret)
402                 pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
403
404         return ret;
405 }
406
407 /* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
408 static bool spectre_v4_mitigations_dynamic(void)
409 {
410         return !spectre_v4_mitigations_off() &&
411                __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
412 }
413
414 static bool spectre_v4_mitigations_on(void)
415 {
416         return !spectre_v4_mitigations_off() &&
417                __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
418 }
419
420 ssize_t cpu_show_spec_store_bypass(struct device *dev,
421                                    struct device_attribute *attr, char *buf)
422 {
423         switch (spectre_v4_state) {
424         case SPECTRE_UNAFFECTED:
425                 return sprintf(buf, "Not affected\n");
426         case SPECTRE_MITIGATED:
427                 return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
428         case SPECTRE_VULNERABLE:
429                 fallthrough;
430         default:
431                 return sprintf(buf, "Vulnerable\n");
432         }
433 }
434
/* Report the system-wide Spectre-v4 mitigation status. */
enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}
439
/*
 * Determine the local CPU's hardware Spectre-v4 state: on the safelist,
 * mitigated via the SSBS capability, or vulnerable.
 */
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	/* CPUs known not to be affected by Spectre-v4 at all. */
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}
461
462 static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
463 {
464         int ret;
465         struct arm_smccc_res res;
466
467         arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
468                              ARM_SMCCC_ARCH_WORKAROUND_2, &res);
469
470         ret = res.a0;
471         switch (ret) {
472         case SMCCC_RET_SUCCESS:
473                 return SPECTRE_MITIGATED;
474         case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
475                 fallthrough;
476         case SMCCC_RET_NOT_REQUIRED:
477                 return SPECTRE_UNAFFECTED;
478         default:
479                 fallthrough;
480         case SMCCC_RET_NOT_SUPPORTED:
481                 return SPECTRE_VULNERABLE;
482         }
483 }
484
485 bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
486 {
487         enum mitigation_state state;
488
489         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
490
491         state = spectre_v4_get_cpu_hw_mitigation_state();
492         if (state == SPECTRE_VULNERABLE)
493                 state = spectre_v4_get_cpu_fw_mitigation_state();
494
495         return state != SPECTRE_UNAFFECTED;
496 }
497
/*
 * UNDEF handler for "MSR SSBS, #imm": mirror the requested immediate into
 * the saved PSTATE.SSBS bit and step over the instruction. Kernel-mode
 * traps are handled here (return 0); user-mode traps are declined
 * (non-zero return) and left to the normal UNDEF path.
 */
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	/* The PSTATE_Imm_shift bit of the encoding carries the immediate. */
	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	/* Skip the emulated 4-byte instruction. */
	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}
511
/*
 * Match MSR-immediate writes to PSTATE.SSBS, with the immediate bit
 * wildcarded so both the "#0" and "#1" forms reach the handler above.
 */
static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};
517
/*
 * Enable the PSTATE.SSBS-based Spectre-v4 mitigation on this CPU, or back
 * out of it when mitigations are disabled on the command line.
 */
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	/* Register the SSBS emulation hook exactly once, system-wide. */
	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		/* Leave speculative store bypass enabled: DSSBS + SSBS=1. */
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		asm volatile(SET_PSTATE_SSBS(1));
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	asm volatile(SET_PSTATE_SSBS(0));
	return SPECTRE_MITIGATED;
}
549
/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	/* Mitigation disabled: keep the branch and skip the firmware call. */
	if (spectre_v4_mitigations_off())
		return;

	/* All CPUs have SSBS: the hardware mitigation is used instead. */
	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	/* Only the dynamic policy needs the firmware toggle NOP-ed in. */
	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
569
/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
						   __le32 *origptr,
						   __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		/* No usable conduit: leave the NOP in place. */
		return;
	}

	*updptr = cpu_to_le32(insn);
}
595
/*
 * Enable the firmware (ARCH_WORKAROUND_2) Spectre-v4 mitigation on this
 * CPU, honouring the command-line policy.
 */
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		/* Explicitly ask firmware to leave store bypass enabled. */
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	/* Tell the entry/exit code this CPU needs the firmware toggles. */
	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}
616
/*
 * cpufeature enable hook: pick the best available Spectre-v4 mitigation
 * for this CPU (hardware SSBS first, then firmware) and fold the result
 * into the system-wide state. Needs preemption disabled so the per-cpu
 * accesses in the helpers target the right CPU.
 */
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}
629
630 static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
631 {
632         u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
633
634         if (state)
635                 regs->pstate |= bit;
636         else
637                 regs->pstate &= ~bit;
638 }
639
/*
 * Recompute the PSTATE.SSBS value @tsk should resume with. SSBS set means
 * speculative store bypass is *allowed*, i.e. the mitigation is off.
 */
void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;	/* Globally off: allow store bypass. */
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);	/* prctl() choice. */

	__update_pstate_ssbs(regs, ssbs);
}
652
653 /*
654  * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
655  * This is interesting because the "speculation disabled" behaviour can be
656  * configured so that it is preserved across exec(), which means that the
657  * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
658  * from userspace.
659  */
/* Mark @task as wanting the mitigation on (prctl PR_SPEC_DISABLE). */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	/* A sticky enable supersedes any pending clear-on-exec state. */
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}
666
/* Mark @task as wanting the mitigation off (prctl PR_SPEC_ENABLE). */
static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}
673
/*
 * Apply a PR_SPEC_STORE_BYPASS prctl() request to @task. Note the inverted
 * terminology: "enabling speculation" means *disabling* the mitigation and
 * vice versa. Returns 0 on success, -EPERM when the request contradicts a
 * forced global or per-task state, or -ERANGE for an unknown @ctrl.
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	/* Propagate the new choice into the task's saved PSTATE.SSBS. */
	spectre_v4_enable_task_mitigation(task);
	return 0;
}
736
737 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
738                              unsigned long ctrl)
739 {
740         switch (which) {
741         case PR_SPEC_STORE_BYPASS:
742                 return ssbd_prctl_set(task, ctrl);
743         default:
744                 return -ENODEV;
745         }
746 }
747
/* Report the PR_SPEC_STORE_BYPASS state as observed by @task. */
static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		/* Forced on: nothing the task can change. */
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		/* Dynamic policy: fall out to the per-task state below. */
		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}
780
781 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
782 {
783         switch (which) {
784         case PR_SPEC_STORE_BYPASS:
785                 return ssbd_prctl_get(task);
786         default:
787                 return -ENODEV;
788         }
789 }