// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */
#include <linux/arm-smccc.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/spectre.h>
#include <asm/traps.h>
/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;
		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);
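
/*
 * Example: booting with "nospectre_v2" on the kernel command line disables
 * the mitigation, as does the generic "mitigations=off" option checked via
 * cpu_mitigations_off() below.
 */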
static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}
static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}
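
/*
 * Per-CPU branch predictor hardening state: the callback to invoke and,
 * under CONFIG_KVM, the hyp vectors slot paired with it.
 */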
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

#ifdef CONFIG_KVM
#include <asm/cacheflush.h>
#include <asm/kvm_asm.h>

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
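
/*
 * Stamp a copy of the hardening sequence in front of each of the sixteen
 * 0x80-byte vector entries in the given 2K hyp vectors slot.
 */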
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;
	const char *hyp_vecs_start = __smccc_workaround_1_smc;
	const char *hyp_vecs_end = __smccc_workaround_1_smc +
				   __SMCCC_WORKAROUND_1_SMC_SZ;

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available()) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();
	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}
/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 * - Vulnerable.
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled both
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};

static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);
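
/*
 * Example: "ssbd=force-on" selects SPECTRE_V4_POLICY_MITIGATION_ENABLED,
 * "ssbd=force-off" selects SPECTRE_V4_POLICY_MITIGATION_DISABLED and
 * "ssbd=kernel" selects SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, per the
 * table above.
 */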
/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}
/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}
ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}
enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}
static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}
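
/*
 * The undef hook below matches the "MSR SSBS, #imm" encoding (the immediate
 * bit is masked out, so both #0 and #1 match) and emulates the write for
 * kernel-mode code running on a CPU that traps the instruction.
 */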
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		asm volatile(SET_PSTATE_SSBS(1));
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	asm volatile(SET_PSTATE_SSBS(0));
	return SPECTRE_MITIGATED;
}
/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
						   __le32 *origptr,
						   __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}
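
/*
 * Set or clear the SSBS bit in a task's saved pstate so that the change
 * takes effect on return to userspace; compat tasks use the AArch32 SSBS
 * bit, native tasks the AArch64 one.
 */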
static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}
/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * by userspace.
 */
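
/*
 * For example, a task opting in from userspace would do something like
 * (userspace sketch, not kernel code):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * which reaches ssbd_prctl_set() below via arch_prctl_spec_ctrl_set().
 */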
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}