// SPDX-License-Identifier: GPL-2.0-only
/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/swab.h>

#include <asm/esr.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/processor.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>

#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)

/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state of the task that was most recently
 * loaded onto the CPU, or NULL if kernel mode NEON has been performed after
 * that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
 * save the task's FPSIMD context back to task_struct from softirq context.
 * To prevent this from racing with the manipulation of the task's FPSIMD state
 * from task context and thereby corrupting the state, it is necessary to
 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
 * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
 * run but prevent them from using FPSIMD.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if the task's fpsimd_cpu field contains the
 *   id of the current CPU and the CPU's fpsimd_last_state per-cpu variable
 *   points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
 *   cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */
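
/*
 * Illustratively (a sketch, not a helper defined in this file), the "still
 * in sync" test described above amounts to:
 *
 *	struct fpsimd_last_state_struct *last =
 *		this_cpu_ptr(&fpsimd_last_state);
 *	bool clean = last->st == &task->thread.uw.fpsimd_state &&
 *		     task->thread.fpsimd_cpu == smp_processor_id();
 *
 * which is exactly the pair of comparisons fpsimd_thread_switch() makes
 * below when deciding whether to set TIF_FOREIGN_FPSTATE.
 */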

struct fpsimd_last_state_struct {
	struct user_fpsimd_state *st;
	void *sve_state;
	unsigned int sve_vl;
};

static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);

/* Default VL for tasks that don't set it explicitly: */
static int __sve_default_vl = -1;

static int get_sve_default_vl(void)
{
	return READ_ONCE(__sve_default_vl);
}

#ifdef CONFIG_ARM64_SVE

static void set_sve_default_vl(int val)
{
	WRITE_ONCE(__sve_default_vl, val);
}

/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;

/*
 * Set of available vector lengths,
 * where length vq is encoded as bit __vq_to_bit(vq):
 */
__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
/* Set of vector lengths present on at least one cpu: */
static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);

static void __percpu *efi_sve_state;

#else /* ! CONFIG_ARM64_SVE */

/* Dummy declarations for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
extern void __percpu *efi_sve_state;

#endif /* ! CONFIG_ARM64_SVE */

DEFINE_PER_CPU(bool, fpsimd_context_busy);
EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);

static void __get_cpu_fpsimd_context(void)
{
	bool busy = __this_cpu_xchg(fpsimd_context_busy, true);

	WARN_ON(busy);
}

/*
 * Claim ownership of the CPU FPSIMD context for use by the calling context.
 *
 * The caller may freely manipulate the FPSIMD context metadata until
 * put_cpu_fpsimd_context() is called.
 *
 * The double-underscore version must only be called if you know the task
 * can't be preempted.
 */
static void get_cpu_fpsimd_context(void)
{
	preempt_disable();
	__get_cpu_fpsimd_context();
}

static void __put_cpu_fpsimd_context(void)
{
	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);

	WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
}

/*
 * Release the CPU FPSIMD context.
 *
 * Must be called from a context in which get_cpu_fpsimd_context() was
 * previously called, with no call to put_cpu_fpsimd_context() in the
 * meantime.
 */
static void put_cpu_fpsimd_context(void)
{
	__put_cpu_fpsimd_context();
	preempt_enable();
}

static bool have_cpu_fpsimd_context(void)
{
	return !preemptible() && __this_cpu_read(fpsimd_context_busy);
}

/*
 * Call __sve_free() directly only if you know task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}

/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 * in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked. If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    When stored, FPSIMD registers V0-V31 are encoded in
 *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view. For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced.
 *
 *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 *    irrespective of whether TIF_SVE is clear or set, since these are
 *    not vector length dependent.
 */
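
/*
 * A worked example of the TIF_SVE-set layout described above (illustrative
 * numbers only): for a vector length of 32 bytes (256 bits),
 * vq = sve_vq_from_vl(32) = 2, so each of Z0-Z31 occupies vq * 16 = 32
 * bytes in task->thread.sve_state, each of P0-P15 and FFR occupies
 * vq * 2 = 4 bytes, and Vn aliases the low 16 bytes of the corresponding
 * Zn slot.
 */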

/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 */
static void task_fpsimd_load(void)
{
	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!have_cpu_fpsimd_context());

	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_load_state(sve_pffr(&current->thread),
			       &current->thread.uw.fpsimd_state.fpsr,
			       sve_vq_from_vl(current->thread.sve_vl) - 1);
	else
		fpsimd_load_state(&current->thread.uw.fpsimd_state);
}

/*
 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
 * date with respect to the CPU registers.
 */
static void fpsimd_save(void)
{
	struct fpsimd_last_state_struct const *last =
		this_cpu_ptr(&fpsimd_last_state);
	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!have_cpu_fpsimd_context());

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
			if (WARN_ON(sve_get_vl() != last->sve_vl)) {
				/*
				 * Can't save the user regs, so current would
				 * re-enter user with corrupt state.
				 * There's no way to recover, so kill it:
				 */
				force_signal_inject(SIGKILL, SI_KERNEL, 0);
				return;
			}

			sve_save_state((char *)last->sve_state +
						sve_ffr_offset(last->sve_vl),
				       &last->st->fpsr);
		} else
			fpsimd_save_state(last->st);
	}
}

/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(unsigned int vl)
{
	int bit;
	int max_vl = sve_max_vl;

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = SVE_VL_MIN;

	if (WARN_ON(!sve_vl_valid(max_vl)))
		max_vl = SVE_VL_MIN;

	if (vl > max_vl)
		vl = max_vl;

	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
			    __vq_to_bit(sve_vq_from_vl(vl)));
	return sve_vl_from_vq(__bit_to_vq(bit));
}
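
/*
 * Example (hypothetical hardware): if sve_vq_map has bits set only for
 * vq 1, 2 and 4 (VLs of 16, 32 and 64 bytes) and userspace asks for
 * vl = 48 (vq 3), then, because the bitmap is indexed in reverse order by
 * __vq_to_bit(), the find_next_bit() call above lands on vq 2 and the
 * request is silently rounded down to a VL of 32 bytes.
 */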

#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)

static int sve_proc_do_default_vl(struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;
	int vl = get_sve_default_vl();
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = sve_max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	set_sve_default_vl(find_supported_vector_length(vl));
	return 0;
}

static struct ctl_table sve_default_vl_table[] = {
	{
		.procname	= "sve_default_vector_length",
		.mode		= 0644,
		.proc_handler	= sve_proc_do_default_vl,
	},
	{ }
};

static int __init sve_sysctl_init(void)
{
	if (system_supports_sve())
		if (!register_sysctl("abi", sve_default_vl_table))
			return -EINVAL;

	return 0;
}

#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */

#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))

#ifdef CONFIG_CPU_BIG_ENDIAN
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	u64 a = swab64(x);
	u64 b = swab64(x >> 64);

	return ((__uint128_t)a << 64) | b;
}
#else
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	return x;
}
#endif

#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
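
/*
 * Worked example for the big-endian case: for
 * x = 0x0102030405060708090a0b0c0d0e0f10, swab64() of the low half gives
 * a = 0x100f0e0d0c0b0a09 and of the high half b = 0x0807060504030201, so
 * the result 0x100f0e0d0c0b0a090807060504030201 is the full byte reversal
 * needed for sve_state to hold Z-register contents in little-endian memory
 * order on a big-endian kernel.
 */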

static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
			    unsigned int vq)
{
	unsigned int i;
	__uint128_t *p;

	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t *)ZREG(sst, vq, i);
		*p = arm64_cpu_to_le128(fst->vregs[i]);
	}
}

/*
 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must be up to date before calling this
 * function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	__fpsimd_to_sve(sst, fst, vq);
}

/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.uw.fpsimd_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
static void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq;
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;
	__uint128_t const *p;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t const *)ZREG(sst, vq, i);
		fst->vregs[i] = arm64_le128_to_cpu(*p);
	}
}

#ifdef CONFIG_ARM64_SVE

/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
size_t sve_state_size(struct task_struct const *task)
{
	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
}
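
/*
 * For example (arithmetic only, following the sigcontext.h layout): at the
 * architectural maximum VL of 256 bytes (vq = 16), this works out to
 * 32 Z-regs * 256 bytes + 16 P-regs * 32 bytes + 32 bytes of FFR
 * = 8736 bytes, the "maximum ~8KB" mentioned in sve_alloc() below.
 */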

/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data. The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability: except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by the task.
 */
void sve_alloc(struct task_struct *task)
{
	if (task->thread.sve_state) {
		memset(task->thread.sve_state, 0, sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);

	/*
	 * If future SVE revisions can have larger vectors though,
	 * this may cease to be true:
	 */
	BUG_ON(!task->thread.sve_state);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_sync_to_sve(struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SVE))
		fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void sve_sync_to_fpsimd(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the task->thread.uw.fpsimd_state.
 *
 * This should only be called by ptrace to merge new FPSIMD register
 * values into a task for which SVE is currently active.
 * task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must already have been initialised with
 * the new FPSIMD register values to be merged in.
 */
void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!test_tsk_thread_flag(task, TIF_SVE))
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);

	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
	__fpsimd_to_sve(sst, fst, vq);
}

int sve_set_vector_length(struct task_struct *task,
			  unsigned long vl, unsigned long flags)
{
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic SVE code can
	 * work with. A flag may be assigned in the future to allow setting
	 * of larger vector lengths without confusing older software.
	 */
	if (vl > SVE_VL_ARCH_MAX)
		vl = SVE_VL_ARCH_MAX;

	vl = find_supported_vector_length(vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task->thread.sve_vl_onexec = vl;
	else
		/* Reset VL to system default on next exec: */
		task->thread.sve_vl_onexec = 0;

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	if (vl == task->thread.sve_vl)
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * regular FPSIMD thread.
	 */
	if (task == current) {
		get_cpu_fpsimd_context();

		fpsimd_save();
	}

	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);

	if (task == current)
		put_cpu_fpsimd_context();

	/*
	 * Force reallocation of task SVE state to the correct size
	 * on next use:
	 */
	sve_free(task);

	task->thread.sve_vl = vl;

out:
	update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
			       flags & PR_SVE_VL_INHERIT);

	return 0;
}

/*
 * Encode the current vector length and flags for return.
 * This is only required for prctl(): ptrace has separate fields.
 *
 * flags are as for sve_set_vector_length().
 */
static int sve_prctl_status(unsigned long flags)
{
	int ret;

	if (flags & PR_SVE_SET_VL_ONEXEC)
		ret = current->thread.sve_vl_onexec;
	else
		ret = current->thread.sve_vl;

	if (test_thread_flag(TIF_SVE_VL_INHERIT))
		ret |= PR_SVE_VL_INHERIT;

	return ret;
}

/* PR_SVE_SET_VL */
int sve_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SVE_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sve())
		return -EINVAL;

	ret = sve_set_vector_length(current, vl, flags);
	if (ret)
		return ret;

	return sve_prctl_status(flags);
}

/* PR_SVE_GET_VL */
int sve_get_current_vl(void)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_prctl_status(0);
}
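
/*
 * For illustration, the two prctl()s above are typically used from
 * userspace like this (sketch, error handling omitted):
 *
 *	#include <sys/prctl.h>
 *
 *	// Request a 32-byte VL, to be kept across execve():
 *	prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *
 *	// Read back the VL actually granted; flags sit above the
 *	// PR_SVE_VL_LEN_MASK bits of the return value:
 *	int vl = prctl(PR_SVE_GET_VL) & PR_SVE_VL_LEN_MASK;
 *
 * The kernel may round the requested VL as described in
 * find_supported_vector_length() above.
 */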

static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;
	unsigned long zcr;

	bitmap_zero(map, SVE_VQ_MAX);

	zcr = ZCR_ELx_LEN_MASK;
	zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
		vl = sve_get_vl();
		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(__vq_to_bit(vq), map);
	}
}

/*
 * Initialise the set of known supported VQs for the boot CPU.
 * This is called during kernel boot, before secondary CPUs are brought up.
 */
void __init sve_init_vq_map(void)
{
	sve_probe_vqs(sve_vq_map);
	bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
}

/*
 * If we haven't committed to the set of supported VQs yet, filter out
 * those not supported by the current CPU.
 * This function is called during the bring-up of early secondary CPUs only.
 */
void sve_update_vq_map(void)
{
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);

	sve_probe_vqs(tmp_map);
	bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
	bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
}

/*
 * Check whether the current CPU supports all VQs in the committed set.
 * This function is called during the bring-up of late secondary CPUs only.
 */
int sve_verify_vq_map(void)
{
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	sve_probe_vqs(tmp_map);

	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
			smp_processor_id());
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return 0;

	/*
	 * For KVM, it is necessary to ensure that this CPU doesn't
	 * support any vector length that guests may have probed as
	 * unsupported.
	 */

	/* Recover the set of supported VQs: */
	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	/* Find VQs supported that are not globally supported: */
	bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);

	/* Find the lowest such VQ, if any: */
	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		return 0; /* no mismatches */

	/*
	 * Mismatches above sve_max_virtualisable_vl are fine, since
	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
	 */
	if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
		pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
			smp_processor_id());
		return -EINVAL;
	}

	return 0;
}

static void __init sve_efi_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI))
		return;

	/*
	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
	 * This is evidence of a crippled system and we are returning void,
	 * so no attempt is made to handle this situation here.
	 */
	if (!sve_vl_valid(sve_max_vl))
		goto fail;

	efi_sve_state = __alloc_percpu(
		SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
	if (!efi_sve_state)
		goto fail;

	return;

fail:
	panic("Cannot allocate percpu memory for EFI SVE save/restore");
}

/*
 * Enable SVE for EL1.
 * Intended for use by the cpufeatures code during CPU boot.
 */
void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();
}

/*
 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
 * vector length.
 *
 * Use only if SVE is present.
 * This function clobbers the SVE vector length.
 */
u64 read_zcr_features(void)
{
	u64 zcr;
	unsigned int vq_max;

	/*
	 * Set the maximum possible VL, and write zeroes to all other
	 * bits to see if they stick.
	 */
	sve_kernel_enable(NULL);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);

	zcr = read_sysreg_s(SYS_ZCR_EL1);
	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
	vq_max = sve_vq_from_vl(sve_get_vl());
	zcr |= vq_max - 1; /* set LEN field to maximum effective value */

	return zcr;
}

void __init sve_setup(void)
{
	u64 zcr;
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
		set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);

	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);

	/*
	 * Sanity-check that the max VL we determined through CPU features
	 * corresponds properly to sve_vq_map. If not, do our best:
	 */
	if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
		sve_max_vl = find_supported_vector_length(sve_max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	set_sve_default_vl(find_supported_vector_length(64));

	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
		      SVE_VQ_MAX);

	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		/* No non-virtualisable VLs found */
		sve_max_virtualisable_vl = SVE_VQ_MAX;
	else if (WARN_ON(b == SVE_VQ_MAX - 1))
		/* No virtualisable VLs? This is architecturally forbidden. */
		sve_max_virtualisable_vl = SVE_VQ_MIN;
	else /* b + 1 < SVE_VQ_MAX */
		sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));

	if (sve_max_virtualisable_vl > sve_max_vl)
		sve_max_virtualisable_vl = sve_max_vl;

	pr_info("SVE: maximum available vector length %u bytes per vector\n",
		sve_max_vl);
	pr_info("SVE: default vector length %u bytes per vector\n",
		get_sve_default_vl());

	/* KVM decides whether to support mismatched systems. Just warn here: */
	if (sve_max_virtualisable_vl < sve_max_vl)
		pr_warn("SVE: unvirtualisable vector lengths present\n");

	sve_efi_setup();
}

/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */

/*
 * Trapped SVE access
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and TIF_SVE is set so that
 * the SVE access trap will be disabled the next time this task
 * reaches ret_to_user.
 *
 * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
		return;
	}

	sve_alloc(current);

	get_cpu_fpsimd_context();

	fpsimd_save();

	/* Force ret_to_user to reload the registers: */
	fpsimd_flush_task_state(current);

	fpsimd_to_sve(current);
	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	put_cpu_fpsimd_context();
}

/*
 * Trapped FP/ASIMD access.
 */
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
}

/*
 * Raise a SIGFPE for the current process.
 */
void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
	unsigned int si_code = FPE_FLTUNK;

	if (esr & ESR_ELx_FP_EXC_TFV) {
		if (esr & FPEXC_IOF)
			si_code = FPE_FLTINV;
		else if (esr & FPEXC_DZF)
			si_code = FPE_FLTDIV;
		else if (esr & FPEXC_OFF)
			si_code = FPE_FLTOVF;
		else if (esr & FPEXC_UFF)
			si_code = FPE_FLTUND;
		else if (esr & FPEXC_IXF)
			si_code = FPE_FLTRES;
	}

	send_sig_fault(SIGFPE, si_code,
		       (void __user *)instruction_pointer(regs),
		       current);
}

void fpsimd_thread_switch(struct task_struct *next)
{
	bool wrong_task, wrong_cpu;

	if (!system_supports_fpsimd())
		return;

	__get_cpu_fpsimd_context();

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/*
	 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
	 * state. For kernel threads, FPSIMD registers are never loaded
	 * and wrong_task and wrong_cpu will always be true.
	 */
	wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
					&next->thread.uw.fpsimd_state;
	wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();

	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
			       wrong_task || wrong_cpu);

	__put_cpu_fpsimd_context();
}

void fpsimd_flush_thread(void)
{
	int vl, supported_vl;

	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();

	fpsimd_flush_task_state(current);
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
		sve_free(current);

		/*
		 * Reset the task vector length as required.
		 * This is where we ensure that all user tasks have a valid
		 * vector length configured: no kernel task can become a user
		 * task without an exec and hence a call to this function.
		 * By the time the first call to this function is made, all
		 * early hardware probing is complete, so __sve_default_vl
		 * should be valid.
		 * If a bug causes this to go wrong, we make some noise and
		 * try to fudge thread.sve_vl to a safe value here.
		 */
		vl = current->thread.sve_vl_onexec ?
			current->thread.sve_vl_onexec : get_sve_default_vl();

		if (WARN_ON(!sve_vl_valid(vl)))
			vl = SVE_VL_MIN;

		supported_vl = find_supported_vector_length(vl);
		if (WARN_ON(supported_vl != vl))
			vl = supported_vl;

		current->thread.sve_vl = vl;

		/*
		 * If the task is not set to inherit, ensure that the vector
		 * length will be reset by a subsequent exec:
		 */
		if (!test_thread_flag(TIF_SVE_VL_INHERIT))
			current->thread.sve_vl_onexec = 0;
	}

	put_cpu_fpsimd_context();
}

/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();
	fpsimd_save();
	put_cpu_fpsimd_context();
}

/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.uw.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_to_fpsimd(current);
}

/*
 * Associate current's FPSIMD context with this cpu
 * The caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 */
void fpsimd_bind_task_to_cpu(void)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	last->st = &current->thread.uw.fpsimd_state;
	last->sve_state = current->thread.sve_state;
	last->sve_vl = current->thread.sve_vl;
	current->thread.fpsimd_cpu = smp_processor_id();

	if (system_supports_sve()) {
		/* Toggle SVE trapping for userspace if needed */
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();

		/* Serialised by exception return to user */
	}
}

void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
			      unsigned int sve_vl)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!in_softirq() && !irqs_disabled());

	last->st = st;
	last->sve_state = sve_state;
	last->sve_vl = sve_vl;
}

/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'
 */
void fpsimd_restore_current_state(void)
{
	/*
	 * For the tasks that were created before we detected the absence of
	 * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via
	 * fpsimd_thread_switch(), e.g. init. This could then be inherited by
	 * the child processes. If we later detect that the system doesn't
	 * support FP/SIMD, we must clear the flag for all the tasks to
	 * indicate that the FPSTATE is clean (as we can't have one) to avoid
	 * looping forever in do_notify_resume().
	 */
	if (!system_supports_fpsimd()) {
		clear_thread_flag(TIF_FOREIGN_FPSTATE);
		return;
	}

	get_cpu_fpsimd_context();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		task_fpsimd_load();
		fpsimd_bind_task_to_cpu();
	}

	put_cpu_fpsimd_context();
}

/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	get_cpu_fpsimd_context();

	current->thread.uw.fpsimd_state = *state;
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		fpsimd_to_sve(current);

	task_fpsimd_load();
	fpsimd_bind_task_to_cpu();

	clear_thread_flag(TIF_FOREIGN_FPSTATE);

	put_cpu_fpsimd_context();
}

/*
 * Invalidate live CPU copies of task t's FPSIMD state
 *
 * This function may be called with preemption enabled. The barrier()
 * ensures that the assignment to fpsimd_cpu is visible to any
 * preemption/softirq that could race with set_tsk_thread_flag(), so
 * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
 *
 * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
 * subsequent code.
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
	t->thread.fpsimd_cpu = NR_CPUS;
	/*
	 * If we don't support fpsimd, bail out after we have
	 * reset the fpsimd_cpu for this task and clear the
	 * FPSTATE.
	 */
	if (!system_supports_fpsimd())
		return;
	barrier();
	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);

	barrier();
}

/*
 * Invalidate any task's FPSIMD state that is present on this cpu.
 * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
 * before calling this function.
 */
static void fpsimd_flush_cpu_state(void)
{
	WARN_ON(!system_supports_fpsimd());
	__this_cpu_write(fpsimd_last_state.st, NULL);
	set_thread_flag(TIF_FOREIGN_FPSTATE);
}

/*
 * Save the FPSIMD state to memory and invalidate cpu view.
 * This function must be called with preemption disabled.
 */
void fpsimd_save_and_flush_cpu_state(void)
{
	if (!system_supports_fpsimd())
		return;
	WARN_ON(preemptible());
	__get_cpu_fpsimd_context();
	fpsimd_save();
	fpsimd_flush_cpu_state();
	__put_cpu_fpsimd_context();
}

#ifdef CONFIG_KERNEL_MODE_NEON

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	get_cpu_fpsimd_context();

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/* Invalidate any task state remaining in the fpsimd regs: */
	fpsimd_flush_cpu_state();
}
EXPORT_SYMBOL(kernel_neon_begin);

/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	if (!system_supports_fpsimd())
		return;

	put_cpu_fpsimd_context();
}
EXPORT_SYMBOL(kernel_neon_end);
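
/*
 * Typical use from elsewhere in the kernel (illustrative sketch only):
 *
 *	#include <asm/neon.h>
 *	#include <asm/simd.h>
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		// ... NEON/FPSIMD-accelerated work (preemption is
 *		// disabled until the matching end call) ...
 *		kernel_neon_end();
 *	} else {
 *		// ... scalar fallback ...
 *	}
 */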

#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);

/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */

/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd()) {
		kernel_neon_begin();
	} else {
		/*
		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
		 * preserving:
		 */
		if (system_supports_sve() && likely(efi_sve_state)) {
			char *sve_state = this_cpu_ptr(efi_sve_state);

			__this_cpu_write(efi_sve_state_used, true);

			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
		} else {
			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		}

		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}

/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
		kernel_neon_end();
	} else {
		if (system_supports_sve() &&
		    likely(__this_cpu_read(efi_sve_state_used))) {
			char const *sve_state = this_cpu_ptr(efi_sve_state);

			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       sve_vq_from_vl(sve_get_vl()) - 1);

			__this_cpu_write(efi_sve_state_used, false);
		} else {
			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
		}
	}
}

#endif /* CONFIG_EFI */

#endif /* CONFIG_KERNEL_MODE_NEON */

#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		fpsimd_save_and_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}

#else
static inline void fpsimd_hotplug_init(void) { }
#endif

/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (cpu_have_named_feature(FP)) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!cpu_have_named_feature(ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	return sve_sysctl_init();
}
core_initcall(fpsimd_init);