/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitmap.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/prctl.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>

#include <asm/fpsimd.h>
#include <asm/cputype.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>

#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)

/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a 'cpu' field to struct fpsimd_state, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state of the task most recently loaded onto
 * the CPU, or NULL if kernel mode NEON has been performed after that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_state.cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
 * save the task's FPSIMD context back to task_struct from softirq context.
 * To prevent this from racing with the manipulation of the task's FPSIMD state
 * from task context and thereby corrupting the state, it is necessary to
 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
 * flag with local_bh_disable() unless softirqs are already masked.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if both the task's fpsimd_state.cpu field
 *   contains the id of the current CPU, and the CPU's fpsimd_last_state
 *   per-cpu variable points to the task's fpsimd_state, the
 *   TIF_FOREIGN_FPSTATE flag is cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_state.cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */

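/*
 * Illustrative sketch (not compiled): the "still mutually in sync" test
 * described above reduces to the check performed in fpsimd_thread_switch()
 * and fpsimd_restore_current_state() below:
 *
 *	struct fpsimd_state *st = &task->thread.fpsimd_state;
 *
 *	if (__this_cpu_read(fpsimd_last_state) == st &&
 *	    st->cpu == smp_processor_id())
 *		...the FPSIMD restore may be skipped...
 */
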
static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);

/* Default VL for tasks that don't set it explicitly: */
static int sve_default_vl = -1;

#ifdef CONFIG_ARM64_SVE

/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = -1;
/* Set of available vector lengths, as vq_to_bit(vq): */
static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);

#else /* ! CONFIG_ARM64_SVE */

/* Dummy declaration for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);

#endif /* ! CONFIG_ARM64_SVE */

/*
 * Call __sve_free() directly only if you know task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}

/* Offset of FFR in the SVE register dump */
static size_t sve_ffr_offset(int vl)
{
	return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
}

/* Pointer to the FFR slot within task's SVE register dump */
static void *sve_pffr(struct task_struct *task)
{
	return (char *)task->thread.sve_state +
		sve_ffr_offset(task->thread.sve_vl);
}

static void change_cpacr(u64 val, u64 mask)
{
	u64 cpacr = read_sysreg(CPACR_EL1);
	u64 new = (cpacr & ~mask) | val;

	/* Avoid a redundant system register write: */
	if (new != cpacr)
		write_sysreg(new, CPACR_EL1);
}

static void sve_user_disable(void)
{
	change_cpacr(0, CPACR_EL1_ZEN_EL0EN);
}

static void sve_user_enable(void)
{
	change_cpacr(CPACR_EL1_ZEN_EL0EN, CPACR_EL1_ZEN_EL0EN);
}

/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 * in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked.  If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    When stored, FPSIMD registers V0-V31 are encoded in
 *    task->fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view.  For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced.
 *
 *  * FPSR and FPCR are always stored in task->fpsimd_state irrespective
 *    of whether TIF_SVE is clear or set, since these are not vector
 *    length dependent.
 */

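/*
 * Illustrative sketch: code consuming the saved state typically
 * dispatches on TIF_SVE the way task_fpsimd_load() does below:
 *
 *	if (system_supports_sve() && test_thread_flag(TIF_SVE))
 *		...use task->thread.sve_state...
 *	else
 *		...use task->thread.fpsimd_state...
 */
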
/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 *
 * Softirqs (and preemption) must be disabled.
 */
static void task_fpsimd_load(void)
{
	WARN_ON(!in_softirq() && !irqs_disabled());

	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_load_state(sve_pffr(current),
			       &current->thread.fpsimd_state.fpsr,
			       sve_vq_from_vl(current->thread.sve_vl) - 1);
	else
		fpsimd_load_state(&current->thread.fpsimd_state);

	if (system_supports_sve()) {
		/* Toggle SVE trapping for userspace if needed */
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();

		/* Serialised by exception return to user */
	}
}

/*
 * Ensure current's FPSIMD/SVE storage in thread_struct is up to date
 * with respect to the CPU registers.
 *
 * Softirqs (and preemption) must be disabled.
 */
static void task_fpsimd_save(void)
{
	WARN_ON(!in_softirq() && !irqs_disabled());

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
			if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
				/*
				 * Can't save the user regs, so current would
				 * re-enter user with corrupt state.
				 * There's no way to recover, so kill it:
				 */
				force_signal_inject(
					SIGKILL, 0, current_pt_regs(), 0);
				return;
			}

			sve_save_state(sve_pffr(current),
				       &current->thread.fpsimd_state.fpsr);
		} else
			fpsimd_save_state(&current->thread.fpsimd_state);
	}
}

/*
 * Helpers to translate bit indices in sve_vq_map to VQ values (and
 * vice versa).  This allows find_next_bit() to be used to find the
 * _maximum_ VQ not exceeding a certain value.
 */
static unsigned int vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}

static unsigned int bit_to_vq(unsigned int bit)
{
	if (WARN_ON(bit >= SVE_VQ_MAX))
		bit = SVE_VQ_MAX - 1;

	return SVE_VQ_MAX - bit;
}

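/*
 * Worked example (illustrative; SVE_VQ_MAX is 512 in the current ABI
 * headers): vq_to_bit(SVE_VQ_MAX) == 0 and vq_to_bit(SVE_VQ_MIN) == 511,
 * so larger VQs map to lower bit indices.  Scanning upwards from
 * vq_to_bit(vq) with find_next_bit() therefore returns the first set bit
 * for a VQ <= vq, i.e. the largest supported VQ not exceeding vq.
 */
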
/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(unsigned int vl)
{
	int bit;
	int max_vl = sve_max_vl;

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = SVE_VL_MIN;

	if (WARN_ON(!sve_vl_valid(max_vl)))
		max_vl = SVE_VL_MIN;

	if (vl > max_vl)
		vl = max_vl;

	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
			    vq_to_bit(sve_vq_from_vl(vl)));
	return sve_vl_from_vq(bit_to_vq(bit));
}

#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))

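/*
 * Worked example (illustrative): SVE_SIG_ZREG_OFFSET() advances by
 * SVE_SIG_ZREG_SIZE(vq) == vq * 16 bytes per Z register, so with vq == 2
 * (256-bit vectors) ZREG(sst, 2, 0) is sst itself and ZREG(sst, 2, 1)
 * is sst + 32.
 */
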
/*
 * Transfer the FPSIMD state in task->thread.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.fpsimd_state must be up to date before calling this function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct fpsimd_state const *fst = &task->thread.fpsimd_state;
	unsigned int i;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < 32; ++i)
		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
		       sizeof(fst->vregs[i]));
}

/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.fpsimd_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
static void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq;
	void const *sst = task->thread.sve_state;
	struct fpsimd_state *fst = &task->thread.fpsimd_state;
	unsigned int i;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < 32; ++i)
		memcpy(&fst->vregs[i], ZREG(sst, vq, i),
		       sizeof(fst->vregs[i]));
}

#ifdef CONFIG_ARM64_SVE

/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
size_t sve_state_size(struct task_struct const *task)
{
	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
}

/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data.  The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability: except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by task.
 */
void sve_alloc(struct task_struct *task)
{
	if (task->thread.sve_state) {
		memset(task->thread.sve_state, 0, sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);

	/*
	 * If future SVE revisions can have larger vectors though,
	 * this may cease to be true:
	 */
	BUG_ON(!task->thread.sve_state);
}

int sve_set_vector_length(struct task_struct *task,
			  unsigned long vl, unsigned long flags)
{
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic SVE code can
	 * work with.  A flag may be assigned in the future to allow setting
	 * of larger vector lengths without confusing older software.
	 */
	if (vl > SVE_VL_ARCH_MAX)
		vl = SVE_VL_ARCH_MAX;

	vl = find_supported_vector_length(vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task->thread.sve_vl_onexec = vl;
	else
		/* Reset VL to system default on next exec: */
		task->thread.sve_vl_onexec = 0;

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	if (vl == task->thread.sve_vl)
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * non-SVE thread.
	 */
	if (task == current) {
		local_bh_disable();
		task_fpsimd_save();
		set_thread_flag(TIF_FOREIGN_FPSTATE);
	}

	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);

	if (task == current)
		local_bh_enable();

	/*
	 * Force reallocation of task SVE state to the correct size
	 * on next use:
	 */
	sve_free(task);

	task->thread.sve_vl = vl;

out:
	if (flags & PR_SVE_VL_INHERIT)
		set_tsk_thread_flag(task, TIF_SVE_VL_INHERIT);
	else
		clear_tsk_thread_flag(task, TIF_SVE_VL_INHERIT);

	return 0;
}

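/*
 * Illustrative caller (a sketch; the prctl() plumbing lives outside this
 * excerpt): userspace requests such as
 *
 *	prctl(PR_SVE_SET_VL, vl | PR_SVE_VL_INHERIT);
 *
 * are expected to funnel through sve_set_vector_length(), with the VL in
 * the low bits of the argument and the flag bits defined in
 * <linux/prctl.h>.
 */
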
/*
 * Bitmap for temporary storage of the per-CPU set of supported vector lengths
 * during secondary boot.
 */
static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);

static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;
	unsigned long zcr;

	bitmap_zero(map, SVE_VQ_MAX);

	zcr = ZCR_ELx_LEN_MASK;
	zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr; /* Only the LEN field */

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
		vl = sve_get_vl();
		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(vq_to_bit(vq), map);
	}
}

void __init sve_init_vq_map(void)
{
	sve_probe_vqs(sve_vq_map);
}

/*
 * If we haven't committed to the set of supported VQs yet, filter out
 * those not supported by the current CPU.
 */
void sve_update_vq_map(void)
{
	sve_probe_vqs(sve_secondary_vq_map);
	bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
}

/* Check whether the current CPU supports all VQs in the committed set */
int sve_verify_vq_map(void)
{
	int ret = 0;

	sve_probe_vqs(sve_secondary_vq_map);
	bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
		      SVE_VQ_MAX);
	if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
			smp_processor_id());
		ret = -EINVAL;
	}

	return ret;
}

/*
 * Enable SVE for EL1.
 * Intended for use by the cpufeatures code during CPU boot.
 */
int sve_kernel_enable(void *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();

	return 0;
}

void __init sve_setup(void)
{
	u64 zcr;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
		set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);

	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);

	/*
	 * Sanity-check that the max VL we determined through CPU features
	 * corresponds properly to sve_vq_map.  If not, do our best:
	 */
	if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
		sve_max_vl = find_supported_vector_length(sve_max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	sve_default_vl = find_supported_vector_length(64);

	pr_info("SVE: maximum available vector length %u bytes per vector\n",
		sve_max_vl);
	pr_info("SVE: default vector length %u bytes per vector\n",
		sve_default_vl);
}

/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */

/*
 * Trapped SVE access.
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and TIF_SVE is set so that
 * the SVE access trap will be disabled the next time this task
 * reaches ret_to_user.
 *
 * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
		return;
	}

	sve_alloc(current);

	local_bh_disable();

	task_fpsimd_save();
	fpsimd_to_sve(current);

	/* Force ret_to_user to reload the registers: */
	fpsimd_flush_task_state(current);
	set_thread_flag(TIF_FOREIGN_FPSTATE);

	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	local_bh_enable();
}

/*
 * Trapped FP/ASIMD access.
 */
asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
}

/*
 * Raise a SIGFPE for the current process.
 */
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
	siginfo_t info;
	unsigned int si_code = 0;

	if (esr & FPEXC_IOF)
		si_code = FPE_FLTINV;
	else if (esr & FPEXC_DZF)
		si_code = FPE_FLTDIV;
	else if (esr & FPEXC_OFF)
		si_code = FPE_FLTOVF;
	else if (esr & FPEXC_UFF)
		si_code = FPE_FLTUND;
	else if (esr & FPEXC_IXF)
		si_code = FPE_FLTRES;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGFPE;
	info.si_code = si_code;
	info.si_addr = (void __user *)instruction_pointer(regs);

	send_sig_info(SIGFPE, &info, current);
}

void fpsimd_thread_switch(struct task_struct *next)
{
	if (!system_supports_fpsimd())
		return;

	/*
	 * Save the current FPSIMD state to memory, but only if whatever is in
	 * the registers is in fact the most recent userland FPSIMD state of
	 * 'current'.
	 */
	if (current->mm)
		task_fpsimd_save();

	if (next->mm) {
		/*
		 * If we are switching to a task whose most recent userland
		 * FPSIMD state is already in the registers of *this* cpu,
		 * we can skip loading the state from memory.  Otherwise, set
		 * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
		 * upon the next return to userland.
		 */
		struct fpsimd_state *st = &next->thread.fpsimd_state;

		if (__this_cpu_read(fpsimd_last_state) == st
		    && st->cpu == smp_processor_id())
			clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
		else
			set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
	}
}

void fpsimd_flush_thread(void)
{
	int vl, supported_vl;

	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
	fpsimd_flush_task_state(current);

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
		sve_free(current);

		/*
		 * Reset the task vector length as required.
		 * This is where we ensure that all user tasks have a valid
		 * vector length configured: no kernel task can become a user
		 * task without an exec and hence a call to this function.
		 * By the time the first call to this function is made, all
		 * early hardware probing is complete, so sve_default_vl
		 * should be valid.
		 * If a bug causes this to go wrong, we make some noise and
		 * try to fudge thread.sve_vl to a safe value here.
		 */
		vl = current->thread.sve_vl_onexec ?
			current->thread.sve_vl_onexec : sve_default_vl;

		if (WARN_ON(!sve_vl_valid(vl)))
			vl = SVE_VL_MIN;

		supported_vl = find_supported_vector_length(vl);
		if (WARN_ON(supported_vl != vl))
			vl = supported_vl;

		current->thread.sve_vl = vl;

		/*
		 * If the task is not set to inherit, ensure that the vector
		 * length will be reset by a subsequent exec:
		 */
		if (!test_thread_flag(TIF_SVE_VL_INHERIT))
			current->thread.sve_vl_onexec = 0;
	}

	set_thread_flag(TIF_FOREIGN_FPSTATE);

	local_bh_enable();
}

/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'.
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();
	task_fpsimd_save();
	local_bh_enable();
}

/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_to_fpsimd(current);
}

/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'.
 */
void fpsimd_restore_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		struct fpsimd_state *st = &current->thread.fpsimd_state;

		task_fpsimd_load();
		__this_cpu_write(fpsimd_last_state, st);
		st->cpu = smp_processor_id();
	}

	local_bh_enable();
}

/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'.
 */
void fpsimd_update_current_state(struct fpsimd_state *state)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
		current->thread.fpsimd_state = *state;
		fpsimd_to_sve(current);
	}
	task_fpsimd_load();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		struct fpsimd_state *st = &current->thread.fpsimd_state;

		__this_cpu_write(fpsimd_last_state, st);
		st->cpu = smp_processor_id();
	}

	local_bh_enable();
}

/*
 * Invalidate live CPU copies of task t's FPSIMD state
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
	/* NR_CPUS is never a valid CPU id, so the st->cpu check must fail: */
	t->thread.fpsimd_state.cpu = NR_CPUS;
}

#ifdef CONFIG_KERNEL_MODE_NEON

DEFINE_PER_CPU(bool, kernel_neon_busy);
EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	local_bh_disable();

	__this_cpu_write(kernel_neon_busy, true);

	/* Save unsaved task fpsimd state, if any: */
	if (current->mm && !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
		fpsimd_save_state(&current->thread.fpsimd_state);

	/* Invalidate any task state remaining in the fpsimd regs: */
	__this_cpu_write(fpsimd_last_state, NULL);

	preempt_disable();

	local_bh_enable();
}
EXPORT_SYMBOL(kernel_neon_begin);

/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	bool busy;

	if (!system_supports_fpsimd())
		return;

	busy = __this_cpu_xchg(kernel_neon_busy, false);
	WARN_ON(!busy);	/* No matching kernel_neon_begin()? */

	preempt_enable();
}
EXPORT_SYMBOL(kernel_neon_end);

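/*
 * Illustrative usage (a sketch): a kernel-mode NEON user brackets its
 * FPSIMD work as follows, falling back to scalar code where SIMD is
 * unavailable:
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		...NEON/FPSIMD instructions...
 *		kernel_neon_end();
 *	} else {
 *		...scalar fallback...
 *	}
 */
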
#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);

/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */

/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd())
		kernel_neon_begin();
	else {
		fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}

/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (__this_cpu_xchg(efi_fpsimd_state_used, false))
		fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
	else
		kernel_neon_end();
}

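/*
 * Illustrative call pattern (a sketch; the real callers are the EFI
 * runtime-services wrappers, not this file):
 *
 *	__efi_fpsimd_begin();
 *	...EFI runtime services call...
 *	__efi_fpsimd_end();
 */
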
#endif /* CONFIG_EFI */

#endif /* CONFIG_KERNEL_MODE_NEON */

#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		if (current->mm)
			task_fpsimd_save();
		this_cpu_write(fpsimd_last_state, NULL);
		break;
	case CPU_PM_EXIT:
		if (current->mm)
			set_thread_flag(TIF_FOREIGN_FPSTATE);
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}
#else
static inline void fpsimd_hotplug_init(void) { }
#endif

/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (elf_hwcap & HWCAP_FP) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!(elf_hwcap & HWCAP_ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	return 0;
}
late_initcall(fpsimd_init);