// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
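
/*
 * in_kernel_fpu is true while a kernel_fpu_begin()/kernel_fpu_end()
 * section is active on this CPU; such sections must not nest, and the
 * helpers below exist so interrupt code can check for that case.
 */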
static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
	return !kernel_fpu_disabled();
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

/*
 * Save the FPU register state in fpu->state. The register state is
 * preserved.
 *
 * Must be called with fpregs_lock() held.
 *
 * The legacy FNSAVE instruction clears all FPU state unconditionally, so
 * register state has to be reloaded. That might be a pointless exercise
 * when the FPU is going to be used by another task right after that. But
 * this only affects 20+ years old 32bit systems and avoids conditionals all
 * over the place.
 *
 * FXSAVE and all XSAVE variants preserve the FPU register state.
 */
void save_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		os_xsave(&fpu->state.xsave);

		/*
		 * AVX512 state is tracked here because its use is
		 * known to slow the max clock speed of the core.
		 */
		if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
			fpu->avx512_timestamp = jiffies;
		return;
	}

	if (likely(use_fxsr())) {
		fxsave(&fpu->state.fxsave);
		return;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to reload them from the memory state.
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
	frstor(&fpu->state.fsave);
}
EXPORT_SYMBOL(save_fpregs_to_fpstate);
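
/*
 * Informational: the avx512_timestamp recorded above backs the
 * AVX512_elapsed_ms field exposed in /proc/<pid>/arch_status on kernels
 * built with CONFIG_PROC_PID_ARCH_STATUS.
 */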

void __restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask)
{
	/*
	 * AMD K7/K8 and later CPUs up to Zen don't save/restore
	 * FDP/FIP/FOP unless an exception is pending. Clear the x87 state
	 * here by setting it to fixed values. "m" is a random variable
	 * that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	if (use_xsave()) {
		os_xrstor(&fpstate->xsave, mask);
	} else {
		if (use_fxsr())
			fxrstor(&fpstate->fxsave);
		else
			frstor(&fpstate->fsave);
	}
}
EXPORT_SYMBOL_GPL(__restore_fpregs_from_fpstate);
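
/*
 * Note on the mask argument above: it selects which xfeature components
 * XRSTOR(S) reloads. The legacy FXRSTOR/FRSTOR paths have no such
 * selectivity and always restore their whole state image.
 */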

void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
	preempt_disable();

	WARN_ON_FPU(!irq_fpu_usable());
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, true);

	if (!(current->flags & PF_KTHREAD) &&
	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
		set_thread_flag(TIF_NEED_FPU_LOAD);
		save_fpregs_to_fpstate(&current->thread.fpu);
	}
	__cpu_invalidate_fpregs_state();

	/* Put sane initial values into the control registers. */
	if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
		ldmxcsr(MXCSR_DEFAULT);

	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
		asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);

void kernel_fpu_end(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, false);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
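
/*
 * Illustrative usage sketch (not taken verbatim from this file): code
 * that wants to use SIMD registers in kernel mode brackets the work with
 * kernel_fpu_begin()/kernel_fpu_end() and, when it can run in interrupt
 * context, checks irq_fpu_usable() first:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SIMD code, must not sleep ...
 *		kernel_fpu_end();
 *	} else {
 *		... scalar fallback ...
 *	}
 */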

/*
 * Sync the FPU register state to current's memory register state when the
 * current task owns the FPU. The hardware register state is preserved.
 */
void fpu_sync_fpstate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	trace_x86_fpu_before_save(fpu);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		save_fpregs_to_fpstate(fpu);

	trace_x86_fpu_after_save(fpu);
	fpregs_unlock();
}

static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
}
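
/*
 * FXSAVE-format defaults: mask all x87 exceptions via the control word
 * and put MXCSR into its architectural default state.
 */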
static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}
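
/*
 * These are the documented x87 init values: CWD 0x037f masks all
 * exceptions and selects 64-bit precision with round-to-nearest, SWD 0
 * means no pending exceptions and stack top 0, and an all-ones TWD tags
 * every register as empty. The upper 16 bits of each 32-bit field are
 * not part of the architectural 16-bit values.
 */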

void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		fpstate_init_xstate(&state->xsave);
	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

/* Clone current's FPU state on fork */
int fpu_clone(struct task_struct *dst)
{
	struct fpu *src_fpu = &current->thread.fpu;
	struct fpu *dst_fpu = &dst->thread.fpu;

	/* The new task's FPU state cannot be valid in the hardware. */
	dst_fpu->last_cpu = -1;

	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return 0;

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);

	/*
	 * If the FPU registers are not owned by current just memcpy() the
	 * state. Otherwise save the FPU registers directly into the
	 * child's FPU context, without any memory-to-memory copying.
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
	else
		save_fpregs_to_fpstate(dst_fpu);
	fpregs_unlock();

	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu == &current->thread.fpu) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from the init fpstate.
 * Caller must do fpregs_[un]lock() around it.
 */
static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
{
	if (use_xsave())
		os_xrstor(&init_fpstate.xsave, features_mask);
	else if (use_fxsr())
		fxrstor(&init_fpstate.fxsave);
	else
		frstor(&init_fpstate.fsave);

	pkru_write_default();
}
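
/*
 * PKRU is written to its hardware init value separately above: the
 * legacy FXRSTOR/FRSTOR paths never touch PKRU, and the XSTATE path may
 * exclude it via features_mask.
 */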

static inline unsigned int init_fpstate_copy_size(void)
{
	if (!use_xsave())
		return fpu_kernel_xstate_size;

	/* XSAVE(S) just needs the legacy and the xstate header part */
	return sizeof(init_fpstate.xsave);
}

/* Temporary workaround. Will be removed once PKRU and XSTATE are untangled. */
static inline void pkru_set_default_in_xstate(struct xregs_state *xsave)
{
	struct pkru_state *pk;

	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;
	/*
	 * Force XFEATURE_PKRU to be set in the header otherwise
	 * get_xsave_addr() does not work and it also needs to be set to
	 * make XRSTOR(S) load it.
	 */
	xsave->header.xfeatures |= XFEATURE_MASK_PKRU;
	pk = get_xsave_addr(xsave, XFEATURE_PKRU);
	pk->pkru = pkru_get_init_value();
}

/*
 * Reset current->fpu memory state to the init values.
 */
static void fpu_reset_fpstate(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_lock();
	fpu__drop(fpu);
	/*
	 * This does not change the actual hardware registers. It just
	 * resets the memory image and sets TIF_NEED_FPU_LOAD so a
	 * subsequent return to usermode will reload the registers from the
	 * task's memory image.
	 *
	 * Do not use fpstate_init() here. Just copy init_fpstate which has
	 * the correct content already except for PKRU.
	 */
	memcpy(&fpu->state, &init_fpstate, init_fpstate_copy_size());
	pkru_set_default_in_xstate(&fpu->state.xsave);
	set_thread_flag(TIF_NEED_FPU_LOAD);
	fpregs_unlock();
}

/*
 * Reset current's user FPU states to the init states. current's
 * supervisor states, if any, are not modified by this function. The
 * caller guarantees that the XSTATE header in memory is intact.
 */
void fpu__clear_user_states(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
		fpu_reset_fpstate();
		fpregs_unlock();
		return;
	}

	/*
	 * Ensure that current's supervisor states are loaded into their
	 * corresponding registers.
	 */
	if (xfeatures_mask_supervisor() &&
	    !fpregs_state_valid(fpu, smp_processor_id())) {
		os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
	}

	/* Reset user states in registers. */
	restore_fpregs_from_init_fpstate(xfeatures_mask_restore_user());

	/*
	 * Now all FPU registers have their desired values. Inform the FPU
	 * state machine that current's FPU registers are in the hardware
	 * registers. The memory image does not need to be updated because
	 * any operation relying on it has to save the registers first when
	 * current's FPU is marked active.
	 */
	fpregs_mark_activate();
	fpregs_unlock();
}

void fpu_flush_thread(void)
{
	fpu_reset_fpstate();
}

/*
 * Load FPU context before returning to userspace.
 */
void switch_fpu_return(void)
{
	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	fpregs_restore_userregs();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);

#ifdef CONFIG_X86_DEBUG_FPU
/*
 * If current FPU state according to its tracking (loaded FPU context on this
 * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
 * loaded on return to userland.
 */
void fpregs_assert_state_consistent(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		return;

	WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
}
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
#endif

void fpregs_mark_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_activate(fpu);
	fpu->last_cpu = smp_processor_id();
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}
EXPORT_SYMBOL_GPL(fpregs_mark_activate);
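
/*
 * Informal summary of the tracking protocol used above: with
 * TIF_NEED_FPU_LOAD clear, the CPU registers hold current's FPU state
 * and fpu_fpregs_owner_ctx/fpu->last_cpu identify the owner; with the
 * flag set, the authoritative state lives in fpu->state and is loaded
 * on the next return to userspace.
 */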

/*
 * x87 math exception handling:
 */
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register (MXCSR). Thus, to determine
		 * which unmasked exception was caught we must mask the exception mask
		 * bits at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}
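
	/*
	 * Either way "err" now holds only the raised exception flags (low
	 * six bits) whose mask bit is clear. In MXCSR each mask bit sits
	 * exactly seven positions above its flag bit, hence the ">> 7"
	 * alignment.
	 */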
	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}