// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static bool kernel_fpu_disabled(void)
{
        return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
        return !kernel_fpu_disabled();
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
        struct pt_regs *regs = get_irq_regs();

        return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
        return !in_interrupt() ||
                interrupted_user_mode() ||
                interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

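/*
 * Illustrative sketch, not part of this file: code that may run in IRQ
 * context is expected to check irq_fpu_usable() before opening a
 * kernel_fpu_begin()/end() section, falling back to an integer-only
 * path otherwise. The function name and both bodies are hypothetical.
 */
#if 0
static void example_simd_op(void *data, unsigned int len)
{
        if (irq_fpu_usable()) {
                kernel_fpu_begin();
                /* ... short SIMD implementation ... */
                kernel_fpu_end();
        } else {
                /* ... scalar fallback that touches no FPU/SIMD state ... */
        }
}
#endif
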
/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction cleared all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
int copy_fpregs_to_fpstate(struct fpu *fpu)
{
        if (likely(use_xsave())) {
                os_xsave(&fpu->state.xsave);

                /*
                 * AVX512 state is tracked here because its use is
                 * known to slow the max clock speed of the core.
                 */
                if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
                        fpu->avx512_timestamp = jiffies;
                return 1;
        }

        if (likely(use_fxsr())) {
                copy_fxregs_to_kernel(fpu);
                return 1;
        }

        /*
         * Legacy FPU register saving, FNSAVE always clears FPU registers,
         * so we have to mark them inactive:
         */
        asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

        return 0;
}
EXPORT_SYMBOL(copy_fpregs_to_fpstate);

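/*
 * Illustrative sketch, not part of this file: the caller contract. A
 * zero return means the FNSAVE path ran and the registers no longer
 * hold the task's state, so a caller that needs them live must reload
 * from the just-saved buffer (fpu__save() below does exactly this):
 */
#if 0
        if (!copy_fpregs_to_fpstate(fpu))
                copy_kernel_to_fpregs(&fpu->state);
#endif
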
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
        preempt_disable();

        WARN_ON_FPU(!irq_fpu_usable());
        WARN_ON_FPU(this_cpu_read(in_kernel_fpu));

        this_cpu_write(in_kernel_fpu, true);

        if (!(current->flags & PF_KTHREAD) &&
            !test_thread_flag(TIF_NEED_FPU_LOAD)) {
                set_thread_flag(TIF_NEED_FPU_LOAD);
                /*
                 * Ignore return value -- we don't care if reg state
                 * is clobbered.
                 */
                copy_fpregs_to_fpstate(&current->thread.fpu);
        }
        __cpu_invalidate_fpregs_state();

        /* Put sane initial values into the control registers. */
        if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
                ldmxcsr(MXCSR_DEFAULT);

        if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
                asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);

void kernel_fpu_end(void)
{
        WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));

        this_cpu_write(in_kernel_fpu, false);
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

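/*
 * Illustrative sketch, not part of this file: a begin/end section runs
 * with preemption disabled, so it must be short and must not sleep.
 * Callers that only need the SSE control state sane can pass
 * KFPU_MXCSR; kernel_fpu_begin() is the convenience wrapper that picks
 * a default mask.
 */
#if 0
        kernel_fpu_begin_mask(KFPU_MXCSR);
        /* ... short, non-sleeping SIMD work ... */
        kernel_fpu_end();
#endif
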
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
        WARN_ON_FPU(fpu != &current->thread.fpu);

        fpregs_lock();
        trace_x86_fpu_before_save(fpu);

        if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
                if (!copy_fpregs_to_fpstate(fpu)) {
                        copy_kernel_to_fpregs(&fpu->state);
                }
        }

        trace_x86_fpu_after_save(fpu);
        fpregs_unlock();
}

static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
        /*
         * XRSTORS requires these bits set in xcomp_bv, or it will
         * trigger #GP:
         */
        xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
}

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
        fx->cwd = 0x37f;
        fx->mxcsr = MXCSR_DEFAULT;
}

/*
 * Legacy x87 fpstate init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
        fp->cwd = 0xffff037fu;          /* default control word: all exceptions masked */
        fp->swd = 0xffff0000u;          /* status word: no exceptions pending */
        fp->twd = 0xffffffffu;          /* tag word: all registers empty */
        fp->fos = 0xffff0000u;
}

void fpstate_init(union fpregs_state *state)
{
        if (!static_cpu_has(X86_FEATURE_FPU)) {
                fpstate_init_soft(&state->soft);
                return;
        }

        memset(state, 0, fpu_kernel_xstate_size);

        if (static_cpu_has(X86_FEATURE_XSAVES))
                fpstate_init_xstate(&state->xsave);
        if (static_cpu_has(X86_FEATURE_FXSR))
                fpstate_init_fxstate(&state->fxsave);
        else
                fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

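/*
 * Illustrative sketch, not part of this file: an external user such as
 * KVM initializes a separately allocated FPU state area with
 * fpstate_init() before first use. The cache name below is
 * hypothetical.
 */
#if 0
        struct fpu *guest_fpu = kmem_cache_zalloc(fpu_cache, GFP_KERNEL);

        if (guest_fpu)
                fpstate_init(&guest_fpu->state);
#endif
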
int fpu__copy(struct task_struct *dst, struct task_struct *src)
{
        struct fpu *dst_fpu = &dst->thread.fpu;
        struct fpu *src_fpu = &src->thread.fpu;

        dst_fpu->last_cpu = -1;

        if (!static_cpu_has(X86_FEATURE_FPU))
                return 0;

        WARN_ON_FPU(src_fpu != &current->thread.fpu);

        /*
         * Don't let 'init optimized' areas of the XSAVE area
         * leak into the child task:
         */
        memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);

        /*
         * If the FPU registers are not current just memcpy() the state.
         * Otherwise save current FPU registers directly into the child's FPU
         * context, without any memory-to-memory copying.
         *
         * ( The function 'fails' in the FNSAVE case, which destroys
         *   register contents so we have to load them back. )
         */
        fpregs_lock();
        if (test_thread_flag(TIF_NEED_FPU_LOAD))
                memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);

        else if (!copy_fpregs_to_fpstate(dst_fpu))
                copy_kernel_to_fpregs(&dst_fpu->state);

        fpregs_unlock();

        set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);

        trace_x86_fpu_copy_src(src_fpu);
        trace_x86_fpu_copy_dst(dst_fpu);

        return 0;
}

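/*
 * Illustrative sketch, not part of this file: fpu__copy() runs on the
 * fork() path, roughly the way arch_dup_task_struct() invokes it after
 * duplicating the task_struct:
 */
#if 0
        memcpy(dst, src, arch_task_struct_size);
        return fpu__copy(dst, src);
#endif
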
/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
static void fpu__initialize(struct fpu *fpu)
{
        WARN_ON_FPU(fpu != &current->thread.fpu);

        set_thread_flag(TIF_NEED_FPU_LOAD);
        fpstate_init(&fpu->state);
        trace_x86_fpu_init_state(fpu);
}

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
        preempt_disable();

        if (fpu == &current->thread.fpu) {
                /* Ignore delayed exceptions from user space */
                asm volatile("1: fwait\n"
                             "2:\n"
                             _ASM_EXTABLE(1b, 2b));
                fpregs_deactivate(fpu);
        }

        trace_x86_fpu_dropped(fpu);

        preempt_enable();
}

/*
 * Clear FPU registers by setting them up from the init fpstate.
 * Caller must do fpregs_[un]lock() around it.
 */
static inline void copy_init_fpstate_to_fpregs(u64 features_mask)
{
        if (use_xsave())
                os_xrstor(&init_fpstate.xsave, features_mask);
        else if (static_cpu_has(X86_FEATURE_FXSR))
                copy_kernel_to_fxregs(&init_fpstate.fxsave);
        else
                copy_kernel_to_fregs(&init_fpstate.fsave);

        if (boot_cpu_has(X86_FEATURE_OSPKE))
                copy_init_pkru_to_fpregs();
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
static void fpu__clear(struct fpu *fpu, bool user_only)
{
        WARN_ON_FPU(fpu != &current->thread.fpu);

        if (!static_cpu_has(X86_FEATURE_FPU)) {
                fpu__drop(fpu);
                fpu__initialize(fpu);
                return;
        }

        fpregs_lock();

        if (user_only) {
                if (!fpregs_state_valid(fpu, smp_processor_id()) &&
                    xfeatures_mask_supervisor())
                        os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
                copy_init_fpstate_to_fpregs(xfeatures_mask_user());
        } else {
                copy_init_fpstate_to_fpregs(xfeatures_mask_all);
        }

        fpregs_mark_activate();
        fpregs_unlock();
}

void fpu__clear_user_states(struct fpu *fpu)
{
        fpu__clear(fpu, true);
}

void fpu__clear_all(struct fpu *fpu)
{
        fpu__clear(fpu, false);
}

/*
 * Load FPU context before returning to userspace.
 */
void switch_fpu_return(void)
{
        if (!static_cpu_has(X86_FEATURE_FPU))
                return;

        __fpregs_load_activate();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);

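/*
 * Illustrative sketch, not part of this file: the exit-to-user path
 * calls this when TIF_NEED_FPU_LOAD is set, roughly:
 */
#if 0
        if (unlikely(ti_work & _TIF_NEED_FPU_LOAD))
                switch_fpu_return();
#endif
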
#ifdef CONFIG_X86_DEBUG_FPU
/*
 * If current FPU state according to its tracking (loaded FPU context on this
 * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
 * loaded on return to userland.
 */
void fpregs_assert_state_consistent(void)
{
        struct fpu *fpu = &current->thread.fpu;

        if (test_thread_flag(TIF_NEED_FPU_LOAD))
                return;

        WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
}
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
#endif

void fpregs_mark_activate(void)
{
        struct fpu *fpu = &current->thread.fpu;

        fpregs_activate(fpu);
        fpu->last_cpu = smp_processor_id();
        clear_thread_flag(TIF_NEED_FPU_LOAD);
}
EXPORT_SYMBOL_GPL(fpregs_mark_activate);

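/*
 * Illustrative sketch, not part of this file: code that loads a task's
 * saved state back into the registers by hand does so under
 * fpregs_lock() and then marks the registers as owned:
 */
#if 0
        fpregs_lock();
        copy_kernel_to_fpregs(&fpu->state);
        fpregs_mark_activate();
        fpregs_unlock();
#endif
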
/*
 * x87 math exception handling:
 */
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
        int err;

        if (trap_nr == X86_TRAP_MF) {
                unsigned short cwd, swd;
                /*
                 * (~cwd & swd) will mask out exceptions that are not set to unmasked
                 * status.  0x3f is the exception bits in these regs, 0x200 is the
                 * C1 reg you need in case of a stack fault, 0x040 is the stack
                 * fault bit.  We should only be taking one exception at a time,
                 * so if this combination doesn't produce any single exception,
                 * then we have a bad program that isn't synchronizing its FPU usage
                 * and it will suffer the consequences since we won't be able to
                 * fully reproduce the context of the exception.
                 */
                if (boot_cpu_has(X86_FEATURE_FXSR)) {
                        cwd = fpu->state.fxsave.cwd;
                        swd = fpu->state.fxsave.swd;
                } else {
                        cwd = (unsigned short)fpu->state.fsave.cwd;
                        swd = (unsigned short)fpu->state.fsave.swd;
                }

                err = swd & ~cwd;
        } else {
                /*
                 * The SIMD FPU exceptions are handled a little differently, as there
                 * is only a single status/control register.  Thus, to determine which
                 * unmasked exception was caught we must mask the exception mask bits
                 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
                 */
                unsigned short mxcsr = MXCSR_DEFAULT;

                if (boot_cpu_has(X86_FEATURE_XMM))
                        mxcsr = fpu->state.fxsave.mxcsr;

                err = ~(mxcsr >> 7) & mxcsr;
        }

        if (err & 0x001) {      /* Invalid op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                return FPE_FLTINV;
        } else if (err & 0x004) { /* Divide by Zero */
                return FPE_FLTDIV;
        } else if (err & 0x008) { /* Overflow */
                return FPE_FLTOVF;
        } else if (err & 0x012) { /* Denormal, Underflow */
                return FPE_FLTUND;
        } else if (err & 0x020) { /* Precision */
                return FPE_FLTRES;
        }

        /*
         * If we're using IRQ 13, or supposedly even some trap
         * X86_TRAP_MF implementations, it's possible
         * we get a spurious trap, which is not an error.
         */
        return 0;
}

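/*
 * Illustrative worked example, not part of this file: with the x87
 * default control word 0x037f every exception is masked, so err above
 * is always 0. If a task unmasks divide-by-zero (cwd = 0x037b) and then
 * divides by zero (swd bit 0x004 set), err = swd & ~cwd = 0x004 and the
 * function returns FPE_FLTDIV.
 */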