/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */
#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>
/*
 * High level FPU state handling functions:
 */
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct task_struct *dst, struct task_struct *src);
extern void fpu__clear_user_states(struct fpu *fpu);
extern void fpu__clear_all(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);
/*
 * Debugging facility: the non-debug variant still evaluates its argument
 * (for side effects and type checking) but expands to 0.
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif
/*
 * FPU related CPU feature flag helper routines (static_cpu_has() is
 * patched via alternatives, so these cost nothing at runtime):
 */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}
/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * raise a #GP fault:
	 */
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
}

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->mxcsr = MXCSR_DEFAULT;
}

extern void fpstate_sanitize_xstate(struct fpu *fpu);
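
/*
 * Exception-fixup wrappers used below to run a single FPU save/restore
 * instruction:
 *
 *  - user_insn():       operates on a user-space buffer; runs the instruction
 *                       between STAC/CLAC and evaluates to -1 if it faults,
 *                       0 otherwise.
 *  - kernel_insn_err(): same fixup pattern for kernel buffers, evaluating to
 *                       an error code instead of letting the fault propagate.
 *  - kernel_insn():     kernel-buffer variant whose faults are routed to
 *                       ex_handler_fprestore rather than reported to the
 *                       caller.
 */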
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	might_fault();							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
#define kernel_insn_err(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
#define kernel_insn(insn, output, input...)				\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\
		     : output : input)
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
{
	return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
}

static inline void fxsave(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
}
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
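
/*
 * The instructions above are emitted as raw opcode bytes (REX_PREFIX selects
 * the 64-bit form on x86-64), so assembling this file does not depend on the
 * toolchain knowing the XSAVE* mnemonics.
 */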
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: movl $-2,%[err]\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to the modified optimization of
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports the modified optimization which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction at which we might take an exception.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compact XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives can not be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/*
	 * We should never fault when copying from a kernel buffer, and the FPU
	 * state we set at boot time should be valid.
	 */
	WARN_ON_FPU(err);
}
/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = xfeatures_mask_all;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}
/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(xstate, lmask, hmask);
}
/*
 * Save xstate to user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format for the user-space xsave area, for
 * backward compatibility with old applications which don't understand the
 * compacted format.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	u64 mask = xfeatures_mask_user();
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}
/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}
/*
 * Restore xstate from kernel space xsave area, return an error code instead of
 * raising an exception.
 */
static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}
extern int copy_fpregs_to_fpstate(struct fpu *fpu);

static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, mask);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}
static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * The [addr] operand is a dummy memory reference that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate, -1);
}
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * of this CPU points to that same fpu context.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: the CPU if you are using it for something else
 * (with preemption disabled), the FPU for the current task, or for a task
 * that is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
	__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
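
/*
 * Invalidating from the task side: forcing last_cpu to an impossible value
 * guarantees that fpregs_state_valid() fails for this fpu on every CPU.
 */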
static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
	fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
/*
 * These generally need preemption protection to work,
 * so try to avoid using them on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}
/*
 * Internal helper, do not use directly. Use switch_fpu_return() instead.
 */
static inline void __fpregs_load_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;
	int cpu = smp_processor_id();

	if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
		return;

	if (!fpregs_state_valid(fpu, cpu)) {
		copy_kernel_to_fpregs(&fpu->state);
		fpregs_activate(fpu);
		fpu->last_cpu = cpu;
	}
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}
/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
 *    will get loaded on return to userspace, or when the kernel needs it.
 *
 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
 * are saved in the current thread's FPU register state.
 *
 * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may not
 * hold current()'s FPU registers. The registers must be loaded before
 * returning to userland, or before their content is used in any other way.
 *
 * The FPU context is only stored/restored for a user task and
 * PF_KTHREAD is used to distinguish between kernel and user threads.
 */
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
	if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		trace_x86_fpu_regs_deactivated(old_fpu);
	}
}
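
/*
 * Illustrative call sequence from the context switch path (sketch only,
 * not the actual __switch_to() code):
 *
 *	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
 *		switch_fpu_prepare(&prev->thread.fpu, cpu);
 *	... switch stacks, segment and thread-local state ...
 *	switch_fpu_finish(&next->thread.fpu);
 */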
/*
 * Misc helper functions:
 */

/*
 * Load PKRU from the FPU context if available. Delay loading of the
 * complete FPU state until the return to userland.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu)
{
	u32 pkru_val = init_pkru_value;
	struct pkru_state *pk;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	set_thread_flag(TIF_NEED_FPU_LOAD);

	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/*
	 * PKRU state is switched eagerly because it needs to be valid before we
	 * return to userland, e.g. for a copy_to_user() operation.
	 */
	if (!(current->flags & PF_KTHREAD)) {
		/*
		 * If the PKRU bit in xsave.header.xfeatures is not set,
		 * then the PKRU component was in its init state, which means
		 * XRSTOR will set PKRU to 0. In that case get_xsave_addr()
		 * returns NULL because the PKRU value in memory is not valid,
		 * so pkru_val has to be set to 0 and not to init_pkru_value.
		 */
		pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
		pkru_val = pk ? pk->pkru : 0;
	}

	__write_pkru(pkru_val);
}

#endif /* _ASM_X86_FPU_INTERNAL_H */