/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__save(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct task_struct *dst, struct task_struct *src);
extern void fpu__clear_user_states(struct fpu *fpu);
extern void fpu__clear_all(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif
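
/*
 * WARN_ON_FPU() guards "should never happen" conditions, e.g. the
 * WARN_ON_FPU(err) checks after kernel-buffer XSAVE/XRSTOR below; with
 * CONFIG_X86_DEBUG_FPU=n it merely evaluates its argument.
 */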

/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
        return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
        return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
        return static_cpu_has(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

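/*
 * Low-level instruction wrappers. Each emits a single FPU instruction with
 * an exception-table fixup:
 *
 *  - user_insn() accesses user memory: it brackets the instruction with
 *    STAC/CLAC and returns -1 if the access faults.
 *  - kernel_insn_err() does the same for kernel memory, without STAC/CLAC.
 *  - kernel_insn() hands a fault to ex_handler_fprestore() instead of
 *    returning an error code.
 */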
#define user_insn(insn, output, input...)                               \
({                                                                      \
        int err;                                                        \
                                                                        \
        might_fault();                                                  \
                                                                        \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:" #insn "\n\t"                                  \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:  movl $-1,%[err]\n"                            \
                     "    jmp  2b\n"                                    \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : [err] "=r" (err), output                         \
                     : "0"(0), input);                                  \
        err;                                                            \
})

#define kernel_insn_err(insn, output, input...)                         \
({                                                                      \
        int err;                                                        \
        asm volatile("1:" #insn "\n\t"                                  \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:  movl $-1,%[err]\n"                            \
                     "    jmp  2b\n"                                    \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : [err] "=r" (err), output                         \
                     : "0"(0), input);                                  \
        err;                                                            \
})

#define kernel_insn(insn, output, input...)                             \
        asm volatile("1:" #insn "\n\t"                                  \
                     "2:\n"                                             \
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)  \
                     : output : input)

static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
        return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
        else
                return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
}
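
/*
 * Note that the "q" suffix matters on 64-bit: plain FXSAVE without REX.W
 * stores the instruction/operand pointers (FIP/FDP) as 32-bit values,
 * while FXSAVEQ stores the full 64-bit pointers.
 */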

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else
                kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else
                return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else
                return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
        kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
{
        return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
        return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
        if (IS_ENABLED(CONFIG_X86_32))
                asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
        else
                asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
}

static inline void fxsave(struct fxregs_state *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
        else
                asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE           ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT        ".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES          ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR          ".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS         ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
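
/*
 * The instructions are emitted as raw opcode bytes so that the file still
 * assembles with older toolchains that do not know the XSAVE family of
 * mnemonics. All of these instructions take the requested-feature bitmap
 * in EDX:EAX, which is what the lmask/hmask split below is for.
 */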

#define XSTATE_OP(op, st, lmask, hmask, err)                            \
        asm volatile("1:" op "\n\t"                                     \
                     "xor %[err], %[err]\n"                             \
                     "2:\n\t"                                           \
                     ".pushsection .fixup,\"ax\"\n\t"                   \
                     "3: movl $-2,%[err]\n\t"                           \
                     "jmp 2b\n\t"                                       \
                     ".popsection\n\t"                                  \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : [err] "=r" (err)                                 \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports the compact
 * format and supervisor states in addition to the modified optimization of
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports the modified optimization which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We use it here as the address
 * of the instruction at which an exception might occur.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)                             \
        asm volatile(ALTERNATIVE_2(XSAVE,                               \
                                   XSAVEOPT, X86_FEATURE_XSAVEOPT,      \
                                   XSAVES,   X86_FEATURE_XSAVES)        \
                     "\n"                                               \
                     "xor %[err], %[err]\n"                             \
                     "3:\n"                                             \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "4: movl $-2, %[err]\n"                            \
                     "jmp 3b\n"                                         \
                     ".popsection\n"                                    \
                     _ASM_EXTABLE(661b, 4b)                             \
                     : [err] "=r" (err)                                 \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)                               \
        asm volatile(ALTERNATIVE(XRSTOR,                                \
                                 XRSTORS, X86_FEATURE_XSAVES)           \
                     "\n"                                               \
                     "3:\n"                                             \
                     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
                     :                                                  \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
        u64 mask = -1;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON(system_state != SYSTEM_BOOTING);

        if (boot_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

        /*
         * We should never fault when copying from a kernel buffer, and the FPU
         * state we set at boot time should be valid.
         */
        WARN_ON_FPU(err);
}
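
/*
 * Typical boot-time use (illustrative sketch): the xstate setup code loads
 * the freshly constructed init state into the registers with something like
 *
 *      copy_kernel_to_xregs_booting(&init_fpstate.xsave);
 *
 * i.e. an all-ones mask restoring every enabled component to its init value.
 */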

/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
        u64 mask = xfeatures_mask_all;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON_FPU(!alternatives_patched);

        XSTATE_XSAVE(xstate, lmask, hmask, err);

        /* We should never fault when copying to a kernel buffer: */
        WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;

        XSTATE_XRESTORE(xstate, lmask, hmask);
}

/*
 * Save xstate to a user-space xsave area.
 *
 * We don't use the modified optimization because the preceding
 * xrstor/xrstors might have tracked a different context.
 *
 * We don't use the compacted xsave area format, for backward compatibility
 * with old applications which don't understand it.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
        u64 mask = xfeatures_mask_user();
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        /*
         * Clear the xsave header first, so that reserved fields are
         * initialized to zero.
         */
        err = __clear_user(&buf->header, sizeof(buf->header));
        if (unlikely(err))
                return -EFAULT;

        stac();
        XSTATE_OP(XSAVE, buf, lmask, hmask, err);
        clac();

        return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
        struct xregs_state *xstate = ((__force struct xregs_state *)buf);
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        stac();
        XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
        clac();

        return err;
}
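
/*
 * Note that the user-space paths above use plain XSAVE/XRSTOR, never
 * XSAVES/XRSTORS: user buffers are kept in the standard (non-compacted)
 * format for compatibility, and XRSTORS only accepts the compacted format.
 */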

/*
 * Restore xstate from a kernel-space xsave area, returning an error code
 * instead of raising an exception.
 */
static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

        return err;
}

extern int copy_fpregs_to_fpstate(struct fpu *fpu);

static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
        if (use_xsave()) {
                copy_kernel_to_xregs(&fpstate->xsave, mask);
        } else {
                if (use_fxsr())
                        copy_kernel_to_fxregs(&fpstate->fxsave);
                else
                        copy_kernel_to_fregs(&fpstate->fsave);
        }
}
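
/*
 * The restore priority mirrors the save side: the XSAVE family if available,
 * then FXSR, then legacy FRSTOR as the lowest common denominator.
 */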

static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
        /*
         * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
         * pending. Clear the x87 state here by setting it to fixed values.
         * The [addr] operand is an arbitrary kernel variable that should be
         * in L1 cache.
         */
        if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
                asm volatile(
                        "fnclex\n\t"
                        "emms\n\t"
                        "fildl %P[addr]"        /* set F?P to defined value */
                        : : [addr] "m" (fpstate));
        }

        __copy_kernel_to_fpregs(fpstate, -1);
}
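
/*
 * Why the fildl: on affected CPUs a later FXSAVE would otherwise store the
 * stale FIP/FDP values left behind by the previous task. Executing an x87
 * load from a kernel variable first forces FIP to a harmless, defined value.
 */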

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * matches the FPU.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: CPU if using the CPU for something else
 * (with preemption disabled), FPU for the current task, or a task that
 * is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
        __this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
        fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
        return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
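
/*
 * Both halves of the check matter: fpu_fpregs_owner_ctx catches another
 * context having taken over this CPU's registers, and last_cpu catches the
 * task having last run (and left its registers) on a different CPU.
 */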

/*
 * These generally need preemption protection to work; try to avoid using
 * them on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
        this_cpu_write(fpu_fpregs_owner_ctx, NULL);
        trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
        this_cpu_write(fpu_fpregs_owner_ctx, fpu);
        trace_x86_fpu_regs_activated(fpu);
}

/*
 * Internal helper, do not use directly. Use switch_fpu_return() instead.
 */
static inline void __fpregs_load_activate(void)
{
        struct fpu *fpu = &current->thread.fpu;
        int cpu = smp_processor_id();

        if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
                return;

        if (!fpregs_state_valid(fpu, cpu)) {
                copy_kernel_to_fpregs(&fpu->state);
                fpregs_activate(fpu);
                fpu->last_cpu = cpu;
        }
        clear_thread_flag(TIF_NEED_FPU_LOAD);
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
 *    will get loaded on return to userspace, or when the kernel needs it.
 *
 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
 * hold current()'s FPU state.
 *
 * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may not
 * hold current()'s FPU registers; they must be loaded before returning
 * to userland or before their content is used otherwise.
 *
 * The FPU context is only stored/restored for a user task and
 * PF_KTHREAD is used to distinguish between kernel and user threads.
 */
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
        if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
                if (!copy_fpregs_to_fpstate(old_fpu))
                        old_fpu->last_cpu = -1;
                else
                        old_fpu->last_cpu = cpu;

                /* But leave fpu_fpregs_owner_ctx! */
                trace_x86_fpu_regs_deactivated(old_fpu);
        }
}
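
/*
 * Usage sketch (illustrative, not a definition from this file): the context
 * switch code pairs the two stages roughly like
 *
 *      switch_fpu_prepare(&prev->thread.fpu, cpu);
 *      ... switch stacks, TLS, etc. ...
 *      switch_fpu_finish(&next->thread.fpu);
 *
 * deferring the expensive register restore via TIF_NEED_FPU_LOAD.
 */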

/*
 * Misc helper functions:
 */

/*
 * Load PKRU from the FPU context if available. Delay loading of the
 * complete FPU state until the return to userland.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu)
{
        u32 pkru_val = init_pkru_value;
        struct pkru_state *pk;

        if (!static_cpu_has(X86_FEATURE_FPU))
                return;

        set_thread_flag(TIF_NEED_FPU_LOAD);

        if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
                return;

        /*
         * PKRU state is switched eagerly because it needs to be valid before we
         * return to userland e.g. for a copy_to_user() operation.
         */
        if (!(current->flags & PF_KTHREAD)) {
                /*
                 * If the PKRU bit in xsave.header.xfeatures is not set,
                 * then the PKRU component is in its init state, which means
                 * XRSTOR will set PKRU to 0 and get_xsave_addr() will return
                 * NULL because the PKRU value in memory is not valid. In that
                 * case pkru_val has to be set to 0, not to init_pkru_value.
                 */
                pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
                pkru_val = pk ? pk->pkru : 0;
        }
        __write_pkru(pkru_val);
}

#endif /* _ASM_X86_FPU_INTERNAL_H */