arch/x86/kernel/fpu/core.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 1994 Linus Torvalds
4  *
5  *  Pentium III FXSR, SSE support
6  *  General FPU state handling cleanups
7  *      Gareth Hughes <gareth@valinux.com>, May 2000
8  */
9 #include <asm/fpu/api.h>
10 #include <asm/fpu/regset.h>
11 #include <asm/fpu/sched.h>
12 #include <asm/fpu/signal.h>
13 #include <asm/fpu/types.h>
14 #include <asm/traps.h>
15 #include <asm/irq_regs.h>
16
17 #include <linux/hardirq.h>
18 #include <linux/pkeys.h>
19 #include <linux/vmalloc.h>
20
21 #include "context.h"
22 #include "internal.h"
23 #include "legacy.h"
24 #include "xstate.h"
25
26 #define CREATE_TRACE_POINTS
27 #include <asm/trace/fpu.h>
28
29 #ifdef CONFIG_X86_64
30 DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic);
31 DEFINE_PER_CPU(u64, xfd_state);
32 #endif
33
34 /* The FPU state configuration data for kernel and user space */
35 struct fpu_state_config fpu_kernel_cfg __ro_after_init;
36 struct fpu_state_config fpu_user_cfg __ro_after_init;
37
38 /*
39  * Represents the initial FPU state. It's mostly (but not completely) zeroes,
40  * depending on the FPU hardware format:
41  */
42 struct fpstate init_fpstate __ro_after_init;
43
44 /*
45  * Track whether the kernel is using the FPU state
46  * currently.
47  *
48  * This flag is used:
49  *
50  *   - by IRQ context code to potentially use the FPU
51  *     if it's unused.
52  *
53  *   - to debug kernel_fpu_begin()/end() correctness
54  */
55 static DEFINE_PER_CPU(bool, in_kernel_fpu);
56
57 /*
58  * Track which context is using the FPU on the CPU:
59  */
60 DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
61
62 static bool kernel_fpu_disabled(void)
63 {
64         return this_cpu_read(in_kernel_fpu);
65 }
66
67 static bool interrupted_kernel_fpu_idle(void)
68 {
69         return !kernel_fpu_disabled();
70 }
71
72 /*
73  * Were we in user mode (or vm86 mode) when we were
74  * interrupted?
75  *
76  * Doing kernel_fpu_begin/end() is ok if we are running
77  * in an interrupt context from user mode - we'll just
78  * save the FPU state as required.
79  */
80 static bool interrupted_user_mode(void)
81 {
82         struct pt_regs *regs = get_irq_regs();
83         return regs && user_mode(regs);
84 }
85
86 /*
87  * Can we use the FPU in kernel mode with the
88  * whole "kernel_fpu_begin/end()" sequence?
89  *
90  * It's always ok in process context (ie "not interrupt")
91  * but it is sometimes ok even from an irq.
92  */
93 bool irq_fpu_usable(void)
94 {
95         return !in_interrupt() ||
96                 interrupted_user_mode() ||
97                 interrupted_kernel_fpu_idle();
98 }
99 EXPORT_SYMBOL(irq_fpu_usable);
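/*
 * Illustrative usage sketch (not part of this file): kernel code that
 * wants to use FP/SIMD registers, e.g. an optimized checksum or crypto
 * routine, is expected to gate the FPU path on irq_fpu_usable() and fall
 * back to a scalar implementation otherwise.  do_scalar_fallback() below
 * is a hypothetical placeholder:
 *
 *	if (!irq_fpu_usable())
 *		return do_scalar_fallback(data, len);
 *
 *	kernel_fpu_begin();
 *	// ... use SSE/AVX registers ...
 *	kernel_fpu_end();
 */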
100
101 /*
102  * Save the FPU register state in fpu->fpstate->regs. The register state is
103  * preserved.
104  *
105  * Must be called with fpregs_lock() held.
106  *
107  * The legacy FNSAVE instruction clears all FPU state unconditionally, so
108  * register state has to be reloaded. That might be a pointless exercise
109  * when the FPU is going to be used by another task right after that. But
110  * this only affects 20+ year old 32-bit systems and avoids conditionals all
111  * over the place.
112  *
113  * FXSAVE and all XSAVE variants preserve the FPU register state.
114  */
115 void save_fpregs_to_fpstate(struct fpu *fpu)
116 {
117         if (likely(use_xsave())) {
118                 os_xsave(fpu->fpstate);
119
120                 /*
121                  * AVX512 state is tracked here because its use is
122                  * known to slow the max clock speed of the core.
123                  */
124                 if (fpu->fpstate->regs.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
125                         fpu->avx512_timestamp = jiffies;
126                 return;
127         }
128
129         if (likely(use_fxsr())) {
130                 fxsave(&fpu->fpstate->regs.fxsave);
131                 return;
132         }
133
134         /*
135          * Legacy FPU register saving: FNSAVE always clears the FPU registers,
136          * so they have to be reloaded from the memory state.
137          */
138         asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->fpstate->regs.fsave));
139         frstor(&fpu->fpstate->regs.fsave);
140 }
141
142 void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
143 {
144         /*
145          * AMD K7/K8 and later CPUs up to Zen don't save/restore
146          * FDP/FIP/FOP unless an exception is pending. Clear the x87 state
147          * here by setting it to fixed values.  "m" is an arbitrary memory
148          * location that should already be resident in the L1 cache.
149          */
150         if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
151                 asm volatile(
152                         "fnclex\n\t"
153                         "emms\n\t"
154                         "fildl %P[addr]"        /* set F?P to defined value */
155                         : : [addr] "m" (fpstate));
156         }
157
158         if (use_xsave()) {
159                 /*
160                  * Dynamically enabled features are enabled in XCR0, but
161                  * using them also requires the corresponding bits in XFD
162                  * to be cleared.  If the bits are set, a related instruction
163                  * raises #NM. This allows the larger FPU buffer to be
164                  * allocated lazily from the #NM handler, or the task to be
165                  * killed there if it has no permission to use the feature
166                  * (disabling the feature in XCR0 instead would raise #UD).
167                  *
168                  * XFD state follows the same lifetime rules as XSTATE. To
169                  * restore state correctly, XFD has to be updated before
170                  * XRSTORS; otherwise the component would stay in, or go
171                  * into, init state even if the bits are set in
172                  * fpstate::regs::xsave::xfeatures.
173                  */
174                 xfd_update_state(fpstate);
175
176                 /*
177                  * Restoring state always needs to modify all features
178                  * which are in @mask even if the current task cannot use
179                  * extended features.
180                  *
181                  * So fpstate->xfeatures cannot be used here, because then
182                  * a feature for which the task has no permission but was
183                  * used by the previous task would not go into init state.
184                  */
185                 mask = fpu_kernel_cfg.max_features & mask;
186
187                 os_xrstor(fpstate, mask);
188         } else {
189                 if (use_fxsr())
190                         fxrstor(&fpstate->regs.fxsave);
191                 else
192                         frstor(&fpstate->regs.fsave);
193         }
194 }
195
196 void fpu_reset_from_exception_fixup(void)
197 {
198         restore_fpregs_from_fpstate(&init_fpstate, XFEATURE_MASK_FPSTATE);
199 }
200
201 #if IS_ENABLED(CONFIG_KVM)
202 static void __fpstate_reset(struct fpstate *fpstate, u64 xfd);
203
204 static void fpu_init_guest_permissions(struct fpu_guest *gfpu)
205 {
206         struct fpu_state_perm *fpuperm;
207         u64 perm;
208
209         if (!IS_ENABLED(CONFIG_X86_64))
210                 return;
211
212         spin_lock_irq(&current->sighand->siglock);
213         fpuperm = &current->group_leader->thread.fpu.guest_perm;
214         perm = fpuperm->__state_perm;
215
216         /* First fpstate allocation locks down permissions. */
217         WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED);
218
219         spin_unlock_irq(&current->sighand->siglock);
220
221         gfpu->perm = perm & ~FPU_GUEST_PERM_LOCKED;
222 }
223
224 bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
225 {
226         struct fpstate *fpstate;
227         unsigned int size;
228
229         size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
230         fpstate = vzalloc(size);
231         if (!fpstate)
232                 return false;
233
234         /* Leave xfd at 0 (the reset value defined by the spec) */
235         __fpstate_reset(fpstate, 0);
236         fpstate_init_user(fpstate);
237         fpstate->is_valloc      = true;
238         fpstate->is_guest       = true;
239
240         gfpu->fpstate           = fpstate;
241         gfpu->xfeatures         = fpu_user_cfg.default_features;
242         gfpu->perm              = fpu_user_cfg.default_features;
243         gfpu->uabi_size         = fpu_user_cfg.default_size;
244         fpu_init_guest_permissions(gfpu);
245
246         return true;
247 }
248 EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate);
249
250 void fpu_free_guest_fpstate(struct fpu_guest *gfpu)
251 {
252         struct fpstate *fps = gfpu->fpstate;
253
254         if (!fps)
255                 return;
256
257         if (WARN_ON_ONCE(!fps->is_valloc || !fps->is_guest || fps->in_use))
258                 return;
259
260         gfpu->fpstate = NULL;
261         vfree(fps);
262 }
263 EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate);
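/*
 * Illustrative pairing (a sketch, not part of this file): a hypervisor
 * such as KVM typically allocates one guest fpstate per vCPU at creation
 * time and releases it on destruction; the vcpu->arch.guest_fpu layout is
 * assumed here:
 *
 *	if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu))
 *		return -ENOMEM;
 *	...
 *	fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
 */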
264
265 /*
266  * fpu_enable_guest_xfd_features - Check xfeatures against guest permissions and enable them
267  * @guest_fpu:         Pointer to the guest FPU container
268  * @xfeatures:         Features requested by guest CPUID
269  *
270  * Enable all dynamic xfeatures according to guest permissions and requested CPUID.
271  *
272  * Return: 0 on success, error code otherwise
273  */
274 int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures)
275 {
276         lockdep_assert_preemption_enabled();
277
278         /* Nothing to do if all requested features are already enabled. */
279         xfeatures &= ~guest_fpu->xfeatures;
280         if (!xfeatures)
281                 return 0;
282
283         return __xfd_enable_feature(xfeatures, guest_fpu);
284 }
285 EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features);
286
287 #ifdef CONFIG_X86_64
288 void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd)
289 {
290         fpregs_lock();
291         guest_fpu->fpstate->xfd = xfd;
292         if (guest_fpu->fpstate->in_use)
293                 xfd_update_state(guest_fpu->fpstate);
294         fpregs_unlock();
295 }
296 EXPORT_SYMBOL_GPL(fpu_update_guest_xfd);
297 #endif /* CONFIG_X86_64 */
298
299 int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
300 {
301         struct fpstate *guest_fps = guest_fpu->fpstate;
302         struct fpu *fpu = &current->thread.fpu;
303         struct fpstate *cur_fps = fpu->fpstate;
304
305         fpregs_lock();
306         if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD))
307                 save_fpregs_to_fpstate(fpu);
308
309         /* Swap fpstate */
310         if (enter_guest) {
311                 fpu->__task_fpstate = cur_fps;
312                 fpu->fpstate = guest_fps;
313                 guest_fps->in_use = true;
314         } else {
315                 guest_fps->in_use = false;
316                 fpu->fpstate = fpu->__task_fpstate;
317                 fpu->__task_fpstate = NULL;
318         }
319
320         cur_fps = fpu->fpstate;
321
322         if (!cur_fps->is_confidential) {
323                 /* Includes XFD update */
324                 restore_fpregs_from_fpstate(cur_fps, XFEATURE_MASK_FPSTATE);
325         } else {
326                 /*
327                  * XSTATE is restored by firmware from encrypted
328                  * memory. Make sure XFD state is correct while
329                  * running with guest fpstate
330                  * running with the guest fpstate.
331                 xfd_update_state(cur_fps);
332         }
333
334         fpregs_mark_activate();
335         fpregs_unlock();
336         return 0;
337 }
338 EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
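/*
 * Illustrative use (a sketch, not part of this file): the hypervisor swaps
 * in the guest fpstate before entering the guest and swaps the task
 * fpstate back afterwards, roughly:
 *
 *	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true);	// load guest FPU
 *	... run the guest ...
 *	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false);	// back to task FPU
 */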
339
340 void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
341                                     unsigned int size, u32 pkru)
342 {
343         struct fpstate *kstate = gfpu->fpstate;
344         union fpregs_state *ustate = buf;
345         struct membuf mb = { .p = buf, .left = size };
346
347         if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
348                 __copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
349         } else {
350                 memcpy(&ustate->fxsave, &kstate->regs.fxsave,
351                        sizeof(ustate->fxsave));
352                 /* Make it restorable on an XSAVE-enabled host */
353                 ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE;
354         }
355 }
356 EXPORT_SYMBOL_GPL(fpu_copy_guest_fpstate_to_uabi);
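/*
 * Illustrative call (a sketch, not part of this file): a KVM_GET_XSAVE
 * style ioctl handler would fill the userspace-visible buffer roughly as
 * below; ubuf and ubuf_size are placeholders for the handler's buffer:
 *
 *	fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, ubuf,
 *				       ubuf_size, vcpu->arch.pkru);
 */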
357
358 int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
359                                    u64 xcr0, u32 *vpkru)
360 {
361         struct fpstate *kstate = gfpu->fpstate;
362         const union fpregs_state *ustate = buf;
363         struct pkru_state *xpkru;
364         int ret;
365
366         if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) {
367                 if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)
368                         return -EINVAL;
369                 if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask)
370                         return -EINVAL;
371                 memcpy(&kstate->regs.fxsave, &ustate->fxsave, sizeof(ustate->fxsave));
372                 return 0;
373         }
374
375         if (ustate->xsave.header.xfeatures & ~xcr0)
376                 return -EINVAL;
377
378         ret = copy_uabi_from_kernel_to_xstate(kstate, ustate);
379         if (ret)
380                 return ret;
381
382         /* Retrieve PKRU if not in init state */
383         if (kstate->regs.xsave.header.xfeatures & XFEATURE_MASK_PKRU) {
384                 xpkru = get_xsave_addr(&kstate->regs.xsave, XFEATURE_PKRU);
385                 *vpkru = xpkru->pkru;
386         }
387
388         /* Ensure that XCOMP_BV is set up for XSAVES */
389         xstate_init_xcomp_bv(&kstate->regs.xsave, kstate->xfeatures);
390         return 0;
391 }
392 EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
393 #endif /* CONFIG_KVM */
394
395 void kernel_fpu_begin_mask(unsigned int kfpu_mask)
396 {
397         preempt_disable();
398
399         WARN_ON_FPU(!irq_fpu_usable());
400         WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
401
402         this_cpu_write(in_kernel_fpu, true);
403
404         if (!(current->flags & PF_KTHREAD) &&
405             !test_thread_flag(TIF_NEED_FPU_LOAD)) {
406                 set_thread_flag(TIF_NEED_FPU_LOAD);
407                 save_fpregs_to_fpstate(&current->thread.fpu);
408         }
409         __cpu_invalidate_fpregs_state();
410
411         /* Put sane initial values into the control registers. */
412         if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
413                 ldmxcsr(MXCSR_DEFAULT);
414
415         if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
416                 asm volatile ("fninit");
417 }
418 EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
419
420 void kernel_fpu_end(void)
421 {
422         WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
423
424         this_cpu_write(in_kernel_fpu, false);
425         preempt_enable();
426 }
427 EXPORT_SYMBOL_GPL(kernel_fpu_end);
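/*
 * A minimal usage sketch (not part of this file): callers that only touch
 * SSE/AVX state can request just the MXCSR initialization and skip the
 * cost of FNINIT; x87 users would include KFPU_387 in the mask:
 *
 *	kernel_fpu_begin_mask(KFPU_MXCSR);
 *	// ... SSE/AVX work, MXCSR starts from MXCSR_DEFAULT ...
 *	kernel_fpu_end();
 */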
428
429 /*
430  * Sync the FPU register state to current's in-memory fpstate when the
431  * current task owns the FPU. The hardware register state is preserved.
432  */
433 void fpu_sync_fpstate(struct fpu *fpu)
434 {
435         WARN_ON_FPU(fpu != &current->thread.fpu);
436
437         fpregs_lock();
438         trace_x86_fpu_before_save(fpu);
439
440         if (!test_thread_flag(TIF_NEED_FPU_LOAD))
441                 save_fpregs_to_fpstate(fpu);
442
443         trace_x86_fpu_after_save(fpu);
444         fpregs_unlock();
445 }
446
447 static inline unsigned int init_fpstate_copy_size(void)
448 {
449         if (!use_xsave())
450                 return fpu_kernel_cfg.default_size;
451
452         /* XSAVE(S) just needs the legacy and the xstate header part */
453         return sizeof(init_fpstate.regs.xsave);
454 }
455
456 static inline void fpstate_init_fxstate(struct fpstate *fpstate)
457 {
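        /*
         * 0x37f is the control word value after FNINIT: all exceptions
         * masked, 64-bit (extended) precision, round to nearest.
         */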
458         fpstate->regs.fxsave.cwd = 0x37f;
459         fpstate->regs.fxsave.mxcsr = MXCSR_DEFAULT;
460 }
461
462 /*
463  * Legacy x87 fpstate state init:
464  */
465 static inline void fpstate_init_fstate(struct fpstate *fpstate)
466 {
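        /*
         * FNINIT-style defaults for the legacy FNSAVE image: CW 0x037f,
         * SW 0 and all tag bits set (all registers empty); the unused
         * upper halves of these 32-bit slots are filled with ones.
         */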
467         fpstate->regs.fsave.cwd = 0xffff037fu;
468         fpstate->regs.fsave.swd = 0xffff0000u;
469         fpstate->regs.fsave.twd = 0xffffffffu;
470         fpstate->regs.fsave.fos = 0xffff0000u;
471 }
472
473 /*
474  * Used in two places:
475  * 1) Early boot to set up init_fpstate for non-XSAVE systems
476  * 2) fpu_alloc_guest_fpstate() which is invoked from KVM
477  */
478 void fpstate_init_user(struct fpstate *fpstate)
479 {
480         if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
481                 fpstate_init_soft(&fpstate->regs.soft);
482                 return;
483         }
484
485         xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);
486
487         if (cpu_feature_enabled(X86_FEATURE_FXSR))
488                 fpstate_init_fxstate(fpstate);
489         else
490                 fpstate_init_fstate(fpstate);
491 }
492
493 static void __fpstate_reset(struct fpstate *fpstate, u64 xfd)
494 {
495         /* Initialize sizes and feature masks */
496         fpstate->size           = fpu_kernel_cfg.default_size;
497         fpstate->user_size      = fpu_user_cfg.default_size;
498         fpstate->xfeatures      = fpu_kernel_cfg.default_features;
499         fpstate->user_xfeatures = fpu_user_cfg.default_features;
500         fpstate->xfd            = xfd;
501 }
502
503 void fpstate_reset(struct fpu *fpu)
504 {
505         /* Set the fpstate pointer to the default fpstate */
506         fpu->fpstate = &fpu->__fpstate;
507         __fpstate_reset(fpu->fpstate, init_fpstate.xfd);
508
509         /* Initialize the permission related info in fpu */
510         fpu->perm.__state_perm          = fpu_kernel_cfg.default_features;
511         fpu->perm.__state_size          = fpu_kernel_cfg.default_size;
512         fpu->perm.__user_state_size     = fpu_user_cfg.default_size;
513         /* Same defaults for guests */
514         fpu->guest_perm = fpu->perm;
515 }
516
517 static inline void fpu_inherit_perms(struct fpu *dst_fpu)
518 {
519         if (fpu_state_size_dynamic()) {
520                 struct fpu *src_fpu = &current->group_leader->thread.fpu;
521
522                 spin_lock_irq(&current->sighand->siglock);
523                 /* Fork also inherits the permissions of the parent */
524                 dst_fpu->perm = src_fpu->perm;
525                 dst_fpu->guest_perm = src_fpu->guest_perm;
526                 spin_unlock_irq(&current->sighand->siglock);
527         }
528 }
529
530 /* Clone current's FPU state on fork */
531 int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
532 {
533         struct fpu *src_fpu = &current->thread.fpu;
534         struct fpu *dst_fpu = &dst->thread.fpu;
535
536         /* The new task's FPU state cannot be valid in the hardware. */
537         dst_fpu->last_cpu = -1;
538
539         fpstate_reset(dst_fpu);
540
541         if (!cpu_feature_enabled(X86_FEATURE_FPU))
542                 return 0;
543
544         /*
545          * Enforce reload for user space tasks and prevent kernel threads
546          * from trying to save the FPU registers on context switch.
547          */
548         set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
549
550         /*
551          * No FPU state inheritance for kernel threads and IO
552          * worker threads.
553          */
554         if (dst->flags & (PF_KTHREAD | PF_IO_WORKER)) {
555                 /* Clear out the minimal state */
556                 memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs,
557                        init_fpstate_copy_size());
558                 return 0;
559         }
560
561         /*
562          * If a new feature is added, ensure all dynamic features are
563          * caller-saved from here!
564          */
565         BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);
566
567         /*
568          * Save the default portion of the current FPU state into the
569          * clone. All dynamic features are assumed to be caller-saved,
570          * which makes it possible to skip both the expansion of fpstate
571          * and the copying of any dynamic state.
572          *
573          * Do not use memcpy() when TIF_NEED_FPU_LOAD is set because
574          * copying is not valid when current uses non-default states.
575          */
576         fpregs_lock();
577         if (test_thread_flag(TIF_NEED_FPU_LOAD))
578                 fpregs_restore_userregs();
579         save_fpregs_to_fpstate(dst_fpu);
580         if (!(clone_flags & CLONE_THREAD))
581                 fpu_inherit_perms(dst_fpu);
582         fpregs_unlock();
583
584         trace_x86_fpu_copy_src(src_fpu);
585         trace_x86_fpu_copy_dst(dst_fpu);
586
587         return 0;
588 }
589
590 /*
591  * Whitelist the FPU register state embedded into task_struct for hardened
592  * usercopy.
593  */
594 void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size)
595 {
596         *offset = offsetof(struct thread_struct, fpu.__fpstate.regs);
597         *size = fpu_kernel_cfg.default_size;
598 }
599
600 /*
601  * Drops current FPU state: deactivates the fpregs and
602  * the fpstate. NOTE: it still leaves previous contents
603  * in the fpregs in the eager-FPU case.
604  *
605  * This function can be used in cases where we know that
606  * a state-restore is coming: either an explicit one,
607  * or a reschedule.
608  */
609 void fpu__drop(struct fpu *fpu)
610 {
611         preempt_disable();
612
613         if (fpu == &current->thread.fpu) {
614                 /* Ignore delayed exceptions from user space */
615                 asm volatile("1: fwait\n"
616                              "2:\n"
617                              _ASM_EXTABLE(1b, 2b));
618                 fpregs_deactivate(fpu);
619         }
620
621         trace_x86_fpu_dropped(fpu);
622
623         preempt_enable();
624 }
625
626 /*
627  * Clear FPU registers by setting them up from the init fpstate.
628  * Caller must do fpregs_[un]lock() around it.
629  */
630 static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
631 {
632         if (use_xsave())
633                 os_xrstor(&init_fpstate, features_mask);
634         else if (use_fxsr())
635                 fxrstor(&init_fpstate.regs.fxsave);
636         else
637                 frstor(&init_fpstate.regs.fsave);
638
639         pkru_write_default();
640 }
641
642 /*
643  * Reset current->fpu memory state to the init values.
644  */
645 static void fpu_reset_fpregs(void)
646 {
647         struct fpu *fpu = &current->thread.fpu;
648
649         fpregs_lock();
650         fpu__drop(fpu);
651         /*
652          * This does not change the actual hardware registers. It just
653          * resets the memory image and sets TIF_NEED_FPU_LOAD so a
654          * subsequent return to usermode will reload the registers from the
655          * task's memory image.
656          *
657          * Do not use fpstate_init() here. Just copy init_fpstate which has
658          * the correct content already except for PKRU.
659          *
660          * PKRU handling does not rely on the xstate when restoring for
661          * user space as PKRU is eagerly written in switch_to() and
662          * flush_thread().
663          */
664         memcpy(&fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size());
665         set_thread_flag(TIF_NEED_FPU_LOAD);
666         fpregs_unlock();
667 }
668
669 /*
670  * Reset current's user FPU states to the init states.  current's
671  * supervisor states, if any, are not modified by this function.  The
672  * caller guarantees that the XSTATE header in memory is intact.
673  */
674 void fpu__clear_user_states(struct fpu *fpu)
675 {
676         WARN_ON_FPU(fpu != &current->thread.fpu);
677
678         fpregs_lock();
679         if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
680                 fpu_reset_fpregs();
681                 fpregs_unlock();
682                 return;
683         }
684
685         /*
686          * Ensure that current's supervisor states are loaded into their
687          * corresponding registers.
688          */
689         if (xfeatures_mask_supervisor() &&
690             !fpregs_state_valid(fpu, smp_processor_id()))
691                 os_xrstor_supervisor(fpu->fpstate);
692
693         /* Reset user states in registers. */
694         restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);
695
696         /*
697          * Now all FPU registers have their desired values.  Inform the FPU
698          * state machine that current's FPU registers are in the hardware
699          * registers. The memory image does not need to be updated because
700          * any operation relying on it has to save the registers first when
701          * current's FPU is marked active.
702          */
703         fpregs_mark_activate();
704         fpregs_unlock();
705 }
706
707 void fpu_flush_thread(void)
708 {
709         fpstate_reset(&current->thread.fpu);
710         fpu_reset_fpregs();
711 }
712 /*
713  * Load FPU context before returning to userspace.
714  */
715 void switch_fpu_return(void)
716 {
717         if (!static_cpu_has(X86_FEATURE_FPU))
718                 return;
719
720         fpregs_restore_userregs();
721 }
722 EXPORT_SYMBOL_GPL(switch_fpu_return);
723
724 #ifdef CONFIG_X86_DEBUG_FPU
725 /*
726  * If current FPU state according to its tracking (loaded FPU context on this
727  * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
728  * loaded on return to userland.
729  */
730 void fpregs_assert_state_consistent(void)
731 {
732         struct fpu *fpu = &current->thread.fpu;
733
734         if (test_thread_flag(TIF_NEED_FPU_LOAD))
735                 return;
736
737         WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
738 }
739 EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
740 #endif
741
742 void fpregs_mark_activate(void)
743 {
744         struct fpu *fpu = &current->thread.fpu;
745
746         fpregs_activate(fpu);
747         fpu->last_cpu = smp_processor_id();
748         clear_thread_flag(TIF_NEED_FPU_LOAD);
749 }
750
751 /*
752  * x87 math exception handling:
753  */
754
755 int fpu__exception_code(struct fpu *fpu, int trap_nr)
756 {
757         int err;
758
759         if (trap_nr == X86_TRAP_MF) {
760                 unsigned short cwd, swd;
761                 /*
762                  * (~cwd & swd) selects the exceptions which occurred and are unmasked
763                  * in the control word.  0x3f is the exception bits in these regs, 0x200 is the
764                  * C1 reg you need in case of a stack fault, 0x040 is the stack
765                  * fault bit.  We should only be taking one exception at a time,
766                  * so if this combination doesn't produce any single exception,
767                  * then we have a bad program that isn't synchronizing its FPU usage
768                  * and it will suffer the consequences since we won't be able to
769                  * fully reproduce the context of the exception.
770                  */
771                 if (boot_cpu_has(X86_FEATURE_FXSR)) {
772                         cwd = fpu->fpstate->regs.fxsave.cwd;
773                         swd = fpu->fpstate->regs.fxsave.swd;
774                 } else {
775                         cwd = (unsigned short)fpu->fpstate->regs.fsave.cwd;
776                         swd = (unsigned short)fpu->fpstate->regs.fsave.swd;
777                 }
778
779                 err = swd & ~cwd;
780         } else {
781                 /*
782                  * The SIMD FPU exceptions are handled a little differently, as there
783                  * is only a single status/control register.  Thus, to determine which
784                  * unmasked exception was caught we must mask the exception mask bits
785                  * at 0x1f80, and then use these to mask the exception bits at 0x3f.
786                  */
787                 unsigned short mxcsr = MXCSR_DEFAULT;
788
789                 if (boot_cpu_has(X86_FEATURE_XMM))
790                         mxcsr = fpu->fpstate->regs.fxsave.mxcsr;
791
792                 err = ~(mxcsr >> 7) & mxcsr;
793         }
794
795         if (err & 0x001) {      /* Invalid op */
796                 /*
797                  * swd & 0x240 == 0x040: Stack Underflow
798                  * swd & 0x240 == 0x240: Stack Overflow
799                  * User must clear the SF bit (0x40) if set
800                  */
801                 return FPE_FLTINV;
802         } else if (err & 0x004) { /* Divide by Zero */
803                 return FPE_FLTDIV;
804         } else if (err & 0x008) { /* Overflow */
805                 return FPE_FLTOVF;
806         } else if (err & 0x012) { /* Denormal, Underflow */
807                 return FPE_FLTUND;
808         } else if (err & 0x020) { /* Precision */
809                 return FPE_FLTRES;
810         }
811
812         /*
813          * If we're using IRQ 13, or supposedly even some trap
814          * X86_TRAP_MF implementations, it's possible
815          * we get a spurious trap, which is not an error.
816          */
817         return 0;
818 }