// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>
#include <linux/vmalloc.h>

#include "context.h"
#include "internal.h"
#include "legacy.h"
#include "xstate.h"
#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>
#ifdef CONFIG_X86_64
DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic);
DEFINE_PER_CPU(u64, xfd_state);
#endif
/* The FPU state configuration data for kernel and user space */
struct fpu_state_config fpu_kernel_cfg __ro_after_init;
struct fpu_state_config fpu_user_cfg __ro_after_init;
/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
struct fpstate init_fpstate __ro_after_init;
/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
	return !kernel_fpu_disabled();
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
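
/*
 * Typical usage (illustrative sketch, not part of this file): kernel code
 * that wants to touch SIMD registers brackets the work with
 * kernel_fpu_begin()/kernel_fpu_end() from <asm/fpu/api.h>, and checks
 * irq_fpu_usable() first when it may run in interrupt context:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... use SSE/AVX registers ...
 *		kernel_fpu_end();
 *	} else {
 *		... scalar fallback ...
 *	}
 */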
/*
 * Save the FPU register state in fpu->fpstate->regs. The register state is
 * preserved.
 *
 * Must be called with fpregs_lock() held.
 *
 * The legacy FNSAVE instruction clears all FPU state unconditionally, so
 * register state has to be reloaded. That might be a pointless exercise
 * when the FPU is going to be used by another task right after that. But
 * this only affects 20+ years old 32bit systems and avoids conditionals all
 * over the place.
 *
 * FXSAVE and all XSAVE variants preserve the FPU register state.
 */
void save_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		os_xsave(fpu->fpstate);

		/*
		 * AVX512 state is tracked here because its use is
		 * known to slow the max clock speed of the core.
		 */
		if (fpu->fpstate->regs.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
			fpu->avx512_timestamp = jiffies;
		return;
	}

	if (likely(use_fxsr())) {
		fxsave(&fpu->fpstate->regs.fxsave);
		return;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to reload them from the memory state.
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->fpstate->regs.fsave));
	frstor(&fpu->fpstate->regs.fsave);
}
void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
{
	/*
	 * AMD K7/K8 and later CPUs up to Zen don't save/restore
	 * FDP/FIP/FOP unless an exception is pending. Clear the x87 state
	 * here by setting it to fixed values. "m" is a random variable
	 * that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	if (use_xsave()) {
		/*
		 * Dynamically enabled features are enabled in XCR0, but
		 * usage also requires that the corresponding bits in XFD
		 * are cleared. If the bits are set then using a related
		 * instruction will raise #NM. This allows the larger FPU
		 * buffer to be allocated lazily from the #NM handler, or
		 * the task to be killed if it has no permission, which
		 * would otherwise happen via #UD if the feature were
		 * disabled in XCR0.
		 *
		 * XFD state follows the same lifetime rules as XSTATE; to
		 * restore state correctly, XFD has to be updated before
		 * XRSTORS, otherwise the component would stay in or go
		 * into init state even if the bits are set in
		 * fpstate::regs::xsave::xfeatures.
		 */
		xfd_update_state(fpstate);

		/*
		 * Restoring state always needs to modify all features
		 * which are in @mask even if the current task cannot use
		 * extended features.
		 *
		 * So fpstate->xfeatures cannot be used here, because then
		 * a feature for which the task has no permission but which
		 * was used by the previous task would not go into init
		 * state.
		 */
		mask = fpu_kernel_cfg.max_features & mask;

		os_xrstor(fpstate, mask);
	} else {
		if (use_fxsr())
			fxrstor(&fpstate->regs.fxsave);
		else
			frstor(&fpstate->regs.fsave);
	}
}
void fpu_reset_from_exception_fixup(void)
{
	restore_fpregs_from_fpstate(&init_fpstate, XFEATURE_MASK_FPSTATE);
}
#if IS_ENABLED(CONFIG_KVM)
static void __fpstate_reset(struct fpstate *fpstate, u64 xfd);

static void fpu_init_guest_permissions(struct fpu_guest *gfpu)
{
	struct fpu_state_perm *fpuperm;
	u64 perm;

	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	spin_lock_irq(&current->sighand->siglock);
	fpuperm = &current->group_leader->thread.fpu.guest_perm;
	perm = fpuperm->__state_perm;

	/* First fpstate allocation locks down permissions. */
	WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED);

	spin_unlock_irq(&current->sighand->siglock);

	gfpu->perm = perm & ~FPU_GUEST_PERM_LOCKED;
}
bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
{
	struct fpstate *fpstate;
	unsigned int size;

	size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
	fpstate = vzalloc(size);
	if (!fpstate)
		return false;

	/* Leave xfd to 0 (the reset value defined by spec) */
	__fpstate_reset(fpstate, 0);
	fpstate_init_user(fpstate);
	fpstate->is_valloc	= true;
	fpstate->is_guest	= true;

	gfpu->fpstate		= fpstate;
	gfpu->xfeatures		= fpu_user_cfg.default_features;
	gfpu->perm		= fpu_user_cfg.default_features;
	gfpu->uabi_size		= fpu_user_cfg.default_size;
	fpu_init_guest_permissions(gfpu);

	return true;
}
EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate);
void fpu_free_guest_fpstate(struct fpu_guest *gfpu)
{
	struct fpstate *fps = gfpu->fpstate;

	if (!fps)
		return;

	if (WARN_ON_ONCE(!fps->is_valloc || !fps->is_guest || fps->in_use))
		return;

	gfpu->fpstate = NULL;
	vfree(fps);
}
EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate);
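
/*
 * Illustrative lifecycle sketch (the function names are from this file;
 * the surrounding error handling is assumed caller behavior): a
 * hypervisor allocates one guest fpstate per vCPU and frees it on
 * teardown.
 *
 *	struct fpu_guest gfpu = { };
 *
 *	if (!fpu_alloc_guest_fpstate(&gfpu))
 *		return -ENOMEM;
 *	...
 *	fpu_free_guest_fpstate(&gfpu);
 */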
/**
 * fpu_enable_guest_xfd_features - Check xfeatures against guest perm and enable
 * @guest_fpu:	Pointer to the guest FPU container
 * @xfeatures:	Features requested by guest CPUID
 *
 * Enable all dynamic xfeatures according to guest perm and requested CPUID.
 *
 * Return: 0 on success, error code otherwise
 */
int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures)
{
	lockdep_assert_preemption_enabled();

	/* Nothing to do if all requested features are already enabled. */
	xfeatures &= ~guest_fpu->xfeatures;
	if (!xfeatures)
		return 0;

	return __xfd_enable_feature(xfeatures, guest_fpu);
}
EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features);
#ifdef CONFIG_X86_64
void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd)
{
	fpregs_lock();
	guest_fpu->fpstate->xfd = xfd;
	if (guest_fpu->fpstate->in_use)
		xfd_update_state(guest_fpu->fpstate);
	fpregs_unlock();
}
EXPORT_SYMBOL_GPL(fpu_update_guest_xfd);
#endif /* CONFIG_X86_64 */
int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
{
	struct fpstate *guest_fps = guest_fpu->fpstate;
	struct fpu *fpu = &current->thread.fpu;
	struct fpstate *cur_fps = fpu->fpstate;

	fpregs_lock();
	if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD))
		save_fpregs_to_fpstate(fpu);

	/* Swap fpstate */
	if (enter_guest) {
		fpu->__task_fpstate = cur_fps;
		fpu->fpstate = guest_fps;
		guest_fps->in_use = true;
	} else {
		guest_fps->in_use = false;
		fpu->fpstate = fpu->__task_fpstate;
		fpu->__task_fpstate = NULL;
	}

	cur_fps = fpu->fpstate;

	if (!cur_fps->is_confidential) {
		/* Includes XFD update */
		restore_fpregs_from_fpstate(cur_fps, XFEATURE_MASK_FPSTATE);
	} else {
		/*
		 * XSTATE is restored by firmware from encrypted
		 * memory. Make sure XFD state is correct while
		 * running with guest fpstate.
		 */
		xfd_update_state(cur_fps);
	}

	fpregs_mark_activate();
	fpregs_unlock();
	return 0;
}
EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
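
/*
 * Usage note (illustrative sketch of how a KVM-like caller pairs the
 * swaps; "vcpu_gfpu" is a hypothetical name): the guest fpstate is
 * swapped in right before entering the guest and swapped back out after
 * the exit.
 *
 *	fpu_swap_kvm_fpstate(&vcpu_gfpu, true);	 // enter: load guest state
 *	... run the vCPU ...
 *	fpu_swap_kvm_fpstate(&vcpu_gfpu, false); // exit: back to task state
 */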
void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
				    unsigned int size, u32 pkru)
{
	struct fpstate *kstate = gfpu->fpstate;
	union fpregs_state *ustate = buf;
	struct membuf mb = { .p = buf, .left = size };

	if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
		__copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
	} else {
		memcpy(&ustate->fxsave, &kstate->regs.fxsave,
		       sizeof(ustate->fxsave));
		/* Make it restorable on an XSAVE-enabled host */
		ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE;
	}
}
EXPORT_SYMBOL_GPL(fpu_copy_guest_fpstate_to_uabi);
int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
				   u64 xcr0, u32 *vpkru)
{
	struct fpstate *kstate = gfpu->fpstate;
	const union fpregs_state *ustate = buf;
	struct pkru_state *xpkru;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) {
		if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)
			return -EINVAL;
		if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask)
			return -EINVAL;
		memcpy(&kstate->regs.fxsave, &ustate->fxsave, sizeof(ustate->fxsave));
		return 0;
	}

	if (ustate->xsave.header.xfeatures & ~xcr0)
		return -EINVAL;

	ret = copy_uabi_from_kernel_to_xstate(kstate, ustate);
	if (ret)
		return ret;

	/* Retrieve PKRU if not in init state */
	if (kstate->regs.xsave.header.xfeatures & XFEATURE_MASK_PKRU) {
		xpkru = get_xsave_addr(&kstate->regs.xsave, XFEATURE_PKRU);
		*vpkru = xpkru->pkru;
	}

	/* Ensure that XCOMP_BV is set up for XSAVES */
	xstate_init_xcomp_bv(&kstate->regs.xsave, kstate->xfeatures);
	return 0;
}
EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
#endif /* CONFIG_KVM */
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
	preempt_disable();

	WARN_ON_FPU(!irq_fpu_usable());
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, true);

	if (!(current->flags & PF_KTHREAD) &&
	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
		set_thread_flag(TIF_NEED_FPU_LOAD);
		save_fpregs_to_fpstate(&current->thread.fpu);
	}
	__cpu_invalidate_fpregs_state();

	/* Put sane initial values into the control registers. */
	if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
		ldmxcsr(MXCSR_DEFAULT);

	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
		asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
void kernel_fpu_end(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, false);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
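
/*
 * For reference (a sketch of the wrappers in <asm/fpu/api.h>, at the time
 * of writing): kernel_fpu_begin() expands to
 * kernel_fpu_begin_mask(KFPU_MXCSR) on 64-bit, since 64-bit kernel code
 * is not expected to use x87 instructions unless it explicitly requests
 * KFPU_387, and to kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR) on
 * 32-bit.
 */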
/*
 * Sync the FPU register state to current's memory register state when the
 * current task owns the FPU. The hardware register state is preserved.
 */
void fpu_sync_fpstate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	trace_x86_fpu_before_save(fpu);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		save_fpregs_to_fpstate(fpu);

	trace_x86_fpu_after_save(fpu);
	fpregs_unlock();
}
static inline unsigned int init_fpstate_copy_size(void)
{
	if (!use_xsave())
		return fpu_kernel_cfg.default_size;

	/* XSAVE(S) just needs the legacy and the xstate header part */
	return sizeof(init_fpstate.regs.xsave);
}
static inline void fpstate_init_fxstate(struct fpstate *fpstate)
{
	fpstate->regs.fxsave.cwd = 0x37f;
	fpstate->regs.fxsave.mxcsr = MXCSR_DEFAULT;
}
/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fpstate *fpstate)
{
	fpstate->regs.fsave.cwd = 0xffff037fu;
	fpstate->regs.fsave.swd = 0xffff0000u;
	fpstate->regs.fsave.twd = 0xffffffffu;
	fpstate->regs.fsave.fos = 0xffff0000u;
}
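
/*
 * For reference (decoding the magic values above, per the x87
 * architecture): the low word of cwd, 0x037f, masks all exceptions and
 * selects extended (64-bit mantissa) precision; a status word of 0 means
 * no pending exceptions and an empty register stack; a tag word with all
 * bits set marks all eight stack slots as empty. The 0xffff upper halves
 * appear to fill the unused high bits of the 16-bit x87 registers in the
 * 32-bit FNSAVE memory layout.
 */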
/*
 * Used in two places:
 * 1) Early boot to setup init_fpstate for non-XSAVE systems
 * 2) fpu_init_fpstate_user() which is invoked from KVM
 */
void fpstate_init_user(struct fpstate *fpstate)
{
	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
		fpstate_init_soft(&fpstate->regs.soft);
		return;
	}

	xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);

	if (cpu_feature_enabled(X86_FEATURE_FXSR))
		fpstate_init_fxstate(fpstate);
	else
		fpstate_init_fstate(fpstate);
}
static void __fpstate_reset(struct fpstate *fpstate, u64 xfd)
{
	/* Initialize sizes and feature masks */
	fpstate->size		= fpu_kernel_cfg.default_size;
	fpstate->user_size	= fpu_user_cfg.default_size;
	fpstate->xfeatures	= fpu_kernel_cfg.default_features;
	fpstate->user_xfeatures	= fpu_user_cfg.default_features;
	fpstate->xfd		= xfd;
}
void fpstate_reset(struct fpu *fpu)
{
	/* Set the fpstate pointer to the default fpstate */
	fpu->fpstate = &fpu->__fpstate;
	__fpstate_reset(fpu->fpstate, init_fpstate.xfd);

	/* Initialize the permission related info in fpu */
	fpu->perm.__state_perm		= fpu_kernel_cfg.default_features;
	fpu->perm.__state_size		= fpu_kernel_cfg.default_size;
	fpu->perm.__user_state_size	= fpu_user_cfg.default_size;
	/* Same defaults for guests */
	fpu->guest_perm = fpu->perm;
}
static inline void fpu_inherit_perms(struct fpu *dst_fpu)
{
	if (fpu_state_size_dynamic()) {
		struct fpu *src_fpu = &current->group_leader->thread.fpu;

		spin_lock_irq(&current->sighand->siglock);
		/* Fork also inherits the permissions of the parent */
		dst_fpu->perm = src_fpu->perm;
		dst_fpu->guest_perm = src_fpu->guest_perm;
		spin_unlock_irq(&current->sighand->siglock);
	}
}
/* Clone current's FPU state on fork */
int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
{
	struct fpu *src_fpu = &current->thread.fpu;
	struct fpu *dst_fpu = &dst->thread.fpu;

	/* The new task's FPU state cannot be valid in the hardware. */
	dst_fpu->last_cpu = -1;

	fpstate_reset(dst_fpu);

	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return 0;

	/*
	 * Enforce reload for user space tasks and prevent kernel threads
	 * from trying to save the FPU registers on context switch.
	 */
	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);

	/*
	 * No FPU state inheritance for kernel threads and IO
	 * worker threads.
	 */
	if (dst->flags & (PF_KTHREAD | PF_IO_WORKER)) {
		/* Clear out the minimal state */
		memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs,
		       init_fpstate_copy_size());
		return 0;
	}

	/*
	 * If a new feature is added, ensure all dynamic features are
	 * caller-saved from here!
	 */
	BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

	/*
	 * Save the default portion of the current FPU state into the
	 * clone. Assume all dynamic features to be defined as caller-
	 * saved, which enables skipping both the expansion of fpstate
	 * and the copying of any dynamic state.
	 *
	 * Do not use memcpy() when TIF_NEED_FPU_LOAD is set because
	 * copying is not valid when current uses non-default states.
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		fpregs_restore_userregs();
	save_fpregs_to_fpstate(dst_fpu);
	if (!(clone_flags & CLONE_THREAD))
		fpu_inherit_perms(dst_fpu);
	fpregs_unlock();

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}
/*
 * Whitelist the FPU register state embedded into task_struct for hardened
 * usercopy.
 */
void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	*offset = offsetof(struct thread_struct, fpu.__fpstate.regs);
	*size = fpu_kernel_cfg.default_size;
}
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu == &current->thread.fpu) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}
/*
 * Clear FPU registers by setting them up from the init fpstate.
 * Caller must do fpregs_[un]lock() around it.
 */
static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
{
	if (use_xsave())
		os_xrstor(&init_fpstate, features_mask);
	else if (use_fxsr())
		fxrstor(&init_fpstate.regs.fxsave);
	else
		frstor(&init_fpstate.regs.fsave);

	pkru_write_default();
}
/*
 * Reset current->fpu memory state to the init values.
 */
static void fpu_reset_fpregs(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_lock();
	fpu__drop(fpu);
	/*
	 * This does not change the actual hardware registers. It just
	 * resets the memory image and sets TIF_NEED_FPU_LOAD so a
	 * subsequent return to usermode will reload the registers from the
	 * task's memory image.
	 *
	 * Do not use fpstate_init() here. Just copy init_fpstate which has
	 * the correct content already except for PKRU.
	 *
	 * PKRU handling does not rely on the xstate when restoring for
	 * user space as PKRU is eagerly written in switch_to() and
	 * flush_thread().
	 */
	memcpy(&fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size());
	set_thread_flag(TIF_NEED_FPU_LOAD);
	fpregs_mark_activate();
	fpregs_unlock();
}
/*
 * Reset current's user FPU states to the init states. current's
 * supervisor states, if any, are not modified by this function. The
 * caller guarantees that the XSTATE header in memory is intact.
 */
void fpu__clear_user_states(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
		fpu_reset_fpregs();
		fpregs_unlock();
		return;
	}

	/*
	 * Ensure that current's supervisor states are loaded into their
	 * corresponding registers.
	 */
	if (xfeatures_mask_supervisor() &&
	    !fpregs_state_valid(fpu, smp_processor_id()))
		os_xrstor_supervisor(fpu->fpstate);

	/* Reset user states in registers. */
	restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);

	/*
	 * Now all FPU registers have their desired values. Inform the FPU
	 * state machine that current's FPU registers are in the hardware
	 * registers. The memory image does not need to be updated because
	 * any operation relying on it has to save the registers first when
	 * current's FPU is marked active.
	 */
	fpregs_mark_activate();
	fpregs_unlock();
}
void fpu_flush_thread(void)
{
	fpstate_reset(&current->thread.fpu);
	fpu_reset_fpregs();
}
/*
 * Load FPU context before returning to userspace.
 */
void switch_fpu_return(void)
{
	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	fpregs_restore_userregs();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);
#ifdef CONFIG_X86_DEBUG_FPU
/*
 * If current FPU state according to its tracking (loaded FPU context on this
 * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
 * loaded on return to userland.
 */
void fpregs_assert_state_consistent(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		return;

	WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
}
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
#endif
void fpregs_mark_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_activate(fpu);
	fpu->last_cpu = smp_processor_id();
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}
/*
 * x87 math exception handling:
 */
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->fpstate->regs.fxsave.cwd;
			swd = fpu->fpstate->regs.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->fpstate->regs.fsave.cwd;
			swd = (unsigned short)fpu->fpstate->regs.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->fpstate->regs.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}
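
	/*
	 * Worked example (illustrative): for an unmasked SIMD divide by
	 * zero, ZE (bit 2, 0x004) is set in the MXCSR flags while ZM
	 * (bit 9, 0x200) is clear in the masks. Shifting the masks right
	 * by 7 aligns ZM with ZE, so ~(mxcsr >> 7) & mxcsr leaves 0x004
	 * set and the checks below return SIGFPE.
	 */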
	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return SIGFPE;
	} else if (err & 0x004) { /* Divide by Zero */
		return SIGFPE;
	} else if (err & 0x008) { /* Overflow */
		return SIGFPE;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return SIGFPE;
	} else if (err & 0x020) { /* Precision */
		return SIGFPE;
	}
	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}