x86/fpu: Rename copy_xregs_to_kernel() and copy_kernel_to_xregs()
author Thomas Gleixner <tglx@linutronix.de>
Wed, 23 Jun 2021 12:01:52 +0000 (14:01 +0200)
committer Borislav Petkov <bp@suse.de>
Wed, 23 Jun 2021 15:57:57 +0000 (17:57 +0200)
The function names for xsave[s]/xrstor[s] operations are horribly named and
a permanent source of confusion.

Rename:
copy_xregs_to_kernel() to os_xsave()
copy_kernel_to_xregs() to os_xrstor()

These are truly low level wrappers around the actual instructions
XSAVE[OPT]/XRSTOR and XSAVES/XRSTORS with the twist that the selection
based on the available CPU features happens with an alternative to avoid
conditionals all over the place and to provide the best performance for hot
paths.

The os_ prefix tells that this is the OS selected mechanism.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210623121453.830239347@linutronix.de
arch/x86/include/asm/fpu/internal.h
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/fpu/xstate.c

index 854bfb0..dae1545 100644 (file)
@@ -261,7 +261,7 @@ static inline void fxsave(struct fxregs_state *fx)
  * This function is called only during boot time when x86 caps are not set
  * up and alternative can not be used yet.
  */
-static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
+static inline void os_xrstor_booting(struct xregs_state *xstate)
 {
        u64 mask = -1;
        u32 lmask = mask;
@@ -284,8 +284,11 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 
 /*
  * Save processor xstate to xsave area.
+ *
+ * Uses either XSAVE or XSAVEOPT or XSAVES depending on the CPU features
+ * and command line options. The choice is permanent until the next reboot.
  */
-static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
+static inline void os_xsave(struct xregs_state *xstate)
 {
        u64 mask = xfeatures_mask_all;
        u32 lmask = mask;
@@ -302,8 +305,10 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
 
 /*
  * Restore processor xstate from xsave area.
+ *
+ * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
  */
-static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
+static inline void os_xrstor(struct xregs_state *xstate, u64 mask)
 {
        u32 lmask = mask;
        u32 hmask = mask >> 32;
@@ -364,13 +369,13 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
  * Restore xstate from kernel space xsave area, return an error code instead of
  * an exception.
  */
-static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
+static inline int os_xrstor_safe(struct xregs_state *xstate, u64 mask)
 {
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;
 
-       if (static_cpu_has(X86_FEATURE_XSAVES))
+       if (cpu_feature_enabled(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
@@ -383,7 +388,7 @@ extern int copy_fpregs_to_fpstate(struct fpu *fpu);
 static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
 {
        if (use_xsave()) {
-               copy_kernel_to_xregs(&fpstate->xsave, mask);
+               os_xrstor(&fpstate->xsave, mask);
        } else {
                if (use_fxsr())
                        copy_kernel_to_fxregs(&fpstate->fxsave);
index 2243701..bfdcf7f 100644 (file)
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(irq_fpu_usable);
 int copy_fpregs_to_fpstate(struct fpu *fpu)
 {
        if (likely(use_xsave())) {
-               copy_xregs_to_kernel(&fpu->state.xsave);
+               os_xsave(&fpu->state.xsave);
 
                /*
                 * AVX512 state is tracked here because its use is
@@ -314,7 +314,7 @@ void fpu__drop(struct fpu *fpu)
 static inline void copy_init_fpstate_to_fpregs(u64 features_mask)
 {
        if (use_xsave())
-               copy_kernel_to_xregs(&init_fpstate.xsave, features_mask);
+               os_xrstor(&init_fpstate.xsave, features_mask);
        else if (static_cpu_has(X86_FEATURE_FXSR))
                copy_kernel_to_fxregs(&init_fpstate.fxsave);
        else
@@ -345,8 +345,7 @@ static void fpu__clear(struct fpu *fpu, bool user_only)
        if (user_only) {
                if (!fpregs_state_valid(fpu, smp_processor_id()) &&
                    xfeatures_mask_supervisor())
-                       copy_kernel_to_xregs(&fpu->state.xsave,
-                                            xfeatures_mask_supervisor());
+                       os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
                copy_init_fpstate_to_fpregs(xfeatures_mask_user());
        } else {
                copy_init_fpstate_to_fpregs(xfeatures_mask_all);
index 5010595..33675b3 100644 (file)
@@ -261,14 +261,14 @@ static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
 
                        r = copy_user_to_fxregs(buf);
                        if (!r)
-                               copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+                               os_xrstor(&init_fpstate.xsave, init_bv);
                        return r;
                } else {
                        init_bv = xfeatures_mask_user() & ~xbv;
 
                        r = copy_user_to_xregs(buf, xbv);
                        if (!r && unlikely(init_bv))
-                               copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+                               os_xrstor(&init_fpstate.xsave, init_bv);
                        return r;
                }
        } else if (use_fxsr()) {
@@ -356,9 +356,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                         * has been copied to the kernel one.
                         */
                        if (test_thread_flag(TIF_NEED_FPU_LOAD) &&
-                           xfeatures_mask_supervisor())
-                               copy_kernel_to_xregs(&fpu->state.xsave,
-                                                    xfeatures_mask_supervisor());
+                           xfeatures_mask_supervisor()) {
+                               os_xrstor(&fpu->state.xsave,
+                                         xfeatures_mask_supervisor());
+                       }
                        fpregs_mark_activate();
                        fpregs_unlock();
                        return 0;
@@ -412,7 +413,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                 * above XRSTOR failed or ia32_fxstate is true. Shrug.
                 */
                if (xfeatures_mask_supervisor())
-                       copy_xregs_to_kernel(&fpu->state.xsave);
+                       os_xsave(&fpu->state.xsave);
                set_thread_flag(TIF_NEED_FPU_LOAD);
        }
        __fpu_invalidate_fpregs_state(fpu);
@@ -430,14 +431,14 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 
                fpregs_lock();
                if (unlikely(init_bv))
-                       copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+                       os_xrstor(&init_fpstate.xsave, init_bv);
 
                /*
                 * Restore previously saved supervisor xstates along with
                 * copied-in user xstates.
                 */
-               ret = copy_kernel_to_xregs_err(&fpu->state.xsave,
-                                              user_xfeatures | xfeatures_mask_supervisor());
+               ret = os_xrstor_safe(&fpu->state.xsave,
+                                    user_xfeatures | xfeatures_mask_supervisor());
 
        } else if (use_fxsr()) {
                ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
@@ -454,7 +455,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                        u64 init_bv;
 
                        init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;
-                       copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+                       os_xrstor(&init_fpstate.xsave, init_bv);
                }
 
                ret = copy_kernel_to_fxregs_err(&fpu->state.fxsave);
index 427977b..4fca8a8 100644 (file)
@@ -400,7 +400,7 @@ static void __init setup_init_fpu_buf(void)
        /*
         * Init all the features state with header.xfeatures being 0x0
         */
-       copy_kernel_to_xregs_booting(&init_fpstate.xsave);
+       os_xrstor_booting(&init_fpstate.xsave);
 
        /*
         * All components are now in init state. Read the state back so