x86/paravirt: Add new features for paravirt patching
author Juergen Gross <jgross@suse.com>
Thu, 11 Mar 2021 14:23:13 +0000 (15:23 +0100)
committer Borislav Petkov <bp@suse.de>
Thu, 11 Mar 2021 18:51:49 +0000 (19:51 +0100)
In order to be able to switch paravirt patching from special-cased
custom code sequences to ALTERNATIVE handling, some new X86_FEATURE_*
feature bits are needed. They make it possible to use the standard
indirect pv call as the default code and to patch in the non-Xen custom
code sequence via ALTERNATIVE patching later.

Make sure paravirt patching is performed before alternatives patching.
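As an illustration of where this is heading (not part of this patch;
the macro shapes below follow other patches of this series and may
differ in detail), a pv call site can then be written so that the
indirect call is the compiled-in default, while the custom sequence is
patched in whenever the new feature bit is left unset:

	/*
	 * Default: indirect call via pv_ops.lock.vcpu_is_preempted,
	 * converted to a direct call by paravirt patching.  On native
	 * hardware X86_FEATURE_VCPUPREEMPT stays clear, so alternative
	 * patching replaces the call with "xor %eax,%eax", i.e. a
	 * constant "false" return without any function call.
	 */
	static __always_inline bool pv_vcpu_is_preempted(long cpu)
	{
		return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
					"xor %%" _ASM_AX ", %%" _ASM_AX ";",
					ALT_NOT(X86_FEATURE_VCPUPREEMPT));
	}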

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210311142319.4723-9-jgross@suse.com
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/paravirt.h
arch/x86/kernel/alternative.c
arch/x86/kernel/paravirt-spinlocks.c

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index cc96e26..b440c95 100644
 #define X86_FEATURE_EPT_AD             ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
 #define X86_FEATURE_VMCALL             ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
 #define X86_FEATURE_VMW_VMMCALL                ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
+#define X86_FEATURE_PVUNLOCK           ( 8*32+20) /* "" PV unlock function */
+#define X86_FEATURE_VCPUPREEMPT                ( 8*32+21) /* "" PV vcpu_is_preempted function */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
 #define X86_FEATURE_FSGSBASE           ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 6408fd0..def450f 100644
@@ -45,6 +45,10 @@ static inline u64 paravirt_steal_clock(int cpu)
        return static_call(pv_steal_clock)(cpu);
 }
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init paravirt_set_cap(void);
+#endif
+
 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void)
 {
@@ -809,5 +813,11 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 {
 }
 #endif
+
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void paravirt_set_cap(void)
+{
+}
+#endif
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PARAVIRT_H */
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 133b549..76ad4ce 100644
@@ -28,6 +28,7 @@
 #include <asm/insn.h>
 #include <asm/io.h>
 #include <asm/fixmap.h>
+#include <asm/paravirt.h>
 
 int __read_mostly alternatives_patched;
 
@@ -733,6 +734,33 @@ void __init alternative_instructions(void)
         * patching.
         */
 
+       /*
+        * Paravirt patching and alternative patching can be combined to
+        * replace a function call with a short direct code sequence (e.g.
+        * by setting a constant return value instead of doing that in an
+        * external function).
+        * In order to make this work the following sequence is required:
+        * 1. set (artificial) features depending on used paravirt
+        *    functions which can later influence alternative patching
+        * 2. apply paravirt patching (generally replacing an indirect
+        *    function call with a direct one)
+        * 3. apply alternative patching (e.g. replacing a direct function
+        *    call with a custom code sequence)
+        * Doing paravirt patching after alternative patching would clobber
+        * the custom code sequences, turning them back into function calls.
+        */
+       paravirt_set_cap();
+
+       /*
+        * First patch paravirt functions, such that we overwrite the indirect
+        * call with the direct call.
+        */
+       apply_paravirt(__parainstructions, __parainstructions_end);
+
+       /*
+        * Then patch alternatives, such that those paravirt calls that are in
+        * alternatives can be overwritten by their immediate fragments.
+        */
        apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 #ifdef CONFIG_SMP
@@ -751,8 +779,6 @@ void __init alternative_instructions(void)
        }
 #endif
 
-       apply_paravirt(__parainstructions, __parainstructions_end);
-
        restart_nmi();
        alternatives_patched = 1;
 }
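To make the required ordering concrete, this is how a single call site
evolves on a native (non-Xen) boot under the new scheme (illustrative,
instruction names abbreviated):

	built binary:  call *pv_ops.lock.vcpu_is_preempted  (indirect call)
	after step 2:  call <native vcpu_is_preempted thunk> (direct call)
	after step 3:  xor %eax,%eax                         (inline code)

With the old order (apply_paravirt() after apply_alternatives()), step 2
would run on top of step 3's result and bring the function call back,
which is exactly the clobbering the comment above warns about.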
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 4f75d0c..9e1ea99 100644
@@ -32,3 +32,12 @@ bool pv_is_native_vcpu_is_preempted(void)
        return pv_ops.lock.vcpu_is_preempted.func ==
                __raw_callee_save___native_vcpu_is_preempted;
 }
+
+void __init paravirt_set_cap(void)
+{
+       if (!pv_is_native_spin_unlock())
+               setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);
+
+       if (!pv_is_native_vcpu_is_preempted())
+               setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
+}
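For reference, the other helper queried by paravirt_set_cap() already
exists earlier in this file, which is why it does not appear in the
diff; assuming it is unchanged, it follows the same pattern:

	bool pv_is_native_spin_unlock(void)
	{
		return pv_ops.lock.queued_spin_unlock.func ==
			__raw_callee_save___native_queued_spin_unlock;
	}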