x86/reboot: KVM: Handle VMXOFF in KVM's reboot callback
Author: Sean Christopherson <seanjc@google.com>
Fri, 21 Jul 2023 20:18:43 +0000 (13:18 -0700)
Committer: Sean Christopherson <seanjc@google.com>
Thu, 3 Aug 2023 22:37:14 +0000 (15:37 -0700)
Use KVM VMX's reboot/crash callback to do VMXOFF in an emergency instead
of manually and blindly doing VMXOFF.  There's no need to attempt VMXOFF
if a hypervisor, i.e. KVM, isn't loaded/active, i.e. if the CPU can't
possibly be post-VMXON.

Reviewed-by: Kai Huang <kai.huang@intel.com>
Link: https://lore.kernel.org/r/20230721201859.2307736-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/virtext.h
arch/x86/kernel/reboot.c
arch/x86/kvm/vmx/vmx.c

index 3b12e6b..5bc29fa 100644 (file)
@@ -70,16 +70,6 @@ static inline void __cpu_emergency_vmxoff(void)
                cpu_vmxoff();
 }
 
-/** Disable VMX if it is supported and enabled on the current CPU
- */
-static inline void cpu_emergency_vmxoff(void)
-{
-       if (cpu_has_vmx())
-               __cpu_emergency_vmxoff();
-}
-
-
-
 
 /*
  * SVM functions:
index 62ccede..d2d0f26 100644 (file)
@@ -787,13 +787,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
 }
 #endif
 
-/*
- * This is used to VMCLEAR all VMCSs loaded on the
- * processor. And when loading kvm_intel module, the
- * callback function pointer will be assigned.
- *
- * protected by rcu.
- */
+/* RCU-protected callback to disable virtualization prior to reboot. */
 static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback;
 
 void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback)
@@ -815,17 +809,6 @@ void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback)
 }
 EXPORT_SYMBOL_GPL(cpu_emergency_unregister_virt_callback);
 
-static inline void cpu_crash_vmclear_loaded_vmcss(void)
-{
-       cpu_emergency_virt_cb *callback;
-
-       rcu_read_lock();
-       callback = rcu_dereference(cpu_emergency_virt_callback);
-       if (callback)
-               callback();
-       rcu_read_unlock();
-}
-
 /* This is the CPU performing the emergency shutdown work. */
 int crashing_cpu = -1;
 
@@ -836,9 +819,15 @@ int crashing_cpu = -1;
  */
 void cpu_emergency_disable_virtualization(void)
 {
-       cpu_crash_vmclear_loaded_vmcss();
+       cpu_emergency_virt_cb *callback;
+
+       rcu_read_lock();
+       callback = rcu_dereference(cpu_emergency_virt_callback);
+       if (callback)
+               callback();
+       rcu_read_unlock();
 
-       cpu_emergency_vmxoff();
+       /* KVM_AMD doesn't yet utilize the common callback. */
        cpu_emergency_svm_disable();
 }
 
index 661ba09..df991f1 100644 (file)
@@ -725,7 +725,7 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
        return ret;
 }
 
-static void crash_vmclear_local_loaded_vmcss(void)
+static void vmx_emergency_disable(void)
 {
        int cpu = raw_smp_processor_id();
        struct loaded_vmcs *v;
@@ -733,6 +733,8 @@ static void crash_vmclear_local_loaded_vmcss(void)
        list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
                            loaded_vmcss_on_cpu_link)
                vmcs_clear(v->vmcs);
+
+       __cpu_emergency_vmxoff();
 }
 
 static void __loaded_vmcs_clear(void *arg)
@@ -8571,7 +8573,7 @@ static void __vmx_exit(void)
 {
        allow_smaller_maxphyaddr = false;
 
-       cpu_emergency_unregister_virt_callback(crash_vmclear_local_loaded_vmcss);
+       cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
 
        vmx_cleanup_l1d_flush();
 }
@@ -8619,7 +8621,7 @@ static int __init vmx_init(void)
                pi_init_cpu(cpu);
        }
 
-       cpu_emergency_register_virt_callback(crash_vmclear_local_loaded_vmcss);
+       cpu_emergency_register_virt_callback(vmx_emergency_disable);
 
        vmx_check_vmcs12_offsets();