KVM: x86: Rename virtualization {en,dis}abling APIs to match common KVM
authorSean Christopherson <seanjc@google.com>
Fri, 30 Aug 2024 04:35:56 +0000 (21:35 -0700)
committerPaolo Bonzini <pbonzini@redhat.com>
Wed, 4 Sep 2024 15:02:33 +0000 (11:02 -0400)
Rename x86's per-CPU vendor hooks used to enable virtualization in
hardware to align with the recently renamed arch hooks.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Message-ID: <20240830043600.127750-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/main.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/x86_ops.h
arch/x86/kvm/x86.c

index 68ad4f9..03b7e13 100644 (file)
@@ -14,8 +14,8 @@ BUILD_BUG_ON(1)
  * be __static_call_return0.
  */
 KVM_X86_OP(check_processor_compatibility)
-KVM_X86_OP(hardware_enable)
-KVM_X86_OP(hardware_disable)
+KVM_X86_OP(enable_virtualization_cpu)
+KVM_X86_OP(disable_virtualization_cpu)
 KVM_X86_OP(hardware_unsetup)
 KVM_X86_OP(has_emulated_msr)
 KVM_X86_OP(vcpu_after_set_cpuid)
index e4fc362..704aeec 100644 (file)
@@ -1629,8 +1629,8 @@ struct kvm_x86_ops {
 
        int (*check_processor_compatibility)(void);
 
-       int (*hardware_enable)(void);
-       void (*hardware_disable)(void);
+       int (*enable_virtualization_cpu)(void);
+       void (*disable_virtualization_cpu)(void);
        void (*hardware_unsetup)(void);
        bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
        void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
index d6f2525..a9adbe1 100644 (file)
@@ -592,14 +592,14 @@ static inline void kvm_cpu_svm_disable(void)
        }
 }
 
-static void svm_emergency_disable(void)
+static void svm_emergency_disable_virtualization_cpu(void)
 {
        kvm_rebooting = true;
 
        kvm_cpu_svm_disable();
 }
 
-static void svm_hardware_disable(void)
+static void svm_disable_virtualization_cpu(void)
 {
        /* Make sure we clean up behind us */
        if (tsc_scaling)
@@ -610,7 +610,7 @@ static void svm_hardware_disable(void)
        amd_pmu_disable_virt();
 }
 
-static int svm_hardware_enable(void)
+static int svm_enable_virtualization_cpu(void)
 {
 
        struct svm_cpu_data *sd;
@@ -1533,7 +1533,7 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
         * TSC_AUX is always virtualized for SEV-ES guests when the feature is
         * available. The user return MSR support is not required in this case
         * because TSC_AUX is restored on #VMEXIT from the host save area
-        * (which has been initialized in svm_hardware_enable()).
+        * (which has been initialized in svm_enable_virtualization_cpu()).
         */
        if (likely(tsc_aux_uret_slot >= 0) &&
            (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
@@ -3132,7 +3132,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                 * feature is available. The user return MSR support is not
                 * required in this case because TSC_AUX is restored on #VMEXIT
                 * from the host save area (which has been initialized in
-                * svm_hardware_enable()).
+                * svm_enable_virtualization_cpu()).
                 */
                if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
                        break;
@@ -4980,8 +4980,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .check_processor_compatibility = svm_check_processor_compat,
 
        .hardware_unsetup = svm_hardware_unsetup,
-       .hardware_enable = svm_hardware_enable,
-       .hardware_disable = svm_hardware_disable,
+       .enable_virtualization_cpu = svm_enable_virtualization_cpu,
+       .disable_virtualization_cpu = svm_disable_virtualization_cpu,
        .has_emulated_msr = svm_has_emulated_msr,
 
        .vcpu_create = svm_vcpu_create,
@@ -5411,7 +5411,7 @@ static void __svm_exit(void)
 {
        kvm_x86_vendor_exit();
 
-       cpu_emergency_unregister_virt_callback(svm_emergency_disable);
+       cpu_emergency_unregister_virt_callback(svm_emergency_disable_virtualization_cpu);
 }
 
 static int __init svm_init(void)
@@ -5427,7 +5427,7 @@ static int __init svm_init(void)
        if (r)
                return r;
 
-       cpu_emergency_register_virt_callback(svm_emergency_disable);
+       cpu_emergency_register_virt_callback(svm_emergency_disable_virtualization_cpu);
 
        /*
         * Common KVM initialization _must_ come last, after this, /dev/kvm is
index 0bf35eb..4a5bf92 100644 (file)
@@ -23,8 +23,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 
        .hardware_unsetup = vmx_hardware_unsetup,
 
-       .hardware_enable = vmx_hardware_enable,
-       .hardware_disable = vmx_hardware_disable,
+       .enable_virtualization_cpu = vmx_enable_virtualization_cpu,
+       .disable_virtualization_cpu = vmx_disable_virtualization_cpu,
        .has_emulated_msr = vmx_has_emulated_msr,
 
        .vm_size = sizeof(struct kvm_vmx),
index f18c2d8..cf7d937 100644 (file)
@@ -755,7 +755,7 @@ fault:
        return -EIO;
 }
 
-static void vmx_emergency_disable(void)
+static void vmx_emergency_disable_virtualization_cpu(void)
 {
        int cpu = raw_smp_processor_id();
        struct loaded_vmcs *v;
@@ -2844,7 +2844,7 @@ fault:
        return -EFAULT;
 }
 
-int vmx_hardware_enable(void)
+int vmx_enable_virtualization_cpu(void)
 {
        int cpu = raw_smp_processor_id();
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
@@ -2881,7 +2881,7 @@ static void vmclear_local_loaded_vmcss(void)
                __loaded_vmcs_clear(v);
 }
 
-void vmx_hardware_disable(void)
+void vmx_disable_virtualization_cpu(void)
 {
        vmclear_local_loaded_vmcss();
 
@@ -8584,7 +8584,7 @@ static void __vmx_exit(void)
 {
        allow_smaller_maxphyaddr = false;
 
-       cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
+       cpu_emergency_unregister_virt_callback(vmx_emergency_disable_virtualization_cpu);
 
        vmx_cleanup_l1d_flush();
 }
@@ -8632,7 +8632,7 @@ static int __init vmx_init(void)
                pi_init_cpu(cpu);
        }
 
-       cpu_emergency_register_virt_callback(vmx_emergency_disable);
+       cpu_emergency_register_virt_callback(vmx_emergency_disable_virtualization_cpu);
 
        vmx_check_vmcs12_offsets();
 
index ce3221c..205692c 100644 (file)
@@ -13,8 +13,8 @@ extern struct kvm_x86_init_ops vt_init_ops __initdata;
 
 void vmx_hardware_unsetup(void);
 int vmx_check_processor_compat(void);
-int vmx_hardware_enable(void);
-void vmx_hardware_disable(void);
+int vmx_enable_virtualization_cpu(void);
+void vmx_disable_virtualization_cpu(void);
 int vmx_vm_init(struct kvm *kvm);
 void vmx_vm_destroy(struct kvm *kvm);
 int vmx_vcpu_precreate(struct kvm *kvm);
index 1182baf..4313581 100644 (file)
@@ -9749,7 +9749,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 
        guard(mutex)(&vendor_module_lock);
 
-       if (kvm_x86_ops.hardware_enable) {
+       if (kvm_x86_ops.enable_virtualization_cpu) {
                pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
                return -EEXIST;
        }
@@ -9876,7 +9876,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
        return 0;
 
 out_unwind_ops:
-       kvm_x86_ops.hardware_enable = NULL;
+       kvm_x86_ops.enable_virtualization_cpu = NULL;
        kvm_x86_call(hardware_unsetup)();
 out_mmu_exit:
        kvm_mmu_vendor_module_exit();
@@ -9917,7 +9917,7 @@ void kvm_x86_vendor_exit(void)
        WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
 #endif
        mutex_lock(&vendor_module_lock);
-       kvm_x86_ops.hardware_enable = NULL;
+       kvm_x86_ops.enable_virtualization_cpu = NULL;
        mutex_unlock(&vendor_module_lock);
 }
 EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
@@ -12528,7 +12528,7 @@ int kvm_arch_enable_virtualization_cpu(void)
        if (ret)
                return ret;
 
-       ret = kvm_x86_call(hardware_enable)();
+       ret = kvm_x86_call(enable_virtualization_cpu)();
        if (ret != 0)
                return ret;
 
@@ -12610,7 +12610,7 @@ int kvm_arch_enable_virtualization_cpu(void)
 
 void kvm_arch_disable_virtualization_cpu(void)
 {
-       kvm_x86_call(hardware_disable)();
+       kvm_x86_call(disable_virtualization_cpu)();
        drop_user_return_notifiers();
 }