KVM: x86: SVM: drop preempt-safe wrappers for avic_vcpu_load/put
author		Maxim Levitsky <mlevitsk@redhat.com>
		Mon, 6 Jun 2022 18:08:29 +0000 (21:08 +0300)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Thu, 9 Jun 2022 14:52:20 +0000 (10:52 -0400)
Now that __avic_vcpu_load() and __avic_vcpu_put() are always called with
preemption already disabled, drop the preempt-safe avic_vcpu_load() and
avic_vcpu_put() wrappers, which only re-disabled preemption around them,
and rename the double-underscore variants to take over the plain names.

No functional change intended.
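
For context, the main reason preemption is already off at the remaining
call sites: svm_vcpu_load() and svm_vcpu_put() are reached from the
generic vcpu_load()/vcpu_put() paths, which pin the task to a CPU. A
minimal sketch, simplified from virt/kvm/kvm_main.c and not part of
this patch (preempt-notifier registration elided):

	/*
	 * Simplified sketch: the generic load path disables preemption
	 * via get_cpu(), so the SVM callbacks underneath it need no
	 * preemption guard of their own.
	 */
	void vcpu_load(struct kvm_vcpu *vcpu)
	{
		int cpu = get_cpu();		/* disables preemption */

		kvm_arch_vcpu_load(vcpu, cpu);	/* -> svm_vcpu_load()
						 * -> avic_vcpu_load(vcpu, cpu)
						 */
		put_cpu();			/* re-enables preemption */
	}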

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220606180829.102503-8-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 5d98ac5..5542d89 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -946,7 +946,7 @@ out:
        return ret;
 }
 
-void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        u64 entry;
        int h_physical_id = kvm_cpu_get_apicid(cpu);
@@ -978,7 +978,7 @@ void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
 }
 
-void __avic_vcpu_put(struct kvm_vcpu *vcpu)
+void avic_vcpu_put(struct kvm_vcpu *vcpu)
 {
        u64 entry;
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -997,25 +997,6 @@ void __avic_vcpu_put(struct kvm_vcpu *vcpu)
        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
 }
 
-static void avic_vcpu_load(struct kvm_vcpu *vcpu)
-{
-       int cpu = get_cpu();
-
-       WARN_ON(cpu != vcpu->cpu);
-
-       __avic_vcpu_load(vcpu, cpu);
-
-       put_cpu();
-}
-
-static void avic_vcpu_put(struct kvm_vcpu *vcpu)
-{
-       preempt_disable();
-
-       __avic_vcpu_put(vcpu);
-
-       preempt_enable();
-}
 
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
@@ -1042,7 +1023,7 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
        vmcb_mark_dirty(vmcb, VMCB_AVIC);
 
        if (activated)
-               avic_vcpu_load(vcpu);
+               avic_vcpu_load(vcpu, vcpu->cpu);
        else
                avic_vcpu_put(vcpu);
 
@@ -1075,5 +1056,5 @@ void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
        if (!kvm_vcpu_apicv_active(vcpu))
                return;
 
-       avic_vcpu_load(vcpu);
+       avic_vcpu_load(vcpu, vcpu->cpu);
 }
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1dc02cd..1ac66fb 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1400,13 +1400,13 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                indirect_branch_prediction_barrier();
        }
        if (kvm_vcpu_apicv_active(vcpu))
-               __avic_vcpu_load(vcpu, cpu);
+               avic_vcpu_load(vcpu, cpu);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
        if (kvm_vcpu_apicv_active(vcpu))
-               __avic_vcpu_put(vcpu);
+               avic_vcpu_put(vcpu);
 
        svm_prepare_host_switch(vcpu);
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 500348c..1bddd33 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -610,8 +610,8 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
 int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
 int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
 int avic_init_vcpu(struct vcpu_svm *svm);
-void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-void __avic_vcpu_put(struct kvm_vcpu *vcpu);
+void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void avic_vcpu_put(struct kvm_vcpu *vcpu);
 void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
 void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
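
With the wrappers gone, avic_vcpu_load() and avic_vcpu_put() depend on
their callers to keep preemption disabled. One way to document that
contract would be a lockdep assertion at the top of each function; this
is a hypothetical hardening sketch, not part of this patch:

	/* Hypothetical, not in this patch: assert the caller contract. */
	void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	{
		lockdep_assert_preemption_disabled();
		/* ... existing body unchanged ... */
	}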