KVM: x86: Add wrappers for setting/clearing APICv inhibits
author  Sean Christopherson <seanjc@google.com>
Fri, 11 Mar 2022 04:35:16 +0000 (04:35 +0000)
committer       Paolo Bonzini <pbonzini@redhat.com>
Sat, 2 Apr 2022 09:34:44 +0000 (05:34 -0400)
Add set/clear wrappers for toggling APICv inhibits to make the call sites
more readable, and opportunistically rename the inner helpers to align
with the new wrappers and to make them more readable as well.  Invert the
flag from "activate" to "set"; activate is painfully ambiguous as it's
not obvious if the inhibit is being activated, or if APICv is being
activated, in which case the inhibit is being deactivated.

For the functions that take @set, swap the order of the inhibit reason
and @set so that the call sites are visually similar to those that bounce
through the wrapper.
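
As an example, the i8254.c conversion below goes from the ambiguous bool
form to a wrapper whose name states the intent:

  /* Before: "false" means APICv is deactivated, i.e. the inhibit is set. */
  kvm_request_apicv_update(kvm, false, APICV_INHIBIT_REASON_PIT_REINJ);

  /* After: setting the inhibit is explicit at the call site. */
  kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);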

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220311043517.17027-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/hyperv.c
arch/x86/kvm/i8254.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/trace.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 82d1493..93a2671 100644
@@ -1799,10 +1799,22 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 
 bool kvm_apicv_activated(struct kvm *kvm);
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
-void kvm_request_apicv_update(struct kvm *kvm, bool activate,
-                             enum kvm_apicv_inhibit reason);
-void __kvm_request_apicv_update(struct kvm *kvm, bool activate,
-                               enum kvm_apicv_inhibit reason);
+void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                     enum kvm_apicv_inhibit reason, bool set);
+void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                   enum kvm_apicv_inhibit reason, bool set);
+
+static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
+                                        enum kvm_apicv_inhibit reason)
+{
+       kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
+}
+
+static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
+                                          enum kvm_apicv_inhibit reason)
+{
+       kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
+}
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 4177c17..123b677 100644
@@ -122,9 +122,13 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
        else
                hv->synic_auto_eoi_used--;
 
-       __kvm_request_apicv_update(vcpu->kvm,
-                                  !hv->synic_auto_eoi_used,
-                                  APICV_INHIBIT_REASON_HYPERV);
+       /*
+        * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
+        * the hypervisor to manually inject IRQs.
+        */
+       __kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
+                                        APICV_INHIBIT_REASON_HYPERV,
+                                        !!hv->synic_auto_eoi_used);
 
        up_write(&vcpu->kvm->arch.apicv_update_lock);
 }
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 0b65a76..1c83076 100644
@@ -305,15 +305,13 @@ void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
         * So, deactivate APICv when PIT is in reinject mode.
         */
        if (reinject) {
-               kvm_request_apicv_update(kvm, false,
-                                        APICV_INHIBIT_REASON_PIT_REINJ);
+               kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
                /* The initial state is preserved while ps->reinject == 0. */
                kvm_pit_reset_reinject(pit);
                kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
                kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
        } else {
-               kvm_request_apicv_update(kvm, true,
-                                        APICV_INHIBIT_REASON_PIT_REINJ);
+               kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
                kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
                kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
        }
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0884c34..e42586a 100644
@@ -2918,7 +2918,7 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu)
         * In this case AVIC was temporarily disabled for
         * requesting the IRQ window and we have to re-enable it.
         */
-       kvm_request_apicv_update(vcpu->kvm, true, APICV_INHIBIT_REASON_IRQWIN);
+       kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
 
        ++vcpu->stat.irq_window_exits;
        return 1;
@@ -3516,7 +3516,7 @@ static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
                 * via AVIC. In such case, we need to temporarily disable AVIC,
                 * and fallback to injecting IRQ via V_IRQ.
                 */
-               kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_IRQWIN);
+               kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
                svm_set_vintr(svm);
        }
 }
@@ -3948,6 +3948,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_cpuid_entry2 *best;
+       struct kvm *kvm = vcpu->kvm;
 
        vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
                                    boot_cpu_has(X86_FEATURE_XSAVE) &&
@@ -3974,16 +3975,14 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                 * is exposed to the guest, disable AVIC.
                 */
                if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
-                       kvm_request_apicv_update(vcpu->kvm, false,
-                                                APICV_INHIBIT_REASON_X2APIC);
+                       kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_X2APIC);
 
                /*
                 * Currently, AVIC does not work with nested virtualization.
                 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
                 */
                if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-                       kvm_request_apicv_update(vcpu->kvm, false,
-                                                APICV_INHIBIT_REASON_NESTED);
+                       kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_NESTED);
        }
        init_vmcb_after_set_cpuid(vcpu);
 }
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index cf3e483..105037a 100644
@@ -1340,17 +1340,17 @@ TRACE_EVENT(kvm_hv_stimer_cleanup,
 );
 
 TRACE_EVENT(kvm_apicv_update_request,
-           TP_PROTO(bool activate, int reason),
-           TP_ARGS(activate, reason),
+           TP_PROTO(int reason, bool activate),
+           TP_ARGS(reason, activate),
 
        TP_STRUCT__entry(
-               __field(bool, activate)
                __field(int, reason)
+               __field(bool, activate)
        ),
 
        TP_fast_assign(
-               __entry->activate = activate;
                __entry->reason = reason;
+               __entry->activate = activate;
        ),
 
        TP_printk("%s reason=%u",
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1438151..ede8126 100644
@@ -5938,7 +5938,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
                kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
-               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
+               kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
                r = 0;
 split_irqchip_unlock:
                mutex_unlock(&kvm->lock);
@@ -6335,7 +6335,7 @@ set_identity_unlock:
                /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
-               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
+               kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
        create_irqchip_unlock:
                mutex_unlock(&kvm->lock);
                break;
@@ -9744,8 +9744,8 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
 
-void __kvm_request_apicv_update(struct kvm *kvm, bool activate,
-                               enum kvm_apicv_inhibit reason)
+void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                     enum kvm_apicv_inhibit reason, bool set)
 {
        unsigned long old, new;
 
@@ -9756,13 +9756,13 @@ void __kvm_request_apicv_update(struct kvm *kvm, bool activate,
 
        old = new = kvm->arch.apicv_inhibit_reasons;
 
-       if (activate)
-               __clear_bit(reason, &new);
-       else
+       if (set)
                __set_bit(reason, &new);
+       else
+               __clear_bit(reason, &new);
 
        if (!!old != !!new) {
-               trace_kvm_apicv_update_request(activate, reason);
+               trace_kvm_apicv_update_request(reason, !set);
                /*
                 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid
                 * false positives in the sanity check WARN in svm_vcpu_run().
@@ -9786,17 +9786,17 @@ void __kvm_request_apicv_update(struct kvm *kvm, bool activate,
        }
 }
 
-void kvm_request_apicv_update(struct kvm *kvm, bool activate,
-                             enum kvm_apicv_inhibit reason)
+void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
+                                   enum kvm_apicv_inhibit reason, bool set)
 {
        if (!enable_apicv)
                return;
 
        down_write(&kvm->arch.apicv_update_lock);
-       __kvm_request_apicv_update(kvm, activate, reason);
+       __kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
        up_write(&kvm->arch.apicv_update_lock);
 }
-EXPORT_SYMBOL_GPL(kvm_request_apicv_update);
+EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
@@ -10944,7 +10944,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
 {
-       bool inhibit = false;
+       bool set = false;
        struct kvm_vcpu *vcpu;
        unsigned long i;
 
@@ -10952,11 +10952,11 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
-                       inhibit = true;
+                       set = true;
                        break;
                }
        }
-       __kvm_request_apicv_update(kvm, !inhibit, APICV_INHIBIT_REASON_BLOCKIRQ);
+       __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
        up_write(&kvm->arch.apicv_update_lock);
 }