KVM: x86: Add a return code to kvm_apic_accept_events
author Jim Mattson <jmattson@google.com>
Fri, 4 Jun 2021 17:26:04 +0000 (10:26 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 17 Jun 2021 17:09:31 +0000 (13:09 -0400)
No functional change intended. At present, the only negative value
returned by kvm_check_nested_events is -EBUSY.

Signed-off-by: Jim Mattson <jmattson@google.com>
Message-Id: <20210604172611.281819-6-jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/x86.c

index 17fa4ab..4b80e61 100644 (file)
@@ -2872,7 +2872,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
        return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
 }
 
-void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
+int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
        u8 sipi_vector;
@@ -2880,7 +2880,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
        unsigned long pe;
 
        if (!lapic_in_kernel(vcpu))
-               return;
+               return 0;
 
        /*
         * Read pending events before calling the check_events
@@ -2888,12 +2888,12 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
         */
        pe = smp_load_acquire(&apic->pending_events);
        if (!pe)
-               return;
+               return 0;
 
        if (is_guest_mode(vcpu)) {
                r = kvm_check_nested_events(vcpu);
                if (r < 0)
-                       return;
+                       return r == -EBUSY ? 0 : r;
                /*
                 * If an event has happened and caused a vmexit,
                 * we know INITs are latched and therefore
@@ -2914,7 +2914,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
                WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
                if (test_bit(KVM_APIC_SIPI, &pe))
                        clear_bit(KVM_APIC_SIPI, &apic->pending_events);
-               return;
+               return 0;
        }
 
        if (test_bit(KVM_APIC_INIT, &pe)) {
@@ -2935,6 +2935,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                }
        }
+       return 0;
 }
 
 void kvm_lapic_exit(void)
index 997c45a..d7c25d0 100644 (file)
@@ -76,7 +76,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
-void kvm_apic_accept_events(struct kvm_vcpu *vcpu);
+int kvm_apic_accept_events(struct kvm_vcpu *vcpu);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
index 1ae827f..d1fdbaa 100644 (file)
@@ -9335,7 +9335,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
            kvm_xen_has_interrupt(vcpu)) {
                ++vcpu->stat.req_event;
-               kvm_apic_accept_events(vcpu);
+               r = kvm_apic_accept_events(vcpu);
+               if (r < 0) {
+                       r = 0;
+                       goto out;
+               }
                if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
                        r = 1;
                        goto out;
@@ -9547,7 +9551,8 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
                        return 1;
        }
 
-       kvm_apic_accept_events(vcpu);
+       if (kvm_apic_accept_events(vcpu) < 0)
+               return 0;
        switch(vcpu->arch.mp_state) {
        case KVM_MP_STATE_HALTED:
        case KVM_MP_STATE_AP_RESET_HOLD:
@@ -9771,7 +9776,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                        goto out;
                }
                kvm_vcpu_block(vcpu);
-               kvm_apic_accept_events(vcpu);
+               if (kvm_apic_accept_events(vcpu) < 0) {
+                       r = 0;
+                       goto out;
+               }
                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                r = -EAGAIN;
                if (signal_pending(current)) {
@@ -9973,11 +9981,17 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
+       int r;
+
        vcpu_load(vcpu);
        if (kvm_mpx_supported())
                kvm_load_guest_fpu(vcpu);
 
-       kvm_apic_accept_events(vcpu);
+       r = kvm_apic_accept_events(vcpu);
+       if (r < 0)
+               goto out;
+       r = 0;
+
        if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
             vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
            vcpu->arch.pv.pv_unhalted)
@@ -9985,10 +9999,11 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
        else
                mp_state->mp_state = vcpu->arch.mp_state;
 
+out:
        if (kvm_mpx_supported())
                kvm_put_guest_fpu(vcpu);
        vcpu_put(vcpu);
-       return 0;
+       return r;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,