KVM: VMX: Split out architectural interrupt/NMI blocking checks
author    Sean Christopherson <sean.j.christopherson@intel.com>
          Thu, 23 Apr 2020 02:25:44 +0000 (19:25 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
          Wed, 13 May 2020 16:14:39 +0000 (12:14 -0400)
Move the architectural (non-KVM specific) interrupt/NMI blocking checks
to separate helpers so that they can be used in a future patch by
vmx_check_nested_events().

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200423022550.15113-8-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h

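For context before the diff: the *_allowed() wrappers mix KVM-specific state
(nested_run_pending) with architectural state, and a nested-events check only
wants the latter. A minimal sketch of how a future vmx_check_nested_events()
might consume the split-out helpers; the function name and exact VM-exit
plumbing below are illustrative assumptions, not the actual follow-up patch:

/*
 * Illustrative sketch only -- not the real follow-up patch.  The new
 * helpers answer the architectural question ("would the CPU deliver this
 * event right now?") without the KVM-specific nested_run_pending check,
 * which is what a nested-events walk needs when deciding whether to
 * synthesize a VM-exit to L1.
 */
static int example_check_nested_events(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu) &&
	    !vmx_nmi_blocked(vcpu)) {
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  NMI_VECTOR | INTR_TYPE_NMI_INTR |
				  INTR_INFO_VALID_MASK, 0);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu) &&
	    !vmx_interrupt_blocked(vcpu)) {
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
		return 0;
	}

	return 0;
}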
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f688e6e..a5140ed 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4510,21 +4510,35 @@ void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
        }
 }
 
+bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
+{
+       if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
+               return false;
+
+       if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
+               return true;
+
+       return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+               (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
+                GUEST_INTR_STATE_NMI));
+}
+
 static bool vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 {
        if (to_vmx(vcpu)->nested.nested_run_pending)
                return false;
 
-       if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
-               return true;
+       return !vmx_nmi_blocked(vcpu);
+}
 
-       if (!enable_vnmi &&
-           to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
+bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
+{
+       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
                return false;
 
-       return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
-                 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
-                  | GUEST_INTR_STATE_NMI));
+       return !(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) ||
+              (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+               (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
 }
 
 static bool vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
@@ -4532,12 +4546,7 @@ static bool vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
        if (to_vmx(vcpu)->nested.nested_run_pending)
                return false;
 
-       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
-               return true;
-
-       return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
-               !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
-                       (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
+       return !vmx_interrupt_blocked(vcpu);
 }
 
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index edfb739..b5e7732 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -344,6 +344,8 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
 void update_exception_bitmap(struct kvm_vcpu *vcpu);
 void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
+bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
+bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
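
For reference, the architectural predicates the new helpers encode map
directly onto RFLAGS.IF and the guest interruptibility-state field from the
Intel SDM. A self-contained userspace sketch of the same logic (the constants
are restated here for illustration; in KVM they come from vmx.h and
asm/processor-flags.h):

#include <stdbool.h>
#include <stdint.h>

#define X86_EFLAGS_IF           (1u << 9)  /* RFLAGS.IF: interrupt enable */
#define GUEST_INTR_STATE_STI    (1u << 0)  /* blocking by STI */
#define GUEST_INTR_STATE_MOV_SS (1u << 1)  /* blocking by MOV SS / POP SS */
#define GUEST_INTR_STATE_NMI    (1u << 3)  /* blocking by NMI (in handler) */

/* IRQs are blocked if IF=0 or an STI/MOV-SS interrupt shadow is active. */
static bool irq_blocked(uint64_t rflags, uint32_t intr_state)
{
	return !(rflags & X86_EFLAGS_IF) ||
	       (intr_state & (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}

/* NMIs ignore IF but additionally respect NMI blocking. */
static bool nmi_blocked(uint32_t intr_state)
{
	return intr_state & (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
			     GUEST_INTR_STATE_NMI);
}

int main(void)
{
	/* IF=1 but an STI shadow is active: IRQs are still blocked. */
	return irq_blocked(X86_EFLAGS_IF, GUEST_INTR_STATE_STI) ? 0 : 1;
}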