KVM: nVMX: assimilate nested_vmx_entry_failure() into nested_vmx_enter_non_root_mode()
authorSean Christopherson <sean.j.christopherson@intel.com>
Wed, 26 Sep 2018 16:23:47 +0000 (09:23 -0700)
committerPaolo Bonzini <pbonzini@redhat.com>
Tue, 16 Oct 2018 22:29:53 +0000 (00:29 +0200)
Handling all VMExits due to failed consistency checks on VMEnter in
nested_vmx_enter_non_root_mode() consolidates all relevant code into
a single location, and removing nested_vmx_entry_failure() eliminates
a confusing function name and label.  For a VMEntry, "fail" and its
derivatives have a very specific meaning due to the different behavior
of a VMEnter VMFail versus VMExit, i.e. it wasn't obvious that
nested_vmx_entry_failure() handled VMExit scenarios.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx.c

index 9326f6a..7906dd4 100644 (file)
@@ -2048,9 +2048,6 @@ static inline bool is_nmi(u32 intr_info)
 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                              u32 exit_intr_info,
                              unsigned long exit_qualification);
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
-                       struct vmcs12 *vmcs12,
-                       u32 reason, unsigned long qualification);
 
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
@@ -12640,26 +12637,29 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        return 0;
 }
 
+static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+                                  struct vmcs12 *vmcs12);
+
 /*
- * If exit_qual is NULL, this is being called from state restore (either RSM
+ * If from_vmentry is false, this is being called from state restore (either RSM
  * or KVM_SET_NESTED_STATE).  Otherwise it's called from vmlaunch/vmresume.
  */
-static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
+static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+                                         bool from_vmentry)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-       bool from_vmentry = !!exit_qual;
-       u32 dummy_exit_qual;
        bool evaluate_pending_interrupts;
-       int r = 0;
+       u32 exit_reason = EXIT_REASON_INVALID_STATE;
+       u32 exit_qual;
 
        evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
                (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
        if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
                evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
 
-       if (from_vmentry && check_vmentry_postreqs(vcpu, vmcs12, exit_qual))
-               return EXIT_REASON_INVALID_STATE;
+       if (from_vmentry && check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+               goto vmentry_fail_vmexit;
 
        enter_guest_mode(vcpu);
 
@@ -12674,18 +12674,17 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
        if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
                vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
-       r = EXIT_REASON_INVALID_STATE;
-       if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual))
+       if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
                goto fail;
 
        if (from_vmentry) {
                nested_get_vmcs12_pages(vcpu);
 
-               r = EXIT_REASON_MSR_LOAD_FAIL;
-               *exit_qual = nested_vmx_load_msr(vcpu,
-                                                vmcs12->vm_entry_msr_load_addr,
-                                                vmcs12->vm_entry_msr_load_count);
-               if (*exit_qual)
+               exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
+               exit_qual = nested_vmx_load_msr(vcpu,
+                                               vmcs12->vm_entry_msr_load_addr,
+                                               vmcs12->vm_entry_msr_load_count);
+               if (exit_qual)
                        goto fail;
        } else {
                /*
@@ -12723,12 +12722,28 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
         */
        return 0;
 
+       /*
+        * A failed consistency check that leads to a VMExit during L1's
+        * VMEnter to L2 is a variation of a normal VMExit, as explained in
+        * 26.7 "VM-entry failures during or after loading guest state".
+        */
 fail:
        if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
                vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
        leave_guest_mode(vcpu);
        vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-       return r;
+
+       if (!from_vmentry)
+               return 1;
+
+vmentry_fail_vmexit:
+       load_vmcs12_host_state(vcpu, vmcs12);
+       vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
+       vmcs12->exit_qualification = exit_qual;
+       nested_vmx_succeed(vcpu);
+       if (enable_shadow_vmcs)
+               vmx->nested.sync_shadow_vmcs = true;
+       return 1;
 }
 
 /*
@@ -12740,7 +12755,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        struct vmcs12 *vmcs12;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
-       u32 exit_qual;
        int ret;
 
        if (!nested_vmx_check_permission(vcpu))
@@ -12809,9 +12823,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
         */
 
        vmx->nested.nested_run_pending = 1;
-       ret = nested_vmx_enter_non_root_mode(vcpu, &exit_qual);
+       ret = nested_vmx_enter_non_root_mode(vcpu, true);
        if (ret) {
-               nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual);
                vmx->nested.nested_run_pending = 0;
                return 1;
        }
@@ -13609,25 +13622,6 @@ static void vmx_leave_nested(struct kvm_vcpu *vcpu)
        free_nested(to_vmx(vcpu));
 }
 
-/*
- * L1's failure to enter L2 is a subset of a normal exit, as explained in
- * 23.7 "VM-entry failures during or after loading guest state" (this also
- * lists the acceptable exit-reason and exit-qualification parameters).
- * It should only be called before L2 actually succeeded to run, and when
- * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
- */
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
-                       struct vmcs12 *vmcs12,
-                       u32 reason, unsigned long qualification)
-{
-       load_vmcs12_host_state(vcpu, vmcs12);
-       vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
-       vmcs12->exit_qualification = qualification;
-       nested_vmx_succeed(vcpu);
-       if (enable_shadow_vmcs)
-               to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
-}
-
 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage)
@@ -14051,7 +14045,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 
        if (vmx->nested.smm.guest_mode) {
                vcpu->arch.hflags &= ~HF_SMM_MASK;
-               ret = nested_vmx_enter_non_root_mode(vcpu, NULL);
+               ret = nested_vmx_enter_non_root_mode(vcpu, false);
                vcpu->arch.hflags |= HF_SMM_MASK;
                if (ret)
                        return ret;
@@ -14257,7 +14251,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                return -EINVAL;
 
        vmx->nested.dirty_vmcs12 = true;
-       ret = nested_vmx_enter_non_root_mode(vcpu, NULL);
+       ret = nested_vmx_enter_non_root_mode(vcpu, false);
        if (ret)
                return -EINVAL;