Merge branch 'kvm-basic-exit-reason' into HEAD
author: Paolo Bonzini <pbonzini@redhat.com>
Tue, 9 Jun 2020 10:08:48 +0000 (06:08 -0400)
committer: Paolo Bonzini <pbonzini@redhat.com>
Thu, 11 Jun 2020 16:35:14 +0000 (12:35 -0400)
Using a topic branch so that stable branches can simply cherry-pick the
patch.

Reviewed-by: Oliver Upton <oupton@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1  2 
arch/x86/kvm/vmx/nested.c

@@@ -5665,81 -5551,45 +5665,81 @@@ static bool nested_vmx_exit_handled_mtf
  }
  
  /*
 - * Return true if we should exit from L2 to L1 to handle an exit, or false if we
 - * should handle it ourselves in L0 (and then continue L2). Only call this
 - * when in is_guest_mode (L2).
 + * Return true if L0 wants to handle an exit from L2 regardless of whether or not
 + * L1 wants the exit.  Only call this when in is_guest_mode (L2).
   */
 -bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
 +static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
  {
 -      u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 -      struct vcpu_vmx *vmx = to_vmx(vcpu);
 -      struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 -
 -      WARN_ON_ONCE(vmx->nested.nested_run_pending);
 -
 -      if (unlikely(vmx->fail)) {
 -              trace_kvm_nested_vmenter_failed(
 -                      "hardware VM-instruction error: ",
 -                      vmcs_read32(VM_INSTRUCTION_ERROR));
 -              return true;
 -      }
 -
 -      trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
 -                              vmcs_readl(EXIT_QUALIFICATION),
 -                              vmx->idt_vectoring_info,
 -                              intr_info,
 -                              vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
 -                              KVM_ISA_VMX);
 +      u32 intr_info;
  
-       switch (exit_reason) {
+       switch ((u16)exit_reason) {
        case EXIT_REASON_EXCEPTION_NMI:
 +              intr_info = vmx_get_intr_info(vcpu);
                if (is_nmi(intr_info))
 -                      return false;
 +                      return true;
                else if (is_page_fault(intr_info))
 -                      return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
 +                      return vcpu->arch.apf.host_apf_flags || !enable_ept;
                else if (is_debug(intr_info) &&
                         vcpu->guest_debug &
                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
 -                      return false;
 +                      return true;
                else if (is_breakpoint(intr_info) &&
                         vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
 -                      return false;
 +                      return true;
 +              return false;
 +      case EXIT_REASON_EXTERNAL_INTERRUPT:
 +              return true;
 +      case EXIT_REASON_MCE_DURING_VMENTRY:
 +              return true;
 +      case EXIT_REASON_EPT_VIOLATION:
 +              /*
 +               * L0 always deals with the EPT violation. If nested EPT is
 +               * used, and the nested mmu code discovers that the address is
 +               * missing in the guest EPT table (EPT12), the EPT violation
 +               * will be injected with nested_ept_inject_page_fault()
 +               */
 +              return true;
 +      case EXIT_REASON_EPT_MISCONFIG:
 +              /*
 +               * L2 never uses directly L1's EPT, but rather L0's own EPT
 +               * table (shadow on EPT) or a merged EPT table that L0 built
 +               * (EPT on EPT). So any problems with the structure of the
 +               * table is L0's fault.
 +               */
 +              return true;
 +      case EXIT_REASON_PREEMPTION_TIMER:
 +              return true;
 +      case EXIT_REASON_PML_FULL:
 +              /* We emulate PML support to L1. */
 +              return true;
 +      case EXIT_REASON_VMFUNC:
 +              /* VM functions are emulated through L2->L0 vmexits. */
 +              return true;
 +      case EXIT_REASON_ENCLS:
 +              /* SGX is never exposed to L1 */
 +              return true;
 +      default:
 +              break;
 +      }
 +      return false;
 +}
 +
 +/*
 + * Return 1 if L1 wants to intercept an exit from L2.  Only call this when in
 + * is_guest_mode (L2).
 + */
 +static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
 +{
 +      struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 +      u32 intr_info;
 +
-       switch (exit_reason) {
++      switch ((u16)exit_reason) {
 +      case EXIT_REASON_EXCEPTION_NMI:
 +              intr_info = vmx_get_intr_info(vcpu);
 +              if (is_nmi(intr_info))
 +                      return true;
 +              else if (is_page_fault(intr_info))
 +                      return true;
                return vmcs12->exception_bitmap &
                                (1u << (intr_info & INTR_INFO_VECTOR_MASK));
        case EXIT_REASON_EXTERNAL_INTERRUPT: