KVM: nVMX: Add VM-Enter failed tracepoints for super early checks
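
The nested_vmx_run() hunks below wrap the early consistency checks in
CC() so that a failing check reports which condition tripped via the
kvm_nested_vmenter_failed tracepoint instead of failing silently. For
reference, CC() stringifies the checked expression before tracing it;
a simplified sketch of the macro (as defined alongside this code in
arch/x86/kvm/vmx/nested.h, not the verbatim definition):

  #define CC(consistency_check)                                         \
  ({                                                                    \
          bool failed = (consistency_check);                            \
          if (failed)                                                   \
                  trace_kvm_nested_vmenter_failed(#consistency_check, 0); \
          failed;                                                       \
  })
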
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 23b58c2..473fa40 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2314,6 +2314,9 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
                        vmcs_write16(GUEST_INTR_STATUS,
                                vmcs12->guest_intr_status);
 
+               if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
+                       exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
+
                secondary_exec_controls_set(vmx, exec_control);
        }
 
@@ -3468,11 +3471,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        if (evmptrld_status == EVMPTRLD_ERROR) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
-       } else if (evmptrld_status == EVMPTRLD_VMFAIL) {
+       } else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) {
                return nested_vmx_failInvalid(vcpu);
        }
 
-       if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
+       if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull))
                return nested_vmx_failInvalid(vcpu);
 
        vmcs12 = get_vmcs12(vcpu);
@@ -3483,7 +3486,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
         * rather than RFLAGS.ZF, and no error number is stored to the
         * VM-instruction error field.
         */
-       if (vmcs12->hdr.shadow_vmcs)
+       if (CC(vmcs12->hdr.shadow_vmcs))
                return nested_vmx_failInvalid(vcpu);
 
        if (vmx->nested.hv_evmcs) {
@@ -3504,10 +3507,10 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
         * for misconfigurations which will anyway be caught by the processor
         * when using the merged vmcs02.
         */
-       if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
+       if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
                return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
 
-       if (vmcs12->launch_state == launch)
+       if (CC(vmcs12->launch_state == launch))
                return nested_vmx_fail(vcpu,
                        launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
                               : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
@@ -3528,6 +3531,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        if (unlikely(status != NVMX_VMENTRY_SUCCESS))
                goto vmentry_failed;
 
+       /* Emulate processing of posted interrupts on VM-Enter. */
+       if (nested_cpu_has_posted_intr(vmcs12) &&
+           kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
+               vmx->nested.pi_pending = true;
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
+               kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
+       }
+
        /* Hide L1D cache contents from the nested guest.  */
        vmx->vcpu.arch.l1tf_flush_l1d = true;
 
@@ -4404,6 +4415,14 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
        if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
                kvm_vcpu_flush_tlb_current(vcpu);
 
+       /*
+        * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
+        * now and the new vmentry.  Ensure that the VMCS02 PDPTR fields are
+        * up-to-date before switching to L1.
+        */
+       if (enable_ept && is_pae_paging(vcpu))
+               vmx_ept_load_pdptrs(vcpu);
+
        leave_guest_mode(vcpu);
 
        if (nested_cpu_has_preemption_timer(vmcs12))
@@ -4668,7 +4687,7 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
                vmx->nested.msrs.entry_ctls_high &=
                                ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
                vmx->nested.msrs.exit_ctls_high &=
-                               ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+                               ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
        }
 }
 
@@ -4688,7 +4707,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
 
        r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
        if (r != X86EMUL_CONTINUE) {
-               *ret = vmx_handle_memory_failure(vcpu, r, &e);
+               *ret = kvm_handle_memory_failure(vcpu, r, &e);
                return -EINVAL;
        }
 
@@ -4995,7 +5014,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                /* _system ok, nested_vmx_check_permission has verified cpl=0 */
                r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
                if (r != X86EMUL_CONTINUE)
-                       return vmx_handle_memory_failure(vcpu, r, &e);
+                       return kvm_handle_memory_failure(vcpu, r, &e);
        }
 
        return nested_vmx_succeed(vcpu);
@@ -5068,7 +5087,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
                        return 1;
                r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
                if (r != X86EMUL_CONTINUE)
-                       return vmx_handle_memory_failure(vcpu, r, &e);
+                       return kvm_handle_memory_failure(vcpu, r, &e);
        }
 
        field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
@@ -5230,7 +5249,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
        r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
                                        sizeof(gpa_t), &e);
        if (r != X86EMUL_CONTINUE)
-               return vmx_handle_memory_failure(vcpu, r, &e);
+               return kvm_handle_memory_failure(vcpu, r, &e);
 
        return nested_vmx_succeed(vcpu);
 }
@@ -5283,7 +5302,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
                return 1;
        r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
        if (r != X86EMUL_CONTINUE)
-               return vmx_handle_memory_failure(vcpu, r, &e);
+               return kvm_handle_memory_failure(vcpu, r, &e);
 
        /*
         * Nested EPT roots are always held through guest_mmu,
@@ -5365,7 +5384,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
                return 1;
        r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
        if (r != X86EMUL_CONTINUE)
-               return vmx_handle_memory_failure(vcpu, r, &e);
+               return kvm_handle_memory_failure(vcpu, r, &e);
 
        if (operand.vpid >> 16)
                return nested_vmx_fail(vcpu,
@@ -6310,7 +6329,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
 #ifdef CONFIG_X86_64
                VM_EXIT_HOST_ADDR_SPACE_SIZE |
 #endif
-               VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
+               VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
+               VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
        msrs->exit_ctls_high |=
                VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
                VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
@@ -6329,7 +6349,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
 #ifdef CONFIG_X86_64
                VM_ENTRY_IA32E_MODE |
 #endif
-               VM_ENTRY_LOAD_IA32_PAT;
+               VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS |
+               VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
        msrs->entry_ctls_high |=
                (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);