Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index aafcc98..aa66ccd 100644
@@ -2880,18 +2880,15 @@ static void setup_msrs(struct vcpu_vmx *vmx)
                vmx_update_msr_bitmap(&vmx->vcpu);
 }
 
-/*
- * reads and returns guest's timestamp counter "register"
- * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
- * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
- */
-static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
+static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
 {
-       u64 host_tsc, tsc_offset;
+       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
-       host_tsc = rdtsc();
-       tsc_offset = vmcs_read64(TSC_OFFSET);
-       return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
+       if (is_guest_mode(vcpu) &&
+           (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+               return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
+
+       return vcpu->arch.tsc_offset;
 }
 
 /*
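Annotation: the open-coded guest_read_tsc() is gone; VMX now only reports L1's TSC offset, and the guest-TSC computation (guest_tsc = scaled host_tsc + tsc_offset, per the deleted comment) moves to common x86 code. A minimal sketch of the consumer side, assuming the x86.c half of this series; kvm_read_l1_tsc() itself is real and is used by vmx_set_hv_timer() further down:

u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
        /* Ask the vendor module for L1's offset, then apply TSC scaling. */
        u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);

        return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
}
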
@@ -3524,9 +3521,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 #endif
        case MSR_EFER:
                return kvm_get_msr_common(vcpu, msr_info);
-       case MSR_IA32_TSC:
-               msr_info->data = guest_read_tsc(vcpu);
-               break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
@@ -3646,9 +3640,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                vmcs_write64(GUEST_BNDCFGS, data);
                break;
-       case MSR_IA32_TSC:
-               kvm_write_tsc(vcpu, msr_info);
-               break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
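Annotation: both MSR_IA32_TSC cases drop out of the VMX module, so reads and writes now fall through to kvm_get_msr_common()/kvm_set_msr_common(). The common-code read presumably reuses the formula from the deleted comment, driven by the cached offset; a sketch, assuming the x86.c half of the series:

        case MSR_IA32_TSC:
                /* Scaled host TSC plus the current level's offset. */
                msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) +
                                 vcpu->arch.tsc_offset;
                break;

The write side would simply invoke kvm_write_tsc(vcpu, msr_info) from common code instead.
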
@@ -10608,6 +10599,16 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
        return true;
 }
 
+static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
+                                         struct vmcs12 *vmcs12)
+{
+       if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
+           !page_address_valid(vcpu, vmcs12->apic_access_addr))
+               return -EINVAL;
+       else
+               return 0;
+}
+
 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
                                           struct vmcs12 *vmcs12)
 {
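Annotation: the new check fails nested VM-entry when APIC-access-page virtualization is enabled but the supplied address is not a usable guest-physical address. It leans on page_address_valid(); a sketch of that helper, assuming its usual definition elsewhere in this file:

static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        /* Page aligned and within the guest's physical-address width. */
        return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}
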
@@ -11176,11 +11177,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
        }
 
-       if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
-               vmcs_write64(TSC_OFFSET,
-                       vcpu->arch.tsc_offset + vmcs12->tsc_offset);
-       else
-               vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+       vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+
        if (kvm_has_tsc_control)
                decache_tsc_multiplier(vmx);
 
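Annotation: the conditional TSC_OFFSET computation disappears because the caller now keeps vcpu->arch.tsc_offset equal to the offset of the level about to run: enter_vmx_non_root_mode() folds vmcs12->tsc_offset in before prepare_vmcs02() is reached (see the sketch after that hunk below), so the field can be written straight from the cached value.
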
@@ -11299,6 +11297,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
        if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
                return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
+       if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
+               return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
        if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
                return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
@@ -11420,6 +11421,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        u32 msr_entry_idx;
        u32 exit_qual;
+       int r;
 
        enter_guest_mode(vcpu);
 
@@ -11429,26 +11431,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
        vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
        vmx_segment_cache_clear(vmx);
 
-       if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) {
-               leave_guest_mode(vcpu);
-               vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-               nested_vmx_entry_failure(vcpu, vmcs12,
-                                        EXIT_REASON_INVALID_STATE, exit_qual);
-               return 1;
-       }
+       if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+               vcpu->arch.tsc_offset += vmcs12->tsc_offset;
+
+       r = EXIT_REASON_INVALID_STATE;
+       if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual))
+               goto fail;
 
        nested_get_vmcs12_pages(vcpu, vmcs12);
 
+       r = EXIT_REASON_MSR_LOAD_FAIL;
        msr_entry_idx = nested_vmx_load_msr(vcpu,
                                            vmcs12->vm_entry_msr_load_addr,
                                            vmcs12->vm_entry_msr_load_count);
-       if (msr_entry_idx) {
-               leave_guest_mode(vcpu);
-               vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-               nested_vmx_entry_failure(vcpu, vmcs12,
-                               EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
-               return 1;
-       }
+       if (msr_entry_idx)
+               goto fail;
 
        /*
         * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11457,6 +11454,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
         * the success flag) when L2 exits (see nested_vmx_vmexit()).
         */
        return 0;
+
+fail:
+       if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+               vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+       leave_guest_mode(vcpu);
+       vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+       nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
+       return 1;
 }
 
 /*
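Annotation: entry now adjusts the cached offset up front, and the consolidated fail: path undoes it, mirroring the subtraction nested_vmx_vmexit() gains below. A hypothetical helper (not in the commit) showing the invariant the pair maintains, namely that vcpu->arch.tsc_offset always belongs to the currently running level:

/* Hypothetical illustration only: if L1 programmed offset A and
 * vmcs12->tsc_offset is B, entry leaves A + B cached (what L2 runs
 * with) and exit restores A; vmx_read_l1_tsc_offset() above computes
 * (A + B) - B == A while L2 runs.
 */
static void nested_adjust_tsc_offset(struct kvm_vcpu *vcpu,
                                     struct vmcs12 *vmcs12, bool enter)
{
        if (!(vmcs12->cpu_based_vm_exec_control &
              CPU_BASED_USE_TSC_OFFSETING))
                return;

        if (enter)
                vcpu->arch.tsc_offset += vmcs12->tsc_offset;    /* L1 -> L2 */
        else
                vcpu->arch.tsc_offset -= vmcs12->tsc_offset;    /* L2 -> L1 */
}
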
@@ -12028,6 +12033,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 
        leave_guest_mode(vcpu);
 
+       if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+               vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+
        if (likely(!vmx->fail)) {
                if (exit_reason == -1)
                        sync_vmcs12(vcpu, vmcs12);
@@ -12224,10 +12232,16 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift,
 
 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
 {
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-       u64 tscl = rdtsc();
-       u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
-       u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
+       struct vcpu_vmx *vmx;
+       u64 tscl, guest_tscl, delta_tsc;
+
+       if (kvm_mwait_in_guest(vcpu->kvm))
+               return -EOPNOTSUPP;
+
+       vmx = to_vmx(vcpu);
+       tscl = rdtsc();
+       guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
+       delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
 
        /* Convert to host delta tsc if tsc scaling is enabled */
        if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
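Annotation: the early -EOPNOTSUPP bail-out is new; presumably, with MWAIT executing un-intercepted in the guest, the CPU can enter sleep states in which the VMX preemption timer stops counting, so the hv timer cannot be relied on. The hunk's context ends mid-condition; the remainder of that statement is not shown. A sketch of the conversion it performs (an assumption, inferred from u64_shl_div_u64() in the hunk header and the comment above), inverting guest = (host * ratio) >> frac_bits so that host_delta = (guest_delta << frac_bits) / ratio:

        if (delta_tsc &&
            u64_shl_div_u64(delta_tsc, kvm_tsc_scaling_ratio_frac_bits,
                            vcpu->arch.tsc_scaling_ratio, &delta_tsc))
                return -ERANGE;
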
@@ -12533,7 +12547,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
                vcpu_info.vector = irq.vector;
 
-               trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi,
+               trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
                                vcpu_info.vector, vcpu_info.pi_desc_addr, set);
 
                if (set)
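Annotation: the two leading arguments were transposed at this call site; the tracepoint takes host_irq first. The generated stub's signature, inferred from the corrected call (parameter names are guesses; the event itself is defined in arch/x86/kvm/trace.h):

void trace_kvm_pi_irte_update(unsigned int host_irq, unsigned int vcpu_id,
                              unsigned int gsi, unsigned int vector,
                              u64 pi_desc_addr, bool set);
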
@@ -12712,6 +12726,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
        .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
+       .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
        .write_tsc_offset = vmx_write_tsc_offset,
 
        .set_tdp_cr3 = vmx_set_cr3,
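
Annotation: the ops table gains the new hook right beside write_tsc_offset. The matching members in struct kvm_x86_ops (arch/x86/include/asm/kvm_host.h) would be, inferred from the VMX implementations above rather than from this diff:

        u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);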