Merge branch 'kvm-on-hv-msrbm-fix' into HEAD
[linux-2.6-microblaze.git] / arch/x86/kvm/vmx/vmx.c
index 5aadad3..9d7f26e 100644
@@ -602,15 +602,13 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
        unsigned int slot = msr - vmx->guest_uret_msrs;
        int ret = 0;
 
-       u64 old_msr_data = msr->data;
-       msr->data = data;
        if (msr->load_into_hardware) {
                preempt_disable();
-               ret = kvm_set_user_return_msr(slot, msr->data, msr->mask);
+               ret = kvm_set_user_return_msr(slot, data, msr->mask);
                preempt_enable();
-               if (ret)
-                       msr->data = old_msr_data;
        }
+       if (!ret)
+               msr->data = data;
        return ret;
 }
 
@@ -1105,6 +1103,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_X86_64
        int cpu = raw_smp_processor_id();
 #endif
+       unsigned long cr3;
        unsigned long fs_base, gs_base;
        u16 fs_sel, gs_sel;
        int i;
@@ -1169,6 +1168,14 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 #endif
 
        vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
+
+       /* Host CR3 including its PCID is stable when guest state is loaded. */
+       cr3 = __get_current_cr3_fast();
+       if (unlikely(cr3 != host_state->cr3)) {
+               vmcs_writel(HOST_CR3, cr3);
+               host_state->cr3 = cr3;
+       }
+
        vmx->guest_state_loaded = true;
 }
 
@@ -1271,7 +1278,6 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
 
        if (!already_loaded) {
                void *gdt = get_current_gdt_ro();
-               unsigned long sysenter_esp;
 
                /*
                 * Flush all EPTP/VPID contexts, the new pCPU may have stale
@@ -1287,8 +1293,11 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
                            (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
                vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
 
-               rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
-               vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+               if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
+                       /* 22.2.3 */
+                       vmcs_writel(HOST_IA32_SYSENTER_ESP,
+                                   (unsigned long)(cpu_entry_stack(cpu) + 1));
+               }
 
                vmx->loaded_vmcs->cpu = cpu;
        }
@@ -1748,7 +1757,7 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
 }
 
 /*
- * Reads an msr value (of 'msr_index') into 'pdata'.
+ * Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
@@ -2095,9 +2104,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                }
                ret = kvm_set_msr_common(vcpu, msr_info);
                break;
-       case MSR_IA32_TSC_ADJUST:
-               ret = kvm_set_msr_common(vcpu, msr_info);
-               break;
        case MSR_IA32_MCG_EXT_CTL:
                if ((!msr_info->host_initiated &&
                     !(to_vmx(vcpu)->msr_ia32_feature_control &
@@ -2980,7 +2986,7 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
        mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
        mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
 
-       kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+       kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR);
 }
 
 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
@@ -3105,9 +3111,9 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
 
                if (!enable_unrestricted_guest && !is_paging(vcpu))
                        guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
-               else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+               else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3))
                        guest_cr3 = vcpu->arch.cr3;
-               else /* vmcs01.GUEST_CR3 is already up-to-date. */
+               else /* vmcs.GUEST_CR3 is already up-to-date. */
                        update_guest_cr3 = false;
                vmx_ept_load_pdptrs(vcpu);
        } else {
@@ -4017,6 +4023,12 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 
        rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
        vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
+
+       /*
+        * If 32-bit syscall is enabled, vmx_vcpu_load_vmcs rewrites
+        * HOST_IA32_SYSENTER_ESP.
+        */
+       vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
        rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
        vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
 
@@ -4035,8 +4047,10 @@ void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 
        vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
                                          ~vcpu->arch.cr4_guest_rsvd_bits;
-       if (!enable_ept)
-               vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
+       if (!enable_ept) {
+               vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
+               vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
+       }
        if (is_guest_mode(&vmx->vcpu))
                vcpu->arch.cr4_guest_owned_bits &=
                        ~get_vmcs12(vcpu)->cr4_guest_host_mask;
@@ -4688,7 +4702,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
                if (kvm_emulate_instruction(vcpu, 0)) {
                        if (vcpu->arch.halt_request) {
                                vcpu->arch.halt_request = 0;
-                               return kvm_vcpu_halt(vcpu);
+                               return kvm_emulate_halt_noskip(vcpu);
                        }
                        return 1;
                }
@@ -5359,7 +5373,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 
                if (vcpu->arch.halt_request) {
                        vcpu->arch.halt_request = 0;
-                       return kvm_vcpu_halt(vcpu);
+                       return kvm_emulate_halt_noskip(vcpu);
                }
 
                /*
@@ -6595,7 +6609,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       unsigned long cr3, cr4;
+       unsigned long cr4;
 
        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!enable_vnmi &&
@@ -6638,12 +6652,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
        if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
-
-       cr3 = __get_current_cr3_fast();
-       if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
-               vmcs_writel(HOST_CR3, cr3);
-               vmx->loaded_vmcs->host_state.cr3 = cr3;
-       }
+       vcpu->arch.regs_dirty = 0;
 
        cr4 = cr4_read_shadow();
        if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
@@ -6732,7 +6741,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
        loadsegment(es, __USER_DS);
 #endif
 
-       vmx_register_cache_reset(vcpu);
+       vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
 
        pt_guest_exit(vmx);
 
@@ -6951,7 +6960,6 @@ static int __init vmx_check_processor_compat(void)
 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
        u8 cache;
-       u64 ipat = 0;
 
        /* We wanted to honor guest CD/MTRR/PAT, but doing so could result in
         * memory aliases with conflicting memory types and sometimes MCEs.
@@ -6971,30 +6979,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
         * EPT memory type is used to emulate guest CD/MTRR.
         */
 
-       if (is_mmio) {
-               cache = MTRR_TYPE_UNCACHABLE;
-               goto exit;
-       }
+       if (is_mmio)
+               return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
 
-       if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
-               ipat = VMX_EPT_IPAT_BIT;
-               cache = MTRR_TYPE_WRBACK;
-               goto exit;
-       }
+       if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
+               return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
 
        if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
-               ipat = VMX_EPT_IPAT_BIT;
                if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
                        cache = MTRR_TYPE_WRBACK;
                else
                        cache = MTRR_TYPE_UNCACHABLE;
-               goto exit;
-       }
 
-       cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
+               return (cache << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
+       }
 
-exit:
-       return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
+       return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
 }
 
 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
@@ -7704,7 +7704,7 @@ static __init int hardware_setup(void)
 {
        unsigned long host_bndcfgs;
        struct desc_ptr dt;
-       int r, ept_lpage_level;
+       int r;
 
        store_idt(&dt);
        host_idt_base = dt.address;
@@ -7801,16 +7801,8 @@ static __init int hardware_setup(void)
                kvm_mmu_set_ept_masks(enable_ept_ad_bits,
                                      cpu_has_vmx_ept_execute_only());
 
-       if (!enable_ept)
-               ept_lpage_level = 0;
-       else if (cpu_has_vmx_ept_1g_page())
-               ept_lpage_level = PG_LEVEL_1G;
-       else if (cpu_has_vmx_ept_2m_page())
-               ept_lpage_level = PG_LEVEL_2M;
-       else
-               ept_lpage_level = PG_LEVEL_4K;
        kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(),
-                         ept_lpage_level);
+                         ept_caps_to_lpage_level(vmx_capability.ept));
 
        /*
         * Only enable PML when hardware supports PML feature, and both EPT