KVM: VMX: Use kvm_set_msr_common() for MSR_IA32_TSC_ADJUST in the default way
[linux-2.6-microblaze.git] / arch / x86 / kvm / vmx / vmx.c
index ffe4543..00f8203 100644 (file)
@@ -602,15 +602,13 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
        unsigned int slot = msr - vmx->guest_uret_msrs;
        int ret = 0;
 
-       u64 old_msr_data = msr->data;
-       msr->data = data;
        if (msr->load_into_hardware) {
                preempt_disable();
-               ret = kvm_set_user_return_msr(slot, msr->data, msr->mask);
+               ret = kvm_set_user_return_msr(slot, data, msr->mask);
                preempt_enable();
-               if (ret)
-                       msr->data = old_msr_data;
        }
+       if (!ret)
+               msr->data = data;
        return ret;
 }
 
@@ -1105,6 +1103,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_X86_64
        int cpu = raw_smp_processor_id();
 #endif
+       unsigned long cr3;
        unsigned long fs_base, gs_base;
        u16 fs_sel, gs_sel;
        int i;
@@ -1169,6 +1168,14 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 #endif
 
        vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
+
+       /* Host CR3 including its PCID is stable when guest state is loaded. */
+       cr3 = __get_current_cr3_fast();
+       if (unlikely(cr3 != host_state->cr3)) {
+               vmcs_writel(HOST_CR3, cr3);
+               host_state->cr3 = cr3;
+       }
+
        vmx->guest_state_loaded = true;
 }
 
@@ -1271,7 +1278,6 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
 
        if (!already_loaded) {
                void *gdt = get_current_gdt_ro();
-               unsigned long sysenter_esp;
 
                /*
                 * Flush all EPTP/VPID contexts, the new pCPU may have stale
@@ -1287,8 +1293,11 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
                            (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
                vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
 
-               rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
-               vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+               if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
+                       /* 22.2.3 */
+                       vmcs_writel(HOST_IA32_SYSENTER_ESP,
+                                   (unsigned long)(cpu_entry_stack(cpu) + 1));
+               }
 
                vmx->loaded_vmcs->cpu = cpu;
        }
@@ -2095,9 +2104,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                }
                ret = kvm_set_msr_common(vcpu, msr_info);
                break;
-       case MSR_IA32_TSC_ADJUST:
-               ret = kvm_set_msr_common(vcpu, msr_info);
-               break;
        case MSR_IA32_MCG_EXT_CTL:
                if ((!msr_info->host_initiated &&
                     !(to_vmx(vcpu)->msr_ia32_feature_control &
@@ -4026,6 +4032,12 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 
        rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
        vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
+
+       /*
+        * If 32-bit syscall is enabled, vmx_vcpu_load_vmcs rewrites
+        * HOST_IA32_SYSENTER_ESP.
+        */
+       vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
        rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
        vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
 
@@ -6606,7 +6618,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       unsigned long cr3, cr4;
+       unsigned long cr4;
 
        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!enable_vnmi &&
@@ -6649,12 +6661,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
        if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
-
-       cr3 = __get_current_cr3_fast();
-       if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
-               vmcs_writel(HOST_CR3, cr3);
-               vmx->loaded_vmcs->host_state.cr3 = cr3;
-       }
+       vcpu->arch.regs_dirty = 0;
 
        cr4 = cr4_read_shadow();
        if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
@@ -6743,7 +6750,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
        loadsegment(es, __USER_DS);
 #endif
 
-       vmx_register_cache_reset(vcpu);
+       vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
 
        pt_guest_exit(vmx);