KVM: x86: Move setting of sregs during vCPU RESET/INIT to common x86
author		Sean Christopherson <seanjc@google.com>
		Tue, 13 Jul 2021 16:33:14 +0000 (09:33 -0700)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Mon, 2 Aug 2021 15:01:57 +0000 (11:01 -0400)
Move the setting of CR0, CR4, EFER, RFLAGS, and RIP from vendor code to
common x86.  VMX and SVM now have near-identical sequences, the only
difference being that VMX updates the exception bitmap.  Updating the
bitmap on SVM is unnecessary, but benign.  Unfortunately it can't be left
behind in VMX due to the need to update exception intercepts after the
control registers are set.
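
The sketch below is a hedged, self-contained user-space C model of the pattern
this patch arrives at, not kernel code: common reset logic drives vendor hooks
through an ops table, analogous to the static_call(kvm_x86_*) wrappers over
struct kvm_x86_ops used in the diff.  The names demo_vcpu, demo_ops and the
fake_vmx_* callbacks are invented purely for illustration.

	#include <stdint.h>
	#include <stdio.h>

	#define X86_CR0_ET		(1UL << 4)
	#define X86_CR0_NW		(1UL << 29)
	#define X86_CR0_CD		(1UL << 30)
	#define X86_EFLAGS_FIXED	(1UL << 1)

	struct demo_vcpu {
		unsigned long cr0, cr4, rflags, rip;
		uint64_t efer;
	};

	/* Vendor hooks, standing in for the kvm_x86_ops members. */
	struct demo_ops {
		void (*set_cr0)(struct demo_vcpu *vcpu, unsigned long cr0);
		void (*set_cr4)(struct demo_vcpu *vcpu, unsigned long cr4);
		void (*set_efer)(struct demo_vcpu *vcpu, uint64_t efer);
		void (*update_exception_bitmap)(struct demo_vcpu *vcpu);
	};

	static void fake_vmx_set_cr0(struct demo_vcpu *vcpu, unsigned long cr0) { vcpu->cr0 = cr0; }
	static void fake_vmx_set_cr4(struct demo_vcpu *vcpu, unsigned long cr4) { vcpu->cr4 = cr4; }
	static void fake_vmx_set_efer(struct demo_vcpu *vcpu, uint64_t efer)    { vcpu->efer = efer; }
	static void fake_vmx_update_exception_bitmap(struct demo_vcpu *vcpu)    { (void)vcpu; }

	static const struct demo_ops fake_vmx_ops = {
		.set_cr0		  = fake_vmx_set_cr0,
		.set_cr4		  = fake_vmx_set_cr4,
		.set_efer		  = fake_vmx_set_efer,
		.update_exception_bitmap = fake_vmx_update_exception_bitmap,
	};

	/* Common RESET/INIT tail, mirroring the new code in kvm_vcpu_reset(). */
	static void demo_vcpu_reset(struct demo_vcpu *vcpu, const struct demo_ops *ops)
	{
		vcpu->rflags = X86_EFLAGS_FIXED;
		vcpu->rip    = 0xfff0;

		ops->set_cr0(vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
		ops->set_cr4(vcpu, 0);
		ops->set_efer(vcpu, 0);
		ops->update_exception_bitmap(vcpu);
	}

	int main(void)
	{
		struct demo_vcpu vcpu = { 0 };

		demo_vcpu_reset(&vcpu, &fake_vmx_ops);
		printf("cr0=%#lx rip=%#lx\n", vcpu.cr0, vcpu.rip);
		return 0;
	}

With the register setup in the common routine, a vendor implementation only has
to provide the hooks; it no longer duplicates the architectural reset values.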

Reviewed-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210713163324.627647-37-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 64563f8..8ebdcd9 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1249,12 +1249,6 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
        init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
        init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
 
-       svm_set_cr0(vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
-       svm_set_cr4(vcpu, 0);
-       svm_set_efer(vcpu, 0);
-       kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
-       vcpu->arch.regs[VCPU_REGS_RIP] = 0x0000fff0;
-
        if (npt_enabled) {
                /* Setup VMCB for Nested Paging */
                control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2e2b469..7518d4c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4455,9 +4455,6 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
                vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
        }
 
-       kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
-       kvm_rip_write(vcpu, 0xfff0);
-
        vmcs_writel(GUEST_GDTR_BASE, 0);
        vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
 
@@ -4485,12 +4482,6 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
        kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
-       vmx_set_cr0(vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
-       vmx_set_cr4(vcpu, 0);
-       vmx_set_efer(vcpu, 0);
-
-       vmx_update_exception_bitmap(vcpu);
-
        vpid_sync_context(vmx->vpid);
        if (init_event)
                vmx_clear_hlt(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c56788f..6c55f2e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10875,6 +10875,14 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
        static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
 
+       kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
+       kvm_rip_write(vcpu, 0xfff0);
+
+       static_call(kvm_x86_set_cr0)(vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
+       static_call(kvm_x86_set_cr4)(vcpu, 0);
+       static_call(kvm_x86_set_efer)(vcpu, 0);
+       static_call(kvm_x86_update_exception_bitmap)(vcpu);
+
        /*
         * Reset the MMU context if paging was enabled prior to INIT (which is
         * implied if CR0.PG=1 as CR0 will be '0' prior to RESET).  Unlike the