KVM: MMU: pass arbitrary CR0/CR4/EFER to kvm_init_shadow_mmu
Author:     Paolo Bonzini <pbonzini@redhat.com>
AuthorDate: Tue, 19 May 2020 10:18:31 +0000 (06:18 -0400)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Mon, 1 Jun 2020 08:26:03 +0000 (04:26 -0400)
This allows fetching the registers from the hsave area when setting
up the NPT shadow MMU, and is needed for KVM_SET_NESTED_STATE (which
runs long after the CR0, CR4 and EFER values in vcpu have been switched
to hold L2 guest state).

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/nested.c

index 048e865..0ad06bf 100644 (file)
@@ -57,7 +57,7 @@ void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
 void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                             bool accessed_dirty, gpa_t new_eptp);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
index fd1c914..2e62a03 100644 (file)
@@ -4952,7 +4952,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
        return role;
 }
 
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
 {
        struct kvm_mmu *context = vcpu->arch.mmu;
        union kvm_mmu_role new_role =
@@ -4961,11 +4961,11 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
        if (new_role.as_u64 == context->mmu_role.as_u64)
                return;
 
-       if (!is_paging(vcpu))
+       if (!(cr0 & X86_CR0_PG))
                nonpaging_init_context(vcpu, context);
-       else if (is_long_mode(vcpu))
+       else if (efer & EFER_LMA)
                paging64_init_context(vcpu, context);
-       else if (is_pae(vcpu))
+       else if (cr4 & X86_CR4_PAE)
                paging32E_init_context(vcpu, context);
        else
                paging32_init_context(vcpu, context);
@@ -5043,7 +5043,11 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *context = vcpu->arch.mmu;
 
-       kvm_init_shadow_mmu(vcpu);
+       kvm_init_shadow_mmu(vcpu,
+                           kvm_read_cr0_bits(vcpu, X86_CR0_PG),
+                           kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
+                           vcpu->arch.efer);
+
        context->get_guest_pgd     = get_cr3;
        context->get_pdptr         = kvm_pdptr_read;
        context->inject_page_fault = kvm_inject_page_fault;
index 369eca7..c712fe5 100644 (file)
@@ -80,10 +80,13 @@ static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
 
 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 {
+       struct vcpu_svm *svm = to_svm(vcpu);
+       struct vmcb *hsave = svm->nested.hsave;
+
        WARN_ON(mmu_is_nested(vcpu));
 
        vcpu->arch.mmu = &vcpu->arch.guest_mmu;
-       kvm_init_shadow_mmu(vcpu);
+       kvm_init_shadow_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer);
        vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
        vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;