KVM: X86: Add parameter huge_page_level to kvm_init_shadow_ept_mmu()
author Lai Jiangshan <laijs@linux.alibaba.com>
Wed, 24 Nov 2021 12:20:49 +0000 (20:20 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 8 Dec 2021 09:25:12 +0000 (04:25 -0500)
The huge page level supported by nested EPT affects the rsvds_bits_mask, so pass it to kvm_init_shadow_ept_mmu() as an explicit parameter instead of using the host's max_huge_page_level.

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20211124122055.64424-8-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/vmx/capabilities.h
arch/x86/kvm/vmx/nested.c
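
For illustration only (not part of the patch): the core of the change is the new ept_caps_to_lpage_level() helper, which maps the EPT capability bits exposed to L1 to the largest page level the shadow EPT MMU may assume when computing reserved-bit masks. Below is a minimal user-space sketch of that mapping; the bit positions and pg_level values are reproduced here from my reading of arch/x86/include/asm/vmx.h and KVM's enum pg_level, so treat them as assumptions of the sketch rather than definitions taken from the diff.

#include <stdint.h>
#include <stdio.h>

/* Bits advertised in MSR_IA32_VMX_EPT_VPID_CAP (assumed positions). */
#define VMX_EPT_2MB_PAGE_BIT    (1ull << 16)
#define VMX_EPT_1GB_PAGE_BIT    (1ull << 17)

/* Page-table level numbering, assumed to match KVM's enum pg_level. */
enum pg_level {
        PG_LEVEL_NONE,
        PG_LEVEL_4K,
        PG_LEVEL_2M,
        PG_LEVEL_1G,
};

/* Same logic as the new helper: largest page level the EPT caps allow. */
static int ept_caps_to_lpage_level(uint32_t ept_caps)
{
        if (ept_caps & VMX_EPT_1GB_PAGE_BIT)
                return PG_LEVEL_1G;
        if (ept_caps & VMX_EPT_2MB_PAGE_BIT)
                return PG_LEVEL_2M;
        return PG_LEVEL_4K;
}

int main(void)
{
        /* Hypothetical caps value: 2M pages supported, 1G pages not. */
        uint32_t caps_2m_only = VMX_EPT_2MB_PAGE_BIT;

        printf("huge_page_level = %d (PG_LEVEL_2M = %d)\n",
               ept_caps_to_lpage_level(caps_2m_only), PG_LEVEL_2M);
        return 0;
}

With the level derived from nested.msrs.ept_caps, reset_rsvds_bits_mask_ept() reflects the page sizes actually exposed to L1 rather than the host's max_huge_page_level.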

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 97e13c2..e9fbb2c 100644
@@ -71,7 +71,8 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
                             unsigned long cr4, u64 efer, gpa_t nested_cr3);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
-                            bool accessed_dirty, gpa_t new_eptp);
+                            int huge_page_level, bool accessed_dirty,
+                            gpa_t new_eptp);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                u64 fault_address, char *insn, int insn_len);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index ad7e3c5..4161396 100644
@@ -4905,7 +4905,8 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
 }
 
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
-                            bool accessed_dirty, gpa_t new_eptp)
+                            int huge_page_level, bool accessed_dirty,
+                            gpa_t new_eptp)
 {
        struct kvm_mmu *context = &vcpu->arch.guest_mmu;
        u8 level = vmx_eptp_page_walk_level(new_eptp);
@@ -4932,7 +4933,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
        update_permission_bitmask(context, true);
        context->pkru_mask = 0;
-       reset_rsvds_bits_mask_ept(vcpu, context, execonly, max_huge_page_level);
+       reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
        reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 4705ad5..c8029b7 100644
@@ -312,6 +312,15 @@ static inline bool cpu_has_vmx_ept_1g_page(void)
        return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
 }
 
+static inline int ept_caps_to_lpage_level(u32 ept_caps)
+{
+       if (ept_caps & VMX_EPT_1GB_PAGE_BIT)
+               return PG_LEVEL_1G;
+       if (ept_caps & VMX_EPT_2MB_PAGE_BIT)
+               return PG_LEVEL_2M;
+       return PG_LEVEL_4K;
+}
+
 static inline bool cpu_has_vmx_ept_ad_bits(void)
 {
        return vmx_capability.ept & VMX_EPT_AD_BIT;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b03df82..e6230cd 100644
@@ -397,9 +397,11 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 
 static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
 {
-       kvm_init_shadow_ept_mmu(vcpu,
-                               to_vmx(vcpu)->nested.msrs.ept_caps &
-                               VMX_EPT_EXECUTE_ONLY_BIT,
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
+       int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);
+
+       kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level,
                                nested_ept_ad_enabled(vcpu),
                                nested_ept_get_eptp(vcpu));
 }