KVM: x86/mmu: Add accessors to query mmu_role bits
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 54514f0..1e5beac 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -176,9 +176,67 @@ static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
 
+struct kvm_mmu_role_regs {
+       const unsigned long cr0;
+       const unsigned long cr4;
+       const u64 efer;
+};
+
 #define CREATE_TRACE_POINTS
 #include "mmutrace.h"
 
+/*
+ * Yes, lots of underscores.  They're a hint that you probably shouldn't be
+ * reading from the role_regs.  Once the mmu_role is constructed, it becomes
+ * the single source of truth for the MMU's state.
+ */
+#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)                  \
+static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
+{                                                                      \
+       return !!(regs->reg & flag);                                    \
+}
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
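For reference, the first invocation above expands to the following (a mechanical macro expansion, shown only for illustration):

    static inline bool ____is_cr0_pg(struct kvm_mmu_role_regs *regs)
    {
            return !!(regs->cr0 & X86_CR0_PG);
    }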
+
+/*
+ * The MMU itself (with a valid role) is the single source of truth for the
+ * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
+ * regs don't account for dependencies, e.g. CR4 bits are ignored when CR0.PG=0,
+ * and the vCPU may be incorrect/irrelevant.
+ */
+#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)                \
+static inline bool is_##reg##_##name(struct kvm_mmu *mmu)      \
+{                                                              \
+       return !!(mmu->mmu_role. base_or_ext . reg##_##name);   \
+}
+BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
+BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
+BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
+BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
+BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
+BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
+BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
+BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
+BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
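Likewise, BUILD_MMU_ROLE_ACCESSOR(base, efer, nx) expands to an accessor that reads the constructed role instead of the raw registers (again a mechanical expansion, for illustration):

    static inline bool is_efer_nx(struct kvm_mmu *mmu)
    {
            return !!(mmu->mmu_role.base.efer_nx);
    }

Note that cr0_wp and efer_nx are pulled from the base role while CR0.PG and the CR4 bits live in the extended role, matching which bits are part of kvm_mmu_page_role proper.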
+
+static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mmu_role_regs regs = {
+               .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
+               .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
+               .efer = vcpu->arch.efer,
+       };
+
+       return regs;
+}
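The KVM_MMU_CR0_ROLE_BITS and KVM_MMU_CR4_ROLE_BITS masks used here are defined in arch/x86/kvm/mmu.h rather than in this file; judging by the accessors above they cover exactly the role-relevant bits, roughly:

    #define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
    #define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
                                   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)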
 
 static inline bool kvm_available_flush_tlb_with_range(void)
 {
@@ -1780,17 +1838,10 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
          &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])     \
                if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
 
-static inline bool is_ept_sp(struct kvm_mmu_page *sp)
-{
-       return sp->role.cr0_wp && sp->role.smap_andnot_wp;
-}
-
-/* @sp->gfn should be write-protected at the call site */
-static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-                           struct list_head *invalid_list)
+static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+                        struct list_head *invalid_list)
 {
-       if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
-           vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
+       if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
                return false;
        }
@@ -1836,31 +1887,6 @@ static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
               unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
 }
 
-static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-                        struct list_head *invalid_list)
-{
-       kvm_unlink_unsync_page(vcpu->kvm, sp);
-       return __kvm_sync_page(vcpu, sp, invalid_list);
-}
-
-/* @gfn should be write-protected at the call site */
-static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
-                          struct list_head *invalid_list)
-{
-       struct kvm_mmu_page *s;
-       bool ret = false;
-
-       for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
-               if (!s->unsync)
-                       continue;
-
-               WARN_ON(s->role.level != PG_LEVEL_4K);
-               ret |= kvm_sync_page(vcpu, s, invalid_list);
-       }
-
-       return ret;
-}
-
 struct mmu_page_path {
        struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
        unsigned int idx[PT64_ROOT_MAX_LEVEL];
@@ -1955,6 +1981,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
                }
 
                for_each_sp(pages, sp, parents, i) {
+                       kvm_unlink_unsync_page(vcpu->kvm, sp);
                        flush |= kvm_sync_page(vcpu, sp, &invalid_list);
                        mmu_pages_clear_parents(&parents);
                }
@@ -1990,8 +2017,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        struct hlist_head *sp_list;
        unsigned quadrant;
        struct kvm_mmu_page *sp;
-       bool need_sync = false;
-       bool flush = false;
        int collisions = 0;
        LIST_HEAD(invalid_list);
 
@@ -2014,20 +2039,39 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                        continue;
                }
 
-               if (!need_sync && sp->unsync)
-                       need_sync = true;
-
-               if (sp->role.word != role.word)
+               if (sp->role.word != role.word) {
+                       /*
+                        * If the guest is creating an upper-level page, zap
+                        * unsync pages for the same gfn.  While it's possible
+                        * the guest is using recursive page tables, in all
+                        * likelihood the guest has stopped using the unsync
+                        * page and is installing a completely unrelated page.
+                        * Unsync pages must not be left as is, because the new
+                        * upper-level page will be write-protected.
+                        */
+                       if (level > PG_LEVEL_4K && sp->unsync)
+                               kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+                                                        &invalid_list);
                        continue;
+               }
 
                if (direct_mmu)
                        goto trace_get_page;
 
                if (sp->unsync) {
-                       /* The page is good, but __kvm_sync_page might still end
-                        * up zapping it.  If so, break in order to rebuild it.
+                       /*
+                        * The page is good, but is stale.  kvm_sync_page does
+                        * get the latest guest state, but (unlike mmu_unsync_children)
+                        * it doesn't write-protect the page or mark it synchronized!
+                        * This way the validity of the mapping is ensured, but the
+                        * overhead of write protection is not incurred until the
+                        * guest invalidates the TLB mapping.  This allows multiple
+                        * SPs for a single gfn to be unsync.
+                        *
+                        * If the sync fails, the page is zapped.  If so, break
+                        * in order to rebuild it.
                         */
-                       if (!__kvm_sync_page(vcpu, sp, &invalid_list))
+                       if (!kvm_sync_page(vcpu, sp, &invalid_list))
                                break;
 
                        WARN_ON(!list_empty(&invalid_list));
@@ -2052,22 +2096,14 @@ trace_get_page:
        sp->role = role;
        hlist_add_head(&sp->hash_link, sp_list);
        if (!direct) {
-               /*
-                * we should do write protection before syncing pages
-                * otherwise the content of the synced shadow page may
-                * be inconsistent with guest page table.
-                */
                account_shadowed(vcpu->kvm, sp);
                if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
                        kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
-
-               if (level > PG_LEVEL_4K && need_sync)
-                       flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
        }
        trace_kvm_mmu_get_page(sp, true);
-
-       kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
 out:
+       kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+
        if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
                vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
        return sp;
@@ -2480,17 +2516,33 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        kvm_mmu_mark_parents_unsync(sp);
 }
 
-bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
-                           bool can_unsync)
+/*
+ * Attempt to unsync any shadow pages that can be reached by the specified gfn,
+ * for which KVM is creating a writable mapping.  Returns 0 if all pages
+ * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
+ * be write-protected.
+ */
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
 {
        struct kvm_mmu_page *sp;
 
+       /*
+        * Force write-protection if the page is being tracked.  Note, the page
+        * track machinery is used to write-protect upper-level shadow pages,
+        * i.e. this guards the role.level == 4K assertion below!
+        */
        if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
-               return true;
+               return -EPERM;
 
+       /*
+        * The page is not write-tracked; mark existing shadow pages unsync
+        * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
+        * that case, KVM must complete emulation of the guest TLB flush before
+        * allowing shadow pages to become unsync (writable by the guest).
+        */
        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
                if (!can_unsync)
-                       return true;
+                       return -EPERM;
 
                if (sp->unsync)
                        continue;
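The "assertion below" referenced in the comment above is the role.level check a few lines further down in this loop, just past the end of this hunk, along the lines of:

    WARN_ON(sp->role.level != PG_LEVEL_4K);
    kvm_unsync_page(vcpu, sp);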
@@ -2521,8 +2573,8 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
         *                      2.2 Guest issues TLB flush.
         *                          That causes a VM Exit.
         *
-        *                      2.3 kvm_mmu_sync_pages() reads sp->unsync.
-        *                          Since it is false, so it just returns.
+        *                      2.3 Walking of unsync pages sees sp->unsync is
+        *                          false and skips the page.
         *
         *                      2.4 Guest accesses GVA X.
         *                          Since the mapping in the SP was not updated,
@@ -2538,7 +2590,7 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
         */
        smp_wmb();
 
-       return false;
+       return 0;
 }
 
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
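With the bool return replaced by 0/-EPERM, callers treat any non-zero return as "the SPTE must stay read-only".  A minimal sketch of the consuming side (the set_spte() hunk itself is outside this excerpt, so the exact pte_access handling shown here is an assumption):

    if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync)) {
            /* Could not unsync every SP: strip write access from the SPTE. */
            pte_access &= ~ACC_WRITE_MASK;
    }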
@@ -3483,8 +3535,8 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
                 * flush strictly after those changes are made. We only need to
                 * ensure that the other CPU sets these flags before any actual
                 * changes to the page tables are made. The comments in
-                * mmu_need_write_protect() describe what could go wrong if this
-                * requirement isn't satisfied.
+                * mmu_try_to_unsync_pages() describe what could go wrong if
+                * this requirement isn't satisfied.
                 */
                if (!smp_load_acquire(&sp->unsync) &&
                    !smp_load_acquire(&sp->unsync_children))
@@ -3904,7 +3956,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
        context->sync_page = nonpaging_sync_page;
        context->invlpg = NULL;
        context->root_level = 0;
-       context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->direct_map = true;
        context->nx = false;
 }
@@ -4218,8 +4269,8 @@ static inline u64 reserved_hpa_bits(void)
  * table in the guest or AMD nested guest; its MMU features completely
  * follow the features in the guest.
  */
-void
-reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+                                       struct kvm_mmu *context)
 {
        /*
         * KVM uses NX when TDP is disabled to handle a variety of scenarios,
@@ -4229,8 +4280,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
         * NX can be used by any non-nested shadow MMU to avoid having to reset
         * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
         */
-       bool uses_nx = context->nx || !tdp_enabled ||
-               context->mmu_role.base.smep_andnot_wp;
+       bool uses_nx = context->nx || !tdp_enabled;
        struct rsvd_bits_validate *shadow_zero_check;
        int i;
 
@@ -4254,7 +4304,6 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
        }
 
 }
-EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
 
 static inline bool boot_cpu_is_amd(void)
 {
@@ -4474,22 +4523,16 @@ static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu
 
 static void paging64_init_context_common(struct kvm_vcpu *vcpu,
                                         struct kvm_mmu *context,
-                                        int level)
+                                        int root_level)
 {
        context->nx = is_nx(vcpu);
-       context->root_level = level;
-
-       reset_rsvds_bits_mask(vcpu, context);
-       update_permission_bitmask(vcpu, context, false);
-       update_pkru_bitmask(vcpu, context, false);
-       update_last_nonleaf_level(vcpu, context);
+       context->root_level = root_level;
 
        MMU_WARN_ON(!is_pae(vcpu));
        context->page_fault = paging64_page_fault;
        context->gva_to_gpa = paging64_gva_to_gpa;
        context->sync_page = paging64_sync_page;
        context->invlpg = paging64_invlpg;
-       context->shadow_root_level = level;
        context->direct_map = false;
 }
 
@@ -4507,17 +4550,10 @@ static void paging32_init_context(struct kvm_vcpu *vcpu,
 {
        context->nx = false;
        context->root_level = PT32_ROOT_LEVEL;
-
-       reset_rsvds_bits_mask(vcpu, context);
-       update_permission_bitmask(vcpu, context, false);
-       update_pkru_bitmask(vcpu, context, false);
-       update_last_nonleaf_level(vcpu, context);
-
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->sync_page = paging32_sync_page;
        context->invlpg = paging32_invlpg;
-       context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->direct_map = false;
 }
 
@@ -4527,17 +4563,18 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
        paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
 }
 
-static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
+                                                        struct kvm_mmu_role_regs *regs)
 {
        union kvm_mmu_extended_role ext = {0};
 
-       ext.cr0_pg = !!is_paging(vcpu);
-       ext.cr4_pae = !!is_pae(vcpu);
-       ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
-       ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
-       ext.cr4_pse = !!is_pse(vcpu);
-       ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
-       ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
+       ext.cr0_pg = ____is_cr0_pg(regs);
+       ext.cr4_pae = ____is_cr4_pae(regs);
+       ext.cr4_smep = ____is_cr4_smep(regs);
+       ext.cr4_smap = ____is_cr4_smap(regs);
+       ext.cr4_pse = ____is_cr4_pse(regs);
+       ext.cr4_pke = ____is_cr4_pke(regs);
+       ext.cr4_la57 = ____is_cr4_la57(regs);
 
        ext.valid = 1;
 
@@ -4545,20 +4582,21 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
 }
 
 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
+                                                  struct kvm_mmu_role_regs *regs,
                                                   bool base_only)
 {
        union kvm_mmu_role role = {0};
 
        role.base.access = ACC_ALL;
-       role.base.nxe = !!is_nx(vcpu);
-       role.base.cr0_wp = is_write_protection(vcpu);
+       role.base.efer_nx = ____is_efer_nx(regs);
+       role.base.cr0_wp = ____is_cr0_wp(regs);
        role.base.smm = is_smm(vcpu);
        role.base.guest_mode = is_guest_mode(vcpu);
 
        if (base_only)
                return role;
 
-       role.ext = kvm_calc_mmu_role_ext(vcpu);
+       role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
 
        return role;
 }
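When base_only is set, the function returns before filling in role.ext, so only the base page role is meaningful.  A quick illustration of the intended usage, mirroring kvm_mmu_calc_root_page_role() at the bottom of this patch:

    struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
    union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, &regs, true);

    /* role.ext.word is left zero; only role.base may be consumed. */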
@@ -4573,9 +4611,10 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
 }
 
 static union kvm_mmu_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
+                               struct kvm_mmu_role_regs *regs, bool base_only)
 {
-       union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+       union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
 
        role.base.ad_disabled = (shadow_accessed_mask == 0);
        role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4588,8 +4627,9 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *context = &vcpu->arch.root_mmu;
+       struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
        union kvm_mmu_role new_role =
-               kvm_calc_tdp_mmu_root_page_role(vcpu, false);
+               kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
 
        if (new_role.as_u64 == context->mmu_role.as_u64)
                return;
@@ -4633,30 +4673,30 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 }
 
 static union kvm_mmu_role
-kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
+                                     struct kvm_mmu_role_regs *regs, bool base_only)
 {
-       union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+       union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
 
-       role.base.smep_andnot_wp = role.ext.cr4_smep &&
-               !is_write_protection(vcpu);
-       role.base.smap_andnot_wp = role.ext.cr4_smap &&
-               !is_write_protection(vcpu);
-       role.base.gpte_is_8_bytes = !!is_pae(vcpu);
+       role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
+       role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
+       role.base.gpte_is_8_bytes = ____is_cr4_pae(regs);
 
        return role;
 }
 
 static union kvm_mmu_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
+                                  struct kvm_mmu_role_regs *regs, bool base_only)
 {
        union kvm_mmu_role role =
-               kvm_calc_shadow_root_page_role_common(vcpu, base_only);
+               kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
 
-       role.base.direct = !is_paging(vcpu);
+       role.base.direct = !____is_cr0_pg(regs);
 
-       if (!is_long_mode(vcpu))
+       if (!____is_efer_lma(regs))
                role.base.level = PT32E_ROOT_LEVEL;
-       else if (is_la57_mode(vcpu))
+       else if (____is_cr4_la57(regs))
                role.base.level = PT64_ROOT_5LEVEL;
        else
                role.base.level = PT64_ROOT_4LEVEL;
@@ -4665,37 +4705,47 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 }
 
 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
-                                   u32 cr0, u32 cr4, u32 efer,
+                                   struct kvm_mmu_role_regs *regs,
                                    union kvm_mmu_role new_role)
 {
-       if (!(cr0 & X86_CR0_PG))
+       if (!____is_cr0_pg(regs))
                nonpaging_init_context(vcpu, context);
-       else if (efer & EFER_LMA)
+       else if (____is_efer_lma(regs))
                paging64_init_context(vcpu, context);
-       else if (cr4 & X86_CR4_PAE)
+       else if (____is_cr4_pae(regs))
                paging32E_init_context(vcpu, context);
        else
                paging32_init_context(vcpu, context);
 
+       if (____is_cr0_pg(regs)) {
+               reset_rsvds_bits_mask(vcpu, context);
+               update_permission_bitmask(vcpu, context, false);
+               update_pkru_bitmask(vcpu, context, false);
+               update_last_nonleaf_level(vcpu, context);
+       }
+       context->shadow_root_level = new_role.base.level;
+
        context->mmu_role.as_u64 = new_role.as_u64;
        reset_shadow_zero_bits_mask(vcpu, context);
 }
 
-static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
+static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
+                               struct kvm_mmu_role_regs *regs)
 {
        struct kvm_mmu *context = &vcpu->arch.root_mmu;
        union kvm_mmu_role new_role =
-               kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+               kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
 
        if (new_role.as_u64 != context->mmu_role.as_u64)
-               shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+               shadow_mmu_init_context(vcpu, context, regs, new_role);
 }
 
 static union kvm_mmu_role
-kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
+kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
+                                  struct kvm_mmu_role_regs *regs)
 {
        union kvm_mmu_role role =
-               kvm_calc_shadow_root_page_role_common(vcpu, false);
+               kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
 
        role.base.direct = false;
        role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4703,23 +4753,29 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
        return role;
 }
 
-void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
-                            gpa_t nested_cr3)
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+                            unsigned long cr4, u64 efer, gpa_t nested_cr3)
 {
        struct kvm_mmu *context = &vcpu->arch.guest_mmu;
-       union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
+       struct kvm_mmu_role_regs regs = {
+               .cr0 = cr0,
+               .cr4 = cr4,
+               .efer = efer,
+       };
+       union kvm_mmu_role new_role;
+
+       new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
 
        __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
 
-       if (new_role.as_u64 != context->mmu_role.as_u64) {
-               shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+       if (new_role.as_u64 != context->mmu_role.as_u64)
+               shadow_mmu_init_context(vcpu, context, &regs, new_role);
 
-               /*
-                * Override the level set by the common init helper, nested TDP
-                * always uses the host's TDP configuration.
-                */
-               context->shadow_root_level = new_role.base.level;
-       }
+       /*
+        * Redo the shadow reserved-bits mask; the reset done by
+        * shadow_mmu_init_context() above may have used the wrong
+        * shadow_root_level.
+        */
+       reset_shadow_zero_bits_mask(vcpu, context);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
 
@@ -4739,15 +4795,10 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
        role.base.guest_mode = true;
        role.base.access = ACC_ALL;
 
-       /*
-        * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
-        * SMAP variation to denote shadow EPT entries.
-        */
-       role.base.cr0_wp = true;
-       role.base.smap_andnot_wp = true;
-
-       role.ext = kvm_calc_mmu_role_ext(vcpu);
-       role.ext.execonly = execonly;
+       /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
+       role.ext.word = 0;
        role.ext.execonly = execonly;
+       role.ext.valid = 1;
 
        return role;
 }
@@ -4789,20 +4840,21 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *context = &vcpu->arch.root_mmu;
+       struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 
-       kvm_init_shadow_mmu(vcpu,
-                           kvm_read_cr0_bits(vcpu, X86_CR0_PG),
-                           kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
-                           vcpu->arch.efer);
+       kvm_init_shadow_mmu(vcpu, &regs);
 
        context->get_guest_pgd     = get_cr3;
        context->get_pdptr         = kvm_pdptr_read;
        context->inject_page_fault = kvm_inject_page_fault;
 }
 
-static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
+static union kvm_mmu_role
+kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
 {
-       union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
+       union kvm_mmu_role role;
+
+       role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
 
        /*
         * Nested MMUs are used only for walking L2's gva->gpa, they never have
@@ -4811,12 +4863,12 @@ static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
         */
        role.base.direct = true;
 
-       if (!is_paging(vcpu))
+       if (!____is_cr0_pg(regs))
                role.base.level = 0;
-       else if (is_long_mode(vcpu))
-               role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
-                                                      PT64_ROOT_4LEVEL;
-       else if (is_pae(vcpu))
+       else if (____is_efer_lma(regs))
+               role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
+                                                         PT64_ROOT_4LEVEL;
+       else if (____is_cr4_pae(regs))
                role.base.level = PT32E_ROOT_LEVEL;
        else
                role.base.level = PT32_ROOT_LEVEL;
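Spelled out, the ladder above selects the nested walker's root level as follows (derived directly from the code, for quick reference):

    /*
     *  CR0.PG = 0                   -> 0 (no guest page tables to walk)
     *  EFER.LMA = 1, CR4.LA57 = 1   -> PT64_ROOT_5LEVEL
     *  EFER.LMA = 1, CR4.LA57 = 0   -> PT64_ROOT_4LEVEL
     *  CR4.PAE = 1                  -> PT32E_ROOT_LEVEL
     *  otherwise                    -> PT32_ROOT_LEVEL
     */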
@@ -4826,7 +4878,8 @@ static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
 
 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 {
-       union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
+       struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
+       union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
        struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
        if (new_role.as_u64 == g_context->mmu_role.as_u64)
@@ -4892,12 +4945,13 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
 {
+       struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
        union kvm_mmu_role role;
 
        if (tdp_enabled)
-               role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
+               role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
        else
-               role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+               role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
 
        return role.base;
 }