mmu_role represents the role of the root of the page tables.
It does not need any extended bits, as those govern only KVM's
page table walking; the is_* functions used for page table
walking always use the CPU role.
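As a point of reference, a minimal sketch of that accessor pattern (simplified
stand-in types, modeled on the kernel's BUILD_MMU_ROLE_ACCESSOR macro; the real
definitions differ): the is_* helpers are generated to read cpu_role only,
never the root role, so dropping the extended bits from the root role loses
nothing:

  #include <stdbool.h>
  #include <stdint.h>

  struct cpu_role {
          struct { uint32_t efer_nx:1; } base;   /* bits shared with hardware roles */
          struct { uint32_t cr4_smep:1; } ext;   /* software-only (extended) bits   */
  };

  struct mmu {
          struct cpu_role cpu_role;       /* how the guest sees its own paging    */
          uint32_t root_role_word;        /* root page table role: base bits only */
  };

  /* Each accessor reads a single cpu_role bit, from .base or .ext. */
  #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)         \
  static inline bool is_##reg##_##name(struct mmu *mmu)           \
  {                                                                \
          return !!(mmu->cpu_role.base_or_ext.reg##_##name);      \
  }

  BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);        /* is_efer_nx()  */
  BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);        /* is_cr4_smep() */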
ext.valid is not present anymore in the MMU role, but an
all-zero MMU role is still impossible, because the level field
of a valid role is never zero. So just zap the whole mmu_role
in order to force invalidation after CPUID is updated.
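To spell the reasoning out, a rough sketch (made-up helper names and
illustrative types, not the kernel's exact role layout): every role produced
by the kvm_calc_*_role helpers has a non-zero level, so a zeroed role word can
never compare equal to a freshly computed one, and the next MMU initialization
(the comparisons visible in the hunks below) is forced to rebuild the context:

  #include <stdbool.h>
  #include <stdint.h>

  union page_role {
          uint32_t word;
          struct {
                  uint32_t level:4;       /* 2..5 for any real root, never 0 */
                  uint32_t direct:1;
                  uint32_t efer_nx:1;
                  /* ... */
          };
  };

  /* After a CPUID update, zap the cached role word... */
  static void zap_root_role(union page_role *cached)
  {
          cached->word = 0;
  }

  /* ...so the "did the role change?" check cannot short-circuit. */
  static bool role_changed(union page_role cached, union page_role fresh)
  {
          return fresh.word != cached.word;       /* always true after a zap */
  }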
While making this change, which requires touching almost every
occurrence of "mmu_role", rename it to "root_role".
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
struct kvm_mmu_root_info root;
union kvm_mmu_role cpu_role;
- union kvm_mmu_role mmu_role;
+ union kvm_mmu_page_role root_role;
u8 root_level;
u8 shadow_root_level;
bool direct_map;
/*
* Yes, lot's of underscores. They're a hint that you probably shouldn't be
- * reading from the role_regs. Once the mmu_role is constructed, it becomes
+ * reading from the role_regs. Once the root_role is constructed, it becomes
* the single source of truth for the MMU's state.
*/
#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \
int collisions = 0;
LIST_HEAD(invalid_list);
- role = vcpu->arch.mmu->mmu_role.base;
+ role = vcpu->arch.mmu->root_role;
role.level = level;
role.direct = direct;
role.access = access;
* This should not be called while L2 is active, L2 can't invalidate
* _only_ its own roots, e.g. INVVPID unconditionally exits.
*/
- WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
+ WARN_ON_ONCE(mmu->root_role.guest_mode);
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
root_hpa = mmu->prev_roots[i].hpa;
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
- union kvm_mmu_page_role new_role = mmu->mmu_role.base;
+ union kvm_mmu_page_role new_role = mmu->root_role;
if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role)) {
/* kvm_mmu_ensure_valid_pgd will set up a new root. */
shadow_zero_check = &context->shadow_zero_check;
__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
context->shadow_root_level,
- context->mmu_role.base.efer_nx,
+ context->root_role.efer_nx,
guest_can_use_gbpages(vcpu), is_pse, is_amd);
if (!shadow_me_mask)
-static union kvm_mmu_role
+static union kvm_mmu_page_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
union kvm_mmu_role cpu_role)
{
- union kvm_mmu_role role = {0};
+ union kvm_mmu_page_role role = {0};
- role.base.access = ACC_ALL;
- role.base.cr0_wp = true;
- role.base.efer_nx = true;
- role.base.smm = cpu_role.base.smm;
- role.base.guest_mode = cpu_role.base.guest_mode;
- role.base.ad_disabled = (shadow_accessed_mask == 0);
- role.base.level = kvm_mmu_get_tdp_level(vcpu);
- role.base.direct = true;
- role.base.has_4_byte_gpte = false;
- role.ext.valid = true;
+ role.access = ACC_ALL;
+ role.cr0_wp = true;
+ role.efer_nx = true;
+ role.smm = cpu_role.base.smm;
+ role.guest_mode = cpu_role.base.guest_mode;
+ role.ad_disabled = (shadow_accessed_mask == 0);
+ role.level = kvm_mmu_get_tdp_level(vcpu);
+ role.direct = true;
+ role.has_4_byte_gpte = false;
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
- union kvm_mmu_role mmu_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);
+ union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);
if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
- mmu_role.as_u64 == context->mmu_role.as_u64)
+ root_role.word == context->root_role.word)
return;
context->cpu_role.as_u64 = cpu_role.as_u64;
- context->mmu_role.as_u64 = mmu_role.as_u64;
+ context->root_role.word = root_role.word;
context->page_fault = kvm_tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = NULL;
reset_tdp_shadow_zero_bits_mask(context);
}
-static union kvm_mmu_role
+static union kvm_mmu_page_role
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
union kvm_mmu_role cpu_role)
{
- union kvm_mmu_role role;
+ union kvm_mmu_page_role role;
if (!cpu_role.ext.efer_lma)
- role.base.level = PT32E_ROOT_LEVEL;
+ role.level = PT32E_ROOT_LEVEL;
else if (cpu_role.ext.cr4_la57)
- role.base.level = PT64_ROOT_5LEVEL;
+ role.level = PT64_ROOT_5LEVEL;
else
- role.base.level = PT64_ROOT_4LEVEL;
+ role.level = PT64_ROOT_4LEVEL;
/*
* KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
* NX can be used by any non-nested shadow MMU to avoid having to reset
* MMU contexts.
*/
- role.base.efer_nx = true;
+ role.efer_nx = true;
return role;
}
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
union kvm_mmu_role cpu_role,
- union kvm_mmu_role mmu_role)
+ union kvm_mmu_page_role root_role)
{
if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
- mmu_role.as_u64 == context->mmu_role.as_u64)
+ root_role.word == context->root_role.word)
return;
context->cpu_role.as_u64 = cpu_role.as_u64;
- context->mmu_role.as_u64 = mmu_role.as_u64;
+ context->root_role.word = root_role.word;
if (!is_cr0_pg(context))
nonpaging_init_context(context);
context->root_level = cpu_role.base.level;
reset_guest_paging_metadata(vcpu, context);
- context->shadow_root_level = mmu_role.base.level;
+ context->shadow_root_level = root_role.level;
reset_shadow_zero_bits_mask(vcpu, context);
}
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
- union kvm_mmu_role mmu_role =
+ union kvm_mmu_page_role root_role =
kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_role);
- shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
+ shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
-static union kvm_mmu_role
+static union kvm_mmu_page_role
kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
union kvm_mmu_role cpu_role)
{
- union kvm_mmu_role role;
+ union kvm_mmu_page_role role;
WARN_ON_ONCE(cpu_role.base.direct);
- role = cpu_role;
- role.base.level = kvm_mmu_get_tdp_level(vcpu);
-
+ role = cpu_role.base;
+ role.level = kvm_mmu_get_tdp_level(vcpu);
.efer = efer,
};
union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
- union kvm_mmu_role mmu_role = kvm_calc_shadow_npt_root_page_role(vcpu, cpu_role);
+ union kvm_mmu_page_role root_role = kvm_calc_shadow_npt_root_page_role(vcpu, cpu_role);
- shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
+ shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
kvm_mmu_new_pgd(vcpu, nested_cr3);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
if (new_mode.as_u64 != context->cpu_role.as_u64) {
/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
context->cpu_role.as_u64 = new_mode.as_u64;
- context->mmu_role.as_u64 = new_mode.as_u64;
+ context->root_role.word = new_mode.base.word;
context->shadow_root_level = level;
* problem is swept under the rug; KVM's CPUID API is horrific and
* it's all but impossible to solve it without introducing a new API.
*/
- vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
- vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
- vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
+ vcpu->arch.root_mmu.root_role.word = 0;
+ vcpu->arch.guest_mmu.root_role.word = 0;
+ vcpu->arch.nested_mmu.root_role.word = 0;
vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
*/
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
- union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base;
+ union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
int i;
bool host_writable;
gpa_t first_pte_gpa;
* reserved bits checks will be wrong, etc...
*/
if (WARN_ON_ONCE(sp->role.direct ||
- (sp->role.word ^ mmu_role.word) & ~sync_role_ign.word))
+ (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
return -1;
first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
- union kvm_mmu_page_role role = vcpu->arch.mmu->mmu_role.base;
+ union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_page *root;