KVM: VMX: Process CR0.PG side effects after setting CR0 assets
author    Sean Christopherson <seanjc@google.com>
          Tue, 13 Jul 2021 16:33:05 +0000 (09:33 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
          Mon, 2 Aug 2021 15:01:55 +0000 (11:01 -0400)
Move the long mode and EPT w/o unrestricted guest side effect processing
down in vmx_set_cr0() so that the EPT && !URG case doesn't have to stuff
vcpu->arch.cr0 early.  This also fixes an oddity where CR0 might not be
marked available, i.e. the early vcpu->arch.cr0 write would appear to be
in danger of being overwritten, though that can't actually happen in the
current code since CR0.TS is the only guest-owned bit, and CR0.TS is not
read by vmx_set_cr4().

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210713163324.627647-28-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
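
For context on the "marked available" oddity the message describes, below is a minimal, paraphrased sketch of the CR0 register-cache lookup (modeled on arch/x86/kvm/kvm_cache_regs.h; not a verbatim copy of the tree this patch applies to). The fallback to the VMCS only triggers when a guest-owned bit is requested and CR0 has not been marked available, which is why the old early, unmarked vcpu->arch.cr0 write merely looked overwritable:

/*
 * Paraphrased sketch of the CR0 cache lookup; exact details may differ
 * from the tree this patch applies to.
 */
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

	/*
	 * Only guest-owned bits (just CR0.TS per the changelog) can be stale.
	 * If such a bit is requested while CR0 is not marked available, the
	 * vendor callback re-reads the guest-owned bits from the VMCS and
	 * refreshes vcpu->arch.cr0, which could clobber an early write that
	 * was never marked available.
	 */
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);

	return vcpu->arch.cr0 & mask;
}

With the patch, vcpu->arch.cr0 and the availability flag are updated together, right after the VMCS writes, so later consumers such as vmx_set_cr4() in the EPT && !URG path always see a coherent, marked-available CR0.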
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0c1e578..aa54710 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3003,9 +3003,11 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       unsigned long hw_cr0;
+       unsigned long hw_cr0, old_cr0_pg;
        u32 tmp;
 
+       old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);
+
        hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
        if (is_unrestricted_guest(vcpu))
                hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
@@ -3021,11 +3023,16 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                        enter_rmode(vcpu);
        }
 
+       vmcs_writel(CR0_READ_SHADOW, cr0);
+       vmcs_writel(GUEST_CR0, hw_cr0);
+       vcpu->arch.cr0 = cr0;
+       kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
+
 #ifdef CONFIG_X86_64
        if (vcpu->arch.efer & EFER_LME) {
-               if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
+               if (!old_cr0_pg && (cr0 & X86_CR0_PG))
                        enter_lmode(vcpu);
-               if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
+               else if (old_cr0_pg && !(cr0 & X86_CR0_PG))
                        exit_lmode(vcpu);
        }
 #endif
@@ -3066,17 +3073,11 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                        exec_controls_set(vmx, tmp);
                }
 
-               if (!is_paging(vcpu) != !(cr0 & X86_CR0_PG)) {
-                       vcpu->arch.cr0 = cr0;
+               /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
+               if ((old_cr0_pg ^ cr0) & X86_CR0_PG)
                        vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
-               }
        }
 
-       vmcs_writel(CR0_READ_SHADOW, cr0);
-       vmcs_writel(GUEST_CR0, hw_cr0);
-       vcpu->arch.cr0 = cr0;
-       kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
-
        /* depends on vcpu->arch.cr0 to be set to a new value */
        vmx->emulation_required = emulation_required(vcpu);
 }