KVM: nSVM: Use KVM-governed feature framework to track "LBRv enabled"
author     Sean Christopherson <seanjc@google.com>
           Tue, 15 Aug 2023 20:36:49 +0000 (13:36 -0700)
committer  Sean Christopherson <seanjc@google.com>
           Thu, 17 Aug 2023 18:43:30 +0000 (11:43 -0700)
Track "LBR virtualization exposed to L1" via a governed feature flag
instead of using a dedicated bit/flag in vcpu_svm.

Note, checking KVM's capabilities instead of the "lbrv" param means that
the code isn't strictly equivalent, as lbrv_enabled could have been set
if nested=false, whereas the governed feature cannot.  But that's a
glorified nop as the feature/flag is consumed only by paths that are
gated by nSVM being enabled.
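
For context, the governed feature framework boils down to a per-vCPU bitmap:
the X-macro list in governed_features.h assigns each feature a bit index,
kvm_governed_feature_check_and_set() sets the vCPU's bit only if both KVM and
the guest's CPUID advertise the feature, and consumers such as nested.c query
the bit via guest_can_use().  The snippet below is a minimal, userspace-only
sketch of that pattern, not the kernel code; struct vcpu_model, check_and_set(),
host_has_lbrv and the GOVERNED_FEATURES list are invented stand-ins for the
real kvm_vcpu state and helpers.

  /*
   * Simplified userspace model of the governed feature framework.
   * NOT the kernel implementation; host_has_lbrv and cpuid_has_lbrv
   * stand in for kvm_cpu_cap_has()/guest_cpuid_has().
   */
  #include <stdbool.h>
  #include <stdio.h>

  /* X-macro list, mirroring arch/x86/kvm/governed_features.h. */
  #define GOVERNED_FEATURES(F) \
          F(NRIPS)             \
          F(TSCRATEMSR)        \
          F(LBRV)

  /* Expand the list into bit indices, one per feature. */
  enum governed_feature {
  #define F(name) GOVERNED_##name,
          GOVERNED_FEATURES(F)
  #undef F
          GOVERNED_FEATURE_COUNT,
  };

  struct vcpu_model {
          unsigned long governed;   /* bitmap of features the guest can use */
          bool cpuid_has_lbrv;      /* did userspace expose LBRV in guest CPUID? */
  };

  static const bool host_has_lbrv = true;  /* stand-in for KVM's capability check */

  /* Set the governed bit only if both KVM and guest CPUID support the feature. */
  static void check_and_set(struct vcpu_model *vcpu, enum governed_feature f,
                            bool host, bool guest)
  {
          if (host && guest)
                  vcpu->governed |= 1ul << f;
  }

  /* Consumers simply test the bit, mirroring guest_can_use(). */
  static bool guest_can_use(const struct vcpu_model *vcpu, enum governed_feature f)
  {
          return vcpu->governed & (1ul << f);
  }

  int main(void)
  {
          struct vcpu_model vcpu = { .cpuid_has_lbrv = true };

          /* Analogue of svm_vcpu_after_set_cpuid() updating the flag. */
          check_and_set(&vcpu, GOVERNED_LBRV, host_has_lbrv, vcpu.cpuid_has_lbrv);

          /* Analogue of the nested.c checks that replaced svm->lbrv_enabled. */
          printf("LBRV usable by L1: %d\n", guest_can_use(&vcpu, GOVERNED_LBRV));
          return 0;
  }

The upshot of the conversion is that the bit lives in common x86 state, so
vcpu_svm no longer needs its dedicated lbrv_enabled bitfield (see the svm.h
hunk below).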

Link: https://lore.kernel.org/r/20230815203653.519297-12-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/governed_features.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h
index f01a95f..3a4c0e4 100644
@@ -11,6 +11,7 @@ KVM_GOVERNED_X86_FEATURE(VMX)
 KVM_GOVERNED_X86_FEATURE(NRIPS)
 KVM_GOVERNED_X86_FEATURE(TSCRATEMSR)
 KVM_GOVERNED_X86_FEATURE(V_VMSAVE_VMLOAD)
+KVM_GOVERNED_X86_FEATURE(LBRV)
 
 #undef KVM_GOVERNED_X86_FEATURE
 #undef KVM_GOVERNED_FEATURE
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 24d47eb..f50f74b 100644
@@ -552,6 +552,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
        bool new_vmcb12 = false;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;
        struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
+       struct kvm_vcpu *vcpu = &svm->vcpu;
 
        nested_vmcb02_compute_g_pat(svm);
 
@@ -577,18 +578,18 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
                vmcb_mark_dirty(vmcb02, VMCB_DT);
        }
 
-       kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
+       kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
 
-       svm_set_efer(&svm->vcpu, svm->nested.save.efer);
+       svm_set_efer(vcpu, svm->nested.save.efer);
 
-       svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
-       svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);
+       svm_set_cr0(vcpu, svm->nested.save.cr0);
+       svm_set_cr4(vcpu, svm->nested.save.cr4);
 
        svm->vcpu.arch.cr2 = vmcb12->save.cr2;
 
-       kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
-       kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
-       kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
+       kvm_rax_write(vcpu, vmcb12->save.rax);
+       kvm_rsp_write(vcpu, vmcb12->save.rsp);
+       kvm_rip_write(vcpu, vmcb12->save.rip);
 
        /* In case we don't even reach vcpu_run, the fields are not updated */
        vmcb02->save.rax = vmcb12->save.rax;
@@ -602,7 +603,8 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
                vmcb_mark_dirty(vmcb02, VMCB_DR);
        }
 
-       if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+       if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+                    (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
                /*
                 * Reserved bits of DEBUGCTL are ignored.  Be consistent with
                 * svm_set_msr's definition of reserved bits.
@@ -734,7 +736,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 
        vmcb02->control.virt_ext            = vmcb01->control.virt_ext &
                                              LBR_CTL_ENABLE_MASK;
-       if (svm->lbrv_enabled)
+       if (guest_can_use(vcpu, X86_FEATURE_LBRV))
                vmcb02->control.virt_ext  |=
                        (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
 
@@ -1065,7 +1067,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        if (!nested_exit_on_intr(svm))
                kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 
-       if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+       if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+                    (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
                svm_copy_lbrs(vmcb12, vmcb02);
                svm_update_lbrv(vcpu);
        } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 78d53ea..3b8e412 100644
@@ -1024,7 +1024,7 @@ void svm_update_lbrv(struct kvm_vcpu *vcpu)
        bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
                                      LBR_CTL_ENABLE_MASK);
 
-       if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled))
+       if (unlikely(is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV)))
                if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))
                        enable_lbrv = true;
 
@@ -4261,8 +4261,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
        kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS);
        kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
-
-       svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);
+       kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LBRV);
 
        /*
         * Intercept VMLOAD if the vCPU mode is Intel in order to emulate that
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index b475241..0e21823 100644
@@ -259,7 +259,6 @@ struct vcpu_svm {
        bool soft_int_injected;
 
        /* optional nested SVM features that are enabled for this guest  */
-       bool lbrv_enabled                 : 1;
        bool pause_filter_enabled         : 1;
        bool pause_threshold_enabled      : 1;
        bool vgif_enabled                 : 1;