KVM: x86: Tie Intel and AMD behavior for MSR_TSC_AUX to guest CPU model
author          Sean Christopherson <seanjc@google.com>
                Tue, 4 May 2021 17:17:33 +0000 (10:17 -0700)
committer       Paolo Bonzini <pbonzini@redhat.com>
                Fri, 7 May 2021 10:06:19 +0000 (06:06 -0400)
Squish the Intel and AMD emulation of MSR_TSC_AUX together and tie it to
the guest CPU model instead of the host CPU behavior.  While not strictly
necessary to avoid guest breakage, emulating cross-vendor "architecture"
will provide consistent behavior for the guest, e.g. WRMSR fault behavior
won't change if the vCPU is migrated to a host with divergent behavior.
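
For illustration only (this program is not part of the patch): a toy,
compilable userspace model of the consolidated write emulation, showing
that the guest-visible WRMSR outcome now depends solely on the guest
CPU model.  wrmsr_tsc_aux() and its parameters are stand-ins invented
for this sketch; the "is the MSR supported at all" check is elided.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Toy model of the MSR_TSC_AUX write logic added to __kvm_set_msr(). */
  static bool wrmsr_tsc_aux(bool guest_is_intel, bool has_rdtscp_or_rdpid,
                            bool host_initiated, uint64_t data,
                            uint32_t *tsc_aux)
  {
          if (!host_initiated && !has_rdtscp_or_rdpid)
                  return false;                   /* #GP */
          if (guest_is_intel && (data >> 32) != 0)
                  return false;                   /* bits 63:32 reserved */
          *tsc_aux = (uint32_t)data;              /* AMD et al. drop 63:32 */
          return true;
  }

  int main(void)
  {
          uint32_t aux = 0;
          bool ok;

          /* Intel guest CPU model: non-zero bits 63:32 fault. */
          ok = wrmsr_tsc_aux(true, true, false, 0x100000001ULL, &aux);
          printf("intel: %s\n", ok ? "ok" : "#GP");

          /* AMD guest CPU model: the same write succeeds, truncated. */
          ok = wrmsr_tsc_aux(false, true, false, 0x100000001ULL, &aux);
          printf("amd:   %s, aux=0x%x\n", ok ? "ok" : "#GP", (unsigned)aux);
          return 0;
  }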

Note, the "new" kvm_is_supported_user_return_msr() checks do not add new
functionality on either SVM or VMX.  On SVM, the equivalent was
"tsc_aux_uret_slot < 0", and on VMX the check was buried in the
vmx_find_uret_msr() call at the find_uret_msr label.
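
Purely as a reader aid (this fragment is not code from the patch), the
equivalence in sketch form; kvm_is_supported_user_return_msr() is the
helper added to kvm_host.h below:

  /*
   * Pre-patch "is MSR_TSC_AUX supported at all" checks:
   *   SVM:  if (tsc_aux_uret_slot < 0)
   *                 return 1;
   *   VMX:  vmx_find_uret_msr() returning NULL at the find_uret_msr
   *         label, which likewise ended in "return 1".
   * Post-patch, both paths funnel into the common check:
   */
  if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
          return 1;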

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210504171734.1434054-15-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

arch/x86/include/asm/kvm_host.h
index 8155eaa..f85480b 100644
@@ -1781,6 +1781,11 @@ int kvm_add_user_return_msr(u32 msr);
 int kvm_find_user_return_msr(u32 msr);
 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
 
+static inline bool kvm_is_supported_user_return_msr(u32 msr)
+{
+       return kvm_find_user_return_msr(msr) >= 0;
+}
+
 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
 
arch/x86/kvm/svm/svm.c
index d13b72b..0922d8e 100644
@@ -2663,12 +2663,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
                break;
        case MSR_TSC_AUX:
-               if (tsc_aux_uret_slot < 0)
-                       return 1;
-               if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
-                       return 1;
                msr_info->data = svm->tsc_aux;
                break;
        /*
@@ -2885,24 +2879,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
                break;
        case MSR_TSC_AUX:
-               if (tsc_aux_uret_slot < 0)
-                       return 1;
-
-               if (!msr->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
-                       return 1;
-
-               /*
-                * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
-                * incomplete and conflicting architectural behavior.  Current
-                * AMD CPUs completely ignore bits 63:32, i.e. they aren't
-                * reserved and always read as zeros.  Emulate AMD CPU behavior
-                * to avoid explosions if the vCPU is migrated from an AMD host
-                * to an Intel host.
-                */
-               data = (u32)data;
-
                /*
                 * TSC_AUX is usually changed only during boot and never read
                 * directly.  Intercept TSC_AUX instead of exposing it to the
arch/x86/kvm/vmx/vmx.c
index 042823b..61f7e52 100644
@@ -1981,12 +1981,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                else
                        msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
                break;
-       case MSR_TSC_AUX:
-               if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
-                       return 1;
-               goto find_uret_msr;
        case MSR_IA32_DEBUGCTLMSR:
                msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
                break;
@@ -2302,15 +2296,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                else
                        vmx->pt_desc.guest.addr_a[index / 2] = data;
                break;
-       case MSR_TSC_AUX:
-               if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
-                       return 1;
-               /* Check reserved bit, higher 32 bits should be zero */
-               if ((data >> 32) != 0)
-                       return 1;
-               goto find_uret_msr;
        case MSR_IA32_PERF_CAPABILITIES:
                if (data && !vcpu_to_pmu(vcpu)->version)
                        return 1;
arch/x86/kvm/x86.c
index 0a0eebf..4efd297 100644
@@ -1642,6 +1642,30 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
                 * invokes 64-bit SYSENTER.
                 */
                data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
+               break;
+       case MSR_TSC_AUX:
+               if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
+                       return 1;
+
+               if (!host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+                       return 1;
+
+               /*
+                * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
+                * incomplete and conflicting architectural behavior.  Current
+                * AMD CPUs completely ignore bits 63:32, i.e. they aren't
+                * reserved and always read as zeros.  Enforce Intel's reserved
+                * bits check if and only if the guest CPU is Intel, and clear
+                * the bits in all other cases.  This ensures cross-vendor
+                * migration will provide consistent behavior for the guest.
+                */
+               if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0)
+                       return 1;
+
+               data = (u32)data;
+               break;
        }
 
        msr.data = data;
@@ -1678,6 +1702,18 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
        if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
                return KVM_MSR_RET_FILTERED;
 
+       switch (index) {
+       case MSR_TSC_AUX:
+               if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
+                       return 1;
+
+               if (!host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+                       return 1;
+               break;
+       }
+
        msr.index = index;
        msr.host_initiated = host_initiated;