Merge tag 'kvmarm-fixes-5.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9790c73..dfa351e 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -212,7 +212,7 @@ DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
  * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
  * defer the restoration of TSC_AUX until the CPU returns to userspace.
  */
-#define TSC_AUX_URET_SLOT      0
+static int tsc_aux_uret_slot __read_mostly = -1;
 
 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
 
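The hunks above and the one in svm_hardware_setup() below replace the compile-time TSC_AUX_URET_SLOT define with a slot index handed out at runtime by the common x86 user-return-MSR code, with -1 meaning "no slot allocated". A minimal sketch of the registration side, using the kvm_add_user_return_msr() call visible in this diff (the example_* name is illustrative, not from the patch):

static int tsc_aux_uret_slot __read_mostly = -1;    /* -1: MSR_TSC_AUX not registered */

static __init int example_hardware_setup(void)
{
        /*
         * Ask common x86 code for a user-return slot.  The return value is
         * the index that must be passed to kvm_set_user_return_msr() later,
         * or -1 if no slot could be allocated.
         */
        tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
        return 0;
}
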
@@ -447,6 +447,11 @@ static int has_svm(void)
                return 0;
        }
 
+       if (pgtable_l5_enabled()) {
+               pr_info("KVM doesn't yet support 5-level paging on AMD SVM\n");
+               return 0;
+       }
+
        return 1;
 }
 
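The new pgtable_l5_enabled() check makes the SVM probe bail out on hosts running with 5-level page tables, which the pr_info() notes KVM's AMD code does not support yet. Roughly how the probe reads with this hunk applied (a sketch only; the earlier checks that can already return 0 are elided, and the example_* name is illustrative):

static int example_has_svm(void)
{
        /* ... existing CPUID/firmware checks that may already return 0 ... */

        if (pgtable_l5_enabled()) {
                pr_info("KVM doesn't yet support 5-level paging on AMD SVM\n");
                return 0;       /* report SVM as unusable on this host */
        }

        return 1;               /* SVM can be enabled */
}
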
@@ -959,8 +964,7 @@ static __init int svm_hardware_setup(void)
                kvm_tsc_scaling_ratio_frac_bits = 32;
        }
 
-       if (boot_cpu_has(X86_FEATURE_RDTSCP))
-               kvm_define_user_return_msr(TSC_AUX_URET_SLOT, MSR_TSC_AUX);
+       tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
 
        /* Check for pause filtering support */
        if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
@@ -1100,7 +1104,9 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        return svm->vmcb->control.tsc_offset;
 }
 
-static void svm_check_invpcid(struct vcpu_svm *svm)
+/* Evaluate instruction intercepts that depend on guest CPUID features. */
+static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
+                                             struct vcpu_svm *svm)
 {
        /*
         * Intercept INVPCID if shadow paging is enabled to sync/free shadow
@@ -1113,6 +1119,13 @@ static void svm_check_invpcid(struct vcpu_svm *svm)
                else
                        svm_clr_intercept(svm, INTERCEPT_INVPCID);
        }
+
+       if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
+               if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
+                       svm_clr_intercept(svm, INTERCEPT_RDTSCP);
+               else
+                       svm_set_intercept(svm, INTERCEPT_RDTSCP);
+       }
 }
 
 static void init_vmcb(struct kvm_vcpu *vcpu)
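Besides the rename, svm_recalc_instruction_intercepts() now also gates the RDTSCP intercept on guest CPUID: if KVM exposes RDTSCP but this guest's CPUID does not advertise it, the intercept is set so the instruction exits instead of silently working, and the SVM_EXIT_RDTSCP handler added later in this patch (kvm_handle_invalid_op) turns that exit into a #UD. The helper is called from init_vmcb() and again from svm_vcpu_after_set_cpuid(), so the intercepts track CPUID updates. A sketch of just the new RDTSCP portion (the example_* name is illustrative):

static void example_recalc_rdtscp_intercept(struct kvm_vcpu *vcpu,
                                            struct vcpu_svm *svm)
{
        /* Nothing to do if KVM never exposes RDTSCP to guests. */
        if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
                return;

        if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
                svm_clr_intercept(svm, INTERCEPT_RDTSCP);   /* guest may use it */
        else
                svm_set_intercept(svm, INTERCEPT_RDTSCP);   /* exit, then inject #UD */
}
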
@@ -1235,8 +1248,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
        svm->current_vmcb->asid_generation = 0;
        svm->asid = 0;
 
-       svm->nested.vmcb12_gpa = 0;
-       svm->nested.last_vmcb12_gpa = 0;
+       svm->nested.vmcb12_gpa = INVALID_GPA;
+       svm->nested.last_vmcb12_gpa = INVALID_GPA;
        vcpu->arch.hflags = 0;
 
        if (!kvm_pause_in_guest(vcpu->kvm)) {
@@ -1248,7 +1261,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
                svm_clr_intercept(svm, INTERCEPT_PAUSE);
        }
 
-       svm_check_invpcid(svm);
+       svm_recalc_instruction_intercepts(vcpu, svm);
 
        /*
         * If the host supports V_SPEC_CTRL then disable the interception
@@ -1424,6 +1437,9 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
 
+       if (sev_es_guest(vcpu->kvm))
+               sev_es_unmap_ghcb(svm);
+
        if (svm->guest_state_loaded)
                return;
 
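For SEV-ES guests the GHCB is now unmapped in svm_prepare_guest_switch(), and deliberately before the guest_state_loaded early return, so it happens on every switch into the guest even when the more expensive host-state setup below is skipped. An ordering sketch, with the per-switch details elided and the example_* name illustrative:

static void example_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /* Always runs, even when guest_state_loaded short-circuits below. */
        if (sev_es_guest(vcpu->kvm))
                sev_es_unmap_ghcb(svm);

        if (svm->guest_state_loaded)
                return;

        /* ... per-switch host/guest MSR and save-area state loading ... */

        svm->guest_state_loaded = true;
}
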
@@ -1445,8 +1461,8 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
                }
        }
 
-       if (static_cpu_has(X86_FEATURE_RDTSCP))
-               kvm_set_user_return_msr(TSC_AUX_URET_SLOT, svm->tsc_aux, -1ull);
+       if (likely(tsc_aux_uret_slot >= 0))
+               kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
 
        svm->guest_state_loaded = true;
 }
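The switch-in side now keys off the allocated slot rather than host RDTSCP support: if registration failed the slot stays -1 and the update is simply skipped, otherwise the guest's TSC_AUX value is installed and, per the comment at the top of this file, the host value is restored only when the CPU returns to userspace. A short annotated sketch of the retained call:

        /*
         * A mask of -1ull means "take the guest value wholesale"; the
         * user-return machinery restores the host value lazily later.
         */
        if (likely(tsc_aux_uret_slot >= 0))
                kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
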
@@ -2655,11 +2671,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
                break;
        case MSR_TSC_AUX:
-               if (!boot_cpu_has(X86_FEATURE_RDTSCP))
-                       return 1;
-               if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
-                       return 1;
                msr_info->data = svm->tsc_aux;
                break;
        /*
@@ -2876,30 +2887,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
                break;
        case MSR_TSC_AUX:
-               if (!boot_cpu_has(X86_FEATURE_RDTSCP))
-                       return 1;
-
-               if (!msr->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
-                       return 1;
-
-               /*
-                * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
-                * incomplete and conflicting architectural behavior.  Current
-                * AMD CPUs completely ignore bits 63:32, i.e. they aren't
-                * reserved and always read as zeros.  Emulate AMD CPU behavior
-                * to avoid explosions if the vCPU is migrated from an AMD host
-                * to an Intel host.
-                */
-               data = (u32)data;
-
                /*
                 * TSC_AUX is usually changed only during boot and never read
                 * directly.  Intercept TSC_AUX instead of exposing it to the
                 * guest via direct_access_msrs, and switch it via user return.
                 */
                preempt_disable();
-               r = kvm_set_user_return_msr(TSC_AUX_URET_SLOT, data, -1ull);
+               r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
                preempt_enable();
                if (r)
                        return 1;
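On the write side, the feature/CPUID guards and the explicit truncation to 32 bits are dropped from svm_set_msr(); in this series that policy moves out of the vendor module, so only the user-return-slot update is left here. The preempt_disable() pairing that survives matters because the user-return MSR values are tracked per CPU. A sketch of the retained path (example_* name illustrative; the trailing svm->tsc_aux bookkeeping follows the lines shown above in the real function):

static int example_write_tsc_aux(struct vcpu_svm *svm, u64 data)
{
        int r;

        preempt_disable();      /* the user-return MSR state is per-CPU */
        r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
        preempt_enable();
        if (r)
                return 1;       /* fail the WRMSR */

        svm->tsc_aux = data;    /* remembered for the next guest switch-in */
        return 0;
}
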
@@ -3084,6 +3078,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [SVM_EXIT_STGI]                         = stgi_interception,
        [SVM_EXIT_CLGI]                         = clgi_interception,
        [SVM_EXIT_SKINIT]                       = skinit_interception,
+       [SVM_EXIT_RDTSCP]                       = kvm_handle_invalid_op,
        [SVM_EXIT_WBINVD]                       = kvm_emulate_wbinvd,
        [SVM_EXIT_MONITOR]                      = kvm_emulate_monitor,
        [SVM_EXIT_MWAIT]                        = kvm_emulate_mwait,
@@ -3710,25 +3705,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned long vmcb_pa = svm->current_vmcb->pa;
 
-       /*
-        * VMENTER enables interrupts (host state), but the kernel state is
-        * interrupts disabled when this is invoked. Also tell RCU about
-        * it. This is the same logic as for exit_to_user_mode().
-        *
-        * This ensures that e.g. latency analysis on the host observes
-        * guest mode as interrupt enabled.
-        *
-        * guest_enter_irqoff() informs context tracking about the
-        * transition to guest mode and if enabled adjusts RCU state
-        * accordingly.
-        */
-       instrumentation_begin();
-       trace_hardirqs_on_prepare();
-       lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-       instrumentation_end();
-
-       guest_enter_irqoff();
-       lockdep_hardirqs_on(CALLER_ADDR0);
+       kvm_guest_enter_irqoff();
 
        if (sev_es_guest(vcpu->kvm)) {
                __svm_sev_es_vcpu_run(vmcb_pa);
@@ -3748,24 +3725,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
                vmload(__sme_page_pa(sd->save_area));
        }
 
-       /*
-        * VMEXIT disables interrupts (host state), but tracing and lockdep
-        * have them in state 'on' as recorded before entering guest mode.
-        * Same as enter_from_user_mode().
-        *
-        * guest_exit_irqoff() restores host context and reinstates RCU if
-        * enabled and required.
-        *
-        * This needs to be done before the below as native_read_msr()
-        * contains a tracepoint and x86_spec_ctrl_restore_host() calls
-        * into world and some more.
-        */
-       lockdep_hardirqs_off(CALLER_ADDR0);
-       guest_exit_irqoff();
-
-       instrumentation_begin();
-       trace_hardirqs_off_finish();
-       instrumentation_end();
+       kvm_guest_exit_irqoff();
 }
 
 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
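The open-coded tracing/lockdep/context-tracking sequences around VMRUN are replaced by the shared kvm_guest_enter_irqoff()/kvm_guest_exit_irqoff() helpers, so the ordering rules spelled out in the deleted comments live in one place instead of being duplicated per vendor. A sketch of the sequences those helpers are expected to encapsulate, reconstructed from the code removed above (not the helpers' literal bodies):

static __always_inline void example_guest_enter_irqoff(void)
{
        /*
         * VMRUN enables interrupts from the host's point of view, so tell
         * tracing and lockdep that IRQs are "on" before entering the guest.
         */
        instrumentation_begin();
        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        instrumentation_end();

        /* Inform context tracking/RCU about the transition to guest mode. */
        guest_enter_irqoff();
        lockdep_hardirqs_on(CALLER_ADDR0);
}

static __always_inline void example_guest_exit_irqoff(void)
{
        /* Undo the above before any traced host code (MSR reads etc.) runs. */
        lockdep_hardirqs_off(CALLER_ADDR0);
        guest_exit_irqoff();

        instrumentation_begin();
        trace_hardirqs_off_finish();
        instrumentation_end();
}
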
@@ -4007,8 +3967,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
                             guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
 
-       /* Check again if INVPCID interception if required */
-       svm_check_invpcid(svm);
+       svm_recalc_instruction_intercepts(vcpu, svm);
 
        /* For sev guests, the memory encryption bit is not reserved in CR3.  */
        if (sev_guest(vcpu->kvm)) {