Merge tag 'kvm-x86-misc-6.9' of https://github.com/kvm-x86/linux into HEAD
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 88a4ff2..fa3c3ab 100644
@@ -49,6 +49,8 @@
 #include <asm/spec-ctrl.h>
 #include <asm/vmx.h>
 
+#include <trace/events/ipi.h>
+
 #include "capabilities.h"
 #include "cpuid.h"
 #include "hyperv.h"
@@ -1290,8 +1292,6 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
        u16 fs_sel, gs_sel;
        int i;
 
-       vmx->req_immediate_exit = false;
-
        /*
         * Note that guest MSRs to be saved/restored can also be changed
         * when guest state is loaded. This happens when guest transitions
@@ -5575,10 +5575,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 
        reg = DEBUG_REG_ACCESS_REG(exit_qualification);
        if (exit_qualification & TYPE_MOV_FROM_DR) {
-               unsigned long val;
-
-               kvm_get_dr(vcpu, dr, &val);
-               kvm_register_write(vcpu, reg, val);
+               kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr));
                err = 0;
        } else {
                err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
@@ -6000,22 +5997,46 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
        return 1;
 }
 
-static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
+static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu,
+                                                  bool force_immediate_exit)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (!vmx->req_immediate_exit &&
-           !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
-               kvm_lapic_expired_hv_timer(vcpu);
+       /*
+        * In the *extremely* unlikely scenario that this is a spurious VM-Exit
+        * due to the timer expiring while it was "soft" disabled, just eat the
+        * exit and re-enter the guest.
+        */
+       if (unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled))
                return EXIT_FASTPATH_REENTER_GUEST;
-       }
 
-       return EXIT_FASTPATH_NONE;
+       /*
+        * If the timer expired because KVM used it to force an immediate exit,
+        * then mission accomplished.
+        */
+       if (force_immediate_exit)
+               return EXIT_FASTPATH_EXIT_HANDLED;
+
+       /*
+        * If L2 is active, go down the slow path as emulating the guest timer
+        * expiration likely requires synthesizing a nested VM-Exit.
+        */
+       if (is_guest_mode(vcpu))
+               return EXIT_FASTPATH_NONE;
+
+       kvm_lapic_expired_hv_timer(vcpu);
+       return EXIT_FASTPATH_REENTER_GUEST;
 }
 
 static int handle_preemption_timer(struct kvm_vcpu *vcpu)
 {
-       handle_fastpath_preemption_timer(vcpu);
+       /*
+        * This non-fastpath handler is reached if and only if the preemption
+        * timer was being used to emulate a guest timer while L2 is active.
+        * All other scenarios are supposed to be handled in the fastpath.
+        */
+       WARN_ON_ONCE(!is_guest_mode(vcpu));
+       kvm_lapic_expired_hv_timer(vcpu);
        return 1;
 }
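
(Aside: a compilable stand-alone model of the decision order encoded by the rewritten fastpath above; the enum, struct and function names are simplified stand-ins for KVM's fastpath_t and vCPU state, not kernel code.)

    #include <stdbool.h>
    #include <stdio.h>

    enum fastpath { FASTPATH_NONE, FASTPATH_REENTER_GUEST, FASTPATH_EXIT_HANDLED };

    struct vcpu_model {
            bool hv_timer_soft_disabled;    /* timer expired while "soft" disabled */
            bool in_guest_mode;             /* L2 is active */
    };

    static enum fastpath preemption_timer_fastpath(const struct vcpu_model *v,
                                                   bool force_immediate_exit)
    {
            if (v->hv_timer_soft_disabled)
                    return FASTPATH_REENTER_GUEST;  /* spurious exit, just resume */
            if (force_immediate_exit)
                    return FASTPATH_EXIT_HANDLED;   /* timer was armed to 0 on purpose */
            if (v->in_guest_mode)
                    return FASTPATH_NONE;           /* may need a synthesized nested exit */
            /* Genuine L1 timer expiration: emulate it, then resume the guest. */
            return FASTPATH_REENTER_GUEST;
    }

    int main(void)
    {
            struct vcpu_model v = { 0 };

            printf("%d\n", preemption_timer_fastpath(&v, true));   /* 2: exit handled */
            printf("%d\n", preemption_timer_fastpath(&v, false));  /* 1: reenter guest */
            return 0;
    }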
 
@@ -7155,13 +7176,13 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
                                        msrs[i].host, false);
 }
 
-static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 tscl;
        u32 delta_tsc;
 
-       if (vmx->req_immediate_exit) {
+       if (force_immediate_exit) {
                vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
                vmx->loaded_vmcs->hv_timer_soft_disabled = false;
        } else if (vmx->hv_deadline_tsc != -1) {
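
(As a reading aid for the hunk above: this is the arm-the-timer step right before VM-Entry, where a forced exit programs the VMX preemption timer to 0 so the CPU exits immediately after entering the guest, and a pending hv_deadline_tsc programs the remaining TSC delta instead. The sketch below is a rough stand-alone model of that selection; the deadline arithmetic is an assumption, since the real code also scales the delta by a CPU-specific preemption-timer rate and clamps it to 32 bits, and none of the names below are KVM's.)

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NO_DEADLINE ((uint64_t)-1)

    /*
     * Decide what to program into the preemption timer before entry.  Returns
     * true if the timer should be armed with *val, false to leave it disabled.
     */
    static bool pick_preemption_timer_value(bool force_immediate_exit,
                                            uint64_t deadline_tsc, uint64_t now_tsc,
                                            uint32_t *val)
    {
            if (force_immediate_exit) {
                    *val = 0;               /* fire as soon as the guest runs */
                    return true;
            }
            if (deadline_tsc != NO_DEADLINE) {
                    *val = deadline_tsc > now_tsc ?
                           (uint32_t)(deadline_tsc - now_tsc) : 0;
                    return true;
            }
            return false;                   /* nothing pending, leave it soft-disabled */
    }

    int main(void)
    {
            uint32_t val;

            if (pick_preemption_timer_value(true, NO_DEADLINE, 1000, &val))
                    printf("armed with %u\n", val);
            return 0;
    }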
@@ -7214,13 +7235,22 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
        barrier_nospec();
 }
 
-static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
+                                            bool force_immediate_exit)
 {
+       /*
+        * If L2 is active, some VMX preemption timer exits can still be
+        * handled in the fastpath, but all other exits must use the slow path.
+        */
+       if (is_guest_mode(vcpu) &&
+           to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_PREEMPTION_TIMER)
+               return EXIT_FASTPATH_NONE;
+
        switch (to_vmx(vcpu)->exit_reason.basic) {
        case EXIT_REASON_MSR_WRITE:
                return handle_fastpath_set_msr_irqoff(vcpu);
        case EXIT_REASON_PREEMPTION_TIMER:
-               return handle_fastpath_preemption_timer(vcpu);
+               return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
        default:
                return EXIT_FASTPATH_NONE;
        }
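
(The guard added above pairs with the hunk near the end of vmx_vcpu_run() that drops the blanket is_guest_mode() bail-out: instead of skipping the fastpath entirely under L2, the dispatcher now lets only preemption-timer exits through. Below is a condensed stand-alone model of that dispatch; the exit-reason values and handler stubs are illustrative, not the architectural encodings.)

    #include <stdbool.h>

    enum exit_reason { EXIT_MSR_WRITE, EXIT_PREEMPTION_TIMER, EXIT_OTHER };
    enum fastpath { FASTPATH_NONE, FASTPATH_REENTER_GUEST, FASTPATH_EXIT_HANDLED };

    /* Stubs standing in for the real fastpath handlers. */
    static enum fastpath fastpath_msr_write(void)
    {
            return FASTPATH_EXIT_HANDLED;
    }

    static enum fastpath fastpath_preemption_timer(bool forced)
    {
            return forced ? FASTPATH_EXIT_HANDLED : FASTPATH_REENTER_GUEST;
    }

    static enum fastpath exit_handlers_fastpath(bool in_guest_mode,
                                                enum exit_reason reason, bool forced)
    {
            /* Under L2, only preemption-timer exits may take the fastpath. */
            if (in_guest_mode && reason != EXIT_PREEMPTION_TIMER)
                    return FASTPATH_NONE;

            switch (reason) {
            case EXIT_MSR_WRITE:
                    return fastpath_msr_write();
            case EXIT_PREEMPTION_TIMER:
                    return fastpath_preemption_timer(forced);
            default:
                    return FASTPATH_NONE;
            }
    }

    int main(void)
    {
            /* Under L2, an MSR-write exit is forced down the slow path. */
            return exit_handlers_fastpath(true, EXIT_MSR_WRITE, false) == FASTPATH_NONE ? 0 : 1;
    }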
@@ -7280,7 +7310,7 @@ out:
        guest_state_exit_irqoff();
 }
 
-static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long cr3, cr4;
@@ -7307,7 +7337,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                return EXIT_FASTPATH_NONE;
        }
 
-       trace_kvm_entry(vcpu);
+       trace_kvm_entry(vcpu, force_immediate_exit);
 
        if (vmx->ple_window_dirty) {
                vmx->ple_window_dirty = false;
@@ -7366,7 +7396,9 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                vmx_passthrough_lbr_msrs(vcpu);
 
        if (enable_preemption_timer)
-               vmx_update_hv_timer(vcpu);
+               vmx_update_hv_timer(vcpu, force_immediate_exit);
+       else if (force_immediate_exit)
+               smp_send_reschedule(vcpu->cpu);
 
        kvm_wait_lapic_expire(vcpu);
 
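(The two-line else-branch above is what lets the force_immediate_exit parameter fully replace the old request_immediate_exit callback: when the preemption timer is enabled it is programmed to 0, as in vmx_update_hv_timer() above, and when it is disabled VMX instead sends itself a reschedule IPI so a VM-Exit still happens immediately after entry; the new <trace/events/ipi.h> include at the top of the file is presumably there for the tracepoints reached via that smp_send_reschedule() call. A minimal stand-alone sketch of the choice, with stand-in function names:)

    #include <stdbool.h>
    #include <stdio.h>

    static void arm_preemption_timer(unsigned int ticks)
    {
            printf("preemption timer <- %u\n", ticks);
    }

    static void send_self_reschedule_ipi(void)
    {
            printf("self reschedule IPI\n");
    }

    /* Just before VM-Entry: pick the mechanism that forces an immediate exit. */
    static void force_immediate_exit_before_entry(bool preemption_timer_enabled)
    {
            if (preemption_timer_enabled)
                    arm_preemption_timer(0);        /* exit right after entering the guest */
            else
                    send_self_reschedule_ipi();     /* the pending IPI forces the exit */
    }

    int main(void)
    {
            force_immediate_exit_before_entry(true);
            force_immediate_exit_before_entry(false);
            return 0;
    }
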
@@ -7430,10 +7462,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
        vmx_recover_nmi_blocking(vmx);
        vmx_complete_interrupts(vmx);
 
-       if (is_guest_mode(vcpu))
-               return EXIT_FASTPATH_NONE;
-
-       return vmx_exit_handlers_fastpath(vcpu);
+       return vmx_exit_handlers_fastpath(vcpu, force_immediate_exit);
 }
 
 static void vmx_vcpu_free(struct kvm_vcpu *vcpu)
@@ -7913,11 +7942,6 @@ static __init void vmx_set_cpu_caps(void)
                kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
 }
 
-static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
-{
-       to_vmx(vcpu)->req_immediate_exit = true;
-}
-
 static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
                                  struct x86_instruction_info *info)
 {
@@ -8370,8 +8394,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .check_intercept = vmx_check_intercept,
        .handle_exit_irqoff = vmx_handle_exit_irqoff,
 
-       .request_immediate_exit = vmx_request_immediate_exit,
-
        .sched_in = vmx_sched_in,
 
        .cpu_dirty_log_size = PML_ENTITY_NUM,
@@ -8631,7 +8653,6 @@ static __init int hardware_setup(void)
        if (!enable_preemption_timer) {
                vmx_x86_ops.set_hv_timer = NULL;
                vmx_x86_ops.cancel_hv_timer = NULL;
-               vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
        }
 
        kvm_caps.supported_mce_cap |= MCG_LMCE_P;