KVM: X86: TSCDEADLINE MSR emulation fastpath
Author: Wanpeng Li <wanpengli@tencent.com>
Tue, 28 Apr 2020 06:23:28 +0000 (14:23 +0800)
Committer: Paolo Bonzini <pbonzini@redhat.com>
Fri, 15 May 2020 16:26:21 +0000 (12:26 -0400)
This patch implements a fast path for emulation of writes to the TSCDEADLINE
MSR.  Besides shortcutting various housekeeping tasks in the vCPU loop,
the fast path can also deliver the timer interrupt directly without going
through KVM_REQ_PENDING_TIMER because it runs in vCPU context.

Tested-by: Haiwei Li <lihaiwei@tencent.com>
Cc: Haiwei Li <lihaiwei@tencent.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1588055009-12677-7-git-send-email-wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/lapic.c
arch/x86/kvm/x86.c

index 73e51ab..2a3b574 100644 (file)
@@ -1600,7 +1600,7 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
        }
 }
 
-static void apic_timer_expired(struct kvm_lapic *apic)
+static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
 {
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct kvm_timer *ktimer = &apic->lapic_timer;
@@ -1611,6 +1611,12 @@ static void apic_timer_expired(struct kvm_lapic *apic)
        if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
                ktimer->expired_tscdeadline = ktimer->tscdeadline;
 
+       if (!from_timer_fn && vcpu->arch.apicv_active) {
+               WARN_ON(kvm_get_running_vcpu() != vcpu);
+               kvm_apic_inject_pending_timer_irqs(apic);
+               return;
+       }
+
        if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
                if (apic->lapic_timer.timer_advance_ns)
                        __kvm_wait_lapic_expire(vcpu);
@@ -1650,7 +1656,7 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
                expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
                hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
        } else
-               apic_timer_expired(apic);
+               apic_timer_expired(apic, false);
 
        local_irq_restore(flags);
 }
@@ -1758,7 +1764,7 @@ static void start_sw_period(struct kvm_lapic *apic)
 
        if (ktime_after(ktime_get(),
                        apic->lapic_timer.target_expiration)) {
-               apic_timer_expired(apic);
+               apic_timer_expired(apic, false);
 
                if (apic_lvtt_oneshot(apic))
                        return;
@@ -1820,7 +1826,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
                if (atomic_read(&ktimer->pending)) {
                        cancel_hv_timer(apic);
                } else if (expired) {
-                       apic_timer_expired(apic);
+                       apic_timer_expired(apic, false);
                        cancel_hv_timer(apic);
                }
        }
@@ -1870,7 +1876,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
                goto out;
        WARN_ON(rcuwait_active(&vcpu->wait));
        cancel_hv_timer(apic);
-       apic_timer_expired(apic);
+       apic_timer_expired(apic, false);
 
        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                advance_periodic_target_expiration(apic);
@@ -2376,7 +2382,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
        struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
        struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
 
-       apic_timer_expired(apic);
+       apic_timer_expired(apic, true);
 
        if (lapic_is_periodic(apic)) {
                advance_periodic_target_expiration(apic);
index 71749fc..7fb7c1a 100644 (file)
@@ -1608,6 +1608,15 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data
        return 1;
 }
 
+/*
+ * Fast-path write handler for MSR_IA32_TSCDEADLINE.
+ *
+ * Returns 0 when the write was fully handled here (the caller then skips
+ * the emulated instruction and re-enters the guest, see the
+ * MSR_IA32_TSCDEADLINE case in handle_fastpath_set_msr_irqoff), or 1 to
+ * fall back to the normal MSR emulation path when the hypervisor timer
+ * cannot be used for this vCPU.
+ */
+static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
+{
+       if (!kvm_can_use_hv_timer(vcpu))
+               return 1;
+
+       kvm_set_lapic_tscdeadline_msr(vcpu, data);
+       return 0;
+}
+
 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
 {
        u32 msr = kvm_rcx_read(vcpu);
@@ -1622,6 +1631,13 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
                        ret = EXIT_FASTPATH_EXIT_HANDLED;
                }
                break;
+       case MSR_IA32_TSCDEADLINE:
+               data = kvm_read_edx_eax(vcpu);
+               if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
+                       kvm_skip_emulated_instruction(vcpu);
+                       ret = EXIT_FASTPATH_REENTER_GUEST;
+               }
+               break;
        default:
                break;
        }