// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */
#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "cpuid.h"
#include "hyperv.h"
#include "xen.h"

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"
#include "irq.h"
41 /* "Hv#1" signature */
42 #define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
44 #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
46 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}
static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				       int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	u64 sint_value;
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}
static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	int auto_eoi_old, auto_eoi_new;

	if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
		return;

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	auto_eoi_old = bitmap_weight(synic->auto_eoi_bitmap, 256);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);

	auto_eoi_new = bitmap_weight(synic->auto_eoi_bitmap, 256);

	/* Nothing to do if the count of auto-EOI vectors stays zero/non-zero. */
	if (!!auto_eoi_old == !!auto_eoi_new)
		return;

	down_write(&vcpu->kvm->arch.apicv_update_lock);

	if (auto_eoi_new)
		hv->synic_auto_eoi_used++;
	else
		hv->synic_auto_eoi_used--;

	__kvm_request_apicv_update(vcpu->kvm,
				   !hv->synic_auto_eoi_used,
				   APICV_INHIBIT_REASON_HYPERV);

	up_write(&vcpu->kvm->arch.apicv_update_lock);
}
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector, old_vector;
	bool masked;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	masked = data & HV_SYNIC_SINT_MASKED;

	/*
	 * Valid vectors are 16-255, however, nested Hyper-V attempts to write
	 * the default '0x10000' value on boot and this should not #GP. We need
	 * to allow zero-initializing the register from the host as well.
	 */
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior. The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */
	old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;

	atomic64_set(&synic->sint[sint], data);

	synic_update_vector(synic, old_vector);

	synic_update_vector(synic, vector);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
	return 0;
}
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu = NULL;
	unsigned long i;

	if (vpidx >= KVM_MAX_VCPUS)
		return NULL;

	vcpu = kvm_get_vcpu(kvm, vpidx);
	if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_hv_get_vpindex(vcpu) == vpidx)
			return vcpu;
	return NULL;
}
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
	if (!vcpu || !to_hv_vcpu(vcpu))
		return NULL;
	synic = to_hv_synic(vcpu);
	return (synic->active) ? synic : NULL;
}
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	/* Try to deliver pending Hyper-V SynIC timer messages */
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending && stimer->config.enable &&
		    !stimer->config.direct_mode &&
		    stimer->config.sintx == sint)
			stimer_mark_pending(stimer, false);
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int ret;

	if (!synic->active && !host)
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}
static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu->cpuid_cache.syndbg_cap_eax &
		HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
}

static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
		hv->hv_syndbg.control.status =
			vcpu->run->hyperv.u.syndbg.status;
	return 1;
}
static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
	hv_vcpu->exit.u.syndbg.msr = msr;
	hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
	hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
	hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
	hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
	vcpu->arch.complete_userspace_io =
			kvm_hv_syndbg_complete_userspace;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
				    to_hv_vcpu(vcpu)->vp_index, msr, data);
	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		syndbg->control.control = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		syndbg->control.status = data;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		syndbg->control.send_page = data;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		syndbg->control.recv_page = data;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		syndbg->control.pending_page = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		syndbg->options = data;
		break;
	default:
		break;
	}

	return 0;
}
static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		*pdata = syndbg->control.control;
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		*pdata = syndbg->control.status;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		*pdata = syndbg->control.send_page;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		*pdata = syndbg->control.recv_page;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		*pdata = syndbg->control.pending_page;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		*pdata = syndbg->options;
		break;
	default:
		return 1;
	}

	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);

	return 0;
}
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
			 bool host)
{
	int ret;

	if (!synic->active && !host)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}
static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}
static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * Fall back to get_kvmclock_ns() when TSC page hasn't been set up,
	 * is broken, disabled or being updated.
	 */
	if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
		return div_u64(get_kvmclock_ns(kvm), 100);

	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}
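/*
 * Illustrative sketch, not part of the upstream file: how the TSC page
 * parameters above turn raw TSC ticks into 100ns reference-counter units.
 * For a hypothetical 1 GHz guest TSC, tsc_scale would come out as 2^64/100,
 * so that ticks * scale >> 64 == ticks / 100, i.e. 10^7 reference ticks per
 * second. The computation is the same fixed-point multiply used in the TSC
 * page path of get_time_ref_counter().
 */
static u64 __maybe_unused example_ref_counter(u64 tsc, u64 scale, u64 offset)
{
	/* (tsc * scale) >> 64, plus the precomputed offset, in 100ns units. */
	return mul_u64_u64_shr(tsc, scale, 64) + offset;
}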
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

	return HRTIMER_NORESTART;
}
/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config.periodic) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;

		trace_kvm_hv_stimer_start_periodic(
					hv_stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * Specification v4 (15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}
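/*
 * Worked example for the periodic catch-up above (illustrative sketch, not
 * upstream code): with count = 1000 (i.e. a 100us period in 100ns units),
 * exp_time = 5000 and time_now = 7300, div64_u64_rem(2300, 1000, &rem)
 * leaves rem = 300, so the next expiration is 7300 + (1000 - 300) = 8000.
 * The timer thus stays on its original phase (5000, 6000, 7000, 8000, ...)
 * instead of drifting by however long the vCPU was not running.
 */
static u64 __maybe_unused example_periodic_exp_time(u64 time_now, u64 exp_time,
						    u64 count)
{
	u64 remainder;

	div64_u64_rem(time_now - exp_time, count, &remainder);
	return time_now + (count - remainder);
}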
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	union hv_stimer_config new_config = {.as_uint64 = config},
		old_config = {.as_uint64 = stimer->config.as_uint64};
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);

	if (!synic->active && !host)
		return 1;

	if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
		     !(hv_vcpu->cpuid_cache.features_edx &
		       HV_STIMER_DIRECT_MODE_AVAILABLE)))
		return 1;

	trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if (old_config.enable &&
	    !new_config.direct_mode && new_config.sintx == 0)
		new_config.enable = 0;
	stimer->config.as_uint64 = new_config.as_uint64;

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}
static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);

	if (!synic->active && !host)
		return 1;

	trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (stimer->count == 0)
		stimer->config.enable = 0;
	else if (stimer->config.auto_enable)
		stimer->config.enable = 1;

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config.as_uint64;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg, bool no_retry)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
	gfn_t msg_page_gfn;
	struct hv_message_header hv_hdr;
	int r;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;

	/*
	 * Strictly following the spec-mandated ordering would assume setting
	 * .msg_pending before checking .message_type. However, this function
	 * is only called in vcpu context so the entire update is atomic from
	 * guest POV and thus the exact order here doesn't matter.
	 */
	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
				     msg_off + offsetof(struct hv_message,
							header.message_type),
				     sizeof(hv_hdr.message_type));
	if (r < 0)
		return r;

	if (hv_hdr.message_type != HVMSG_NONE) {
		if (no_retry)
			return 0;

		hv_hdr.message_flags.msg_pending = 1;
		r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
					      &hv_hdr.message_flags,
					      msg_off +
					      offsetof(struct hv_message,
						       header.message_flags),
					      sizeof(hv_hdr.message_flags));
		if (r < 0)
			return r;
		return -EAGAIN;
	}

	r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
				      sizeof(src_msg->header) +
				      src_msg->header.payload_size);
	if (r < 0)
		return r;

	r = synic_set_irq(synic, sint);
	if (r < 0)
		return r;
	return 0;
}
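/*
 * Illustrative sketch, not upstream code: the slot-busy protocol used above,
 * reduced to its effect on the in-memory message header. If the slot still
 * holds an unconsumed message, the hypervisor only sets msg_pending in the
 * existing header and reports -EAGAIN; the pending flag tells the guest to
 * write HV_X64_MSR_EOM after consuming the message, which re-triggers
 * delivery via kvm_hv_notify_acked_sint().
 */
static int __maybe_unused example_claim_msg_slot(struct hv_message_header *hdr)
{
	if (hdr->message_type != HVMSG_NONE) {
		hdr->message_flags.msg_pending = 1;
		return -EAGAIN;
	}
	/* Slot is free; the caller may copy in the new message. */
	return 0;
}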
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
		(struct hv_timer_message_payload *)&msg->u.payload;

	/*
	 * To avoid piling up periodic ticks, don't retry message
	 * delivery for them (within "lazy" lost ticks policy).
	 */
	bool no_retry = stimer->config.periodic;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(to_hv_synic(vcpu),
				 stimer->config.sintx, msg,
				 no_retry);
}
static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = stimer->config.apic_vector
	};

	if (lapic_in_kernel(vcpu))
		return !kvm_apic_set_irq(vcpu, &irq, NULL);
	return 0;
}
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r, direct = stimer->config.direct_mode;

	stimer->msg_pending = true;
	if (!direct)
		r = stimer_send_msg(stimer);
	else
		r = stimer_notify_direct(stimer);
	trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, direct, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config.periodic))
			stimer->config.enable = 0;
	}
}
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	if (!hv_vcpu)
		return;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config.enable) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config.enable) &&
				    stimer->count) {
					if (!stimer->msg_pending)
						stimer_start(stimer);
				} else
					stimer_cleanup(stimer);
			}
		}
}
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i;

	if (!hv_vcpu)
		return;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);

	kfree(hv_vcpu);
	vcpu->arch.hyperv = NULL;
}
bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
		return false;
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);

bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
			    struct hv_vp_assist_page *assist_page)
{
	if (!kvm_hv_assist_page_enabled(vcpu))
		return false;
	return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
				      assist_page, sizeof(*assist_page));
}
EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
		(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}
static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu;
	int i;

	hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
	if (!hv_vcpu)
		return -ENOMEM;

	vcpu->arch.hyperv = hv_vcpu;
	hv_vcpu->vcpu = vcpu;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);

	hv_vcpu->vp_index = vcpu->vcpu_idx;

	return 0;
}
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
	struct kvm_vcpu_hv_synic *synic;
	int r;

	if (!to_hv_vcpu(vcpu)) {
		r = kvm_hv_vcpu_init(vcpu);
		if (r)
			return r;
	}

	synic = to_hv_synic(vcpu);

	synic->active = true;
	synic->dont_zero_synic_pages = dont_zero_synic_pages;
	synic->control = HV_SYNIC_CONTROL_ENABLE;
	return 0;
}
static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		r = true;
		break;
	}

	return r;
}
static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	hv->hv_crash_param[array_index_nospec(index, size)] = data;
	return 0;
}
/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					struct ms_hyperv_tsc_page *tsc_ref)
{
	u64 max_mul;

	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * Check if scale would overflow; if so we use the time ref counter:
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;

	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);

	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}
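/*
 * Worked example for the scale formula above (illustrative sketch, not
 * upstream code). Take a hypothetical 2.56 GHz clock expressed in kvmclock
 * terms as tsc_shift = 0 and tsc_to_system_mul = 25 << 26 (25/64 ns per
 * tick). Then scale = (25 << 26) * 2^32 / 100 = 2^56 exactly, and
 * ticks * scale >> 64 == ticks >> 8, i.e. 256 TSC ticks per 100ns unit,
 * which matches 100ns / (25/64 ns) = 256.
 */
static void __maybe_unused example_check_tsc_scale(void)
{
	u64 scale = mul_u64_u32_div(1ULL << 32, 25U << 26, 100);

	WARN_ON(scale != (1ULL << 56));
	/* 25600 ticks == 100 reference units of 100ns each. */
	WARN_ON(mul_u64_u64_shr(25600, scale, 64) != 100);
}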
/*
 * Don't touch TSC page values if the guest has opted for TSC emulation after
 * migration. KVM doesn't fully support reenlightenment notifications and TSC
 * access emulation, and Hyper-V is known to expect the values in the TSC page
 * to stay constant before TSC access emulation is disabled from the guest
 * side (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to
 * preserve TSC frequency and guest-visible TSC value across migration (and
 * prevent it when TSC scaling is unsupported).
 */
static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
{
	return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
		hv->hv_tsc_emulation_control;
}
void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	u32 tsc_seq;
	u64 gfn;

	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);

	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
		return;

	mutex_lock(&hv->hv_lock);
	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		goto out_err;

	if (tsc_seq && tsc_page_update_unsafe(hv)) {
		if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
			goto out_err;

		hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
		goto out_unlock;
	}

	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		goto out_err;

	/* Ensure sequence is zero before writing the rest of the struct. */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		goto out_err;

	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;

	/* Write the struct entirely before the non-zero sequence. */
	smp_wmb();

	hv->tsc_ref.tsc_sequence = tsc_seq;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;

	hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
	goto out_unlock;

out_err:
	hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
out_unlock:
	mutex_unlock(&hv->hv_lock);
}
void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	u64 gfn;
	int idx;

	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
	    tsc_page_update_unsafe(hv))
		return;

	mutex_lock(&hv->hv_lock);

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;

	/* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
		hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;

	hv->tsc_ref.tsc_sequence = 0;

	/*
	 * Take the srcu lock as memslots will be accessed to check the gfn
	 * cache generation against the memslots generation.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
	srcu_read_unlock(&kvm->srcu, idx);

out_unlock:
	mutex_unlock(&hv->hv_lock);
}
static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_HYPERCALL_AVAILABLE;
	case HV_X64_MSR_VP_RUNTIME:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_VP_RUNTIME_AVAILABLE;
	case HV_X64_MSR_TIME_REF_COUNT:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_TIME_REF_COUNT_AVAILABLE;
	case HV_X64_MSR_VP_INDEX:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_VP_INDEX_AVAILABLE;
	case HV_X64_MSR_RESET:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_RESET_AVAILABLE;
	case HV_X64_MSR_REFERENCE_TSC:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_REFERENCE_TSC_AVAILABLE;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_SYNIC_AVAILABLE;
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG:
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_SYNTIMER_AVAILABLE;
	case HV_X64_MSR_EOI:
	case HV_X64_MSR_ICR:
	case HV_X64_MSR_TPR:
	case HV_X64_MSR_VP_ASSIST_PAGE:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_APIC_ACCESS_AVAILABLE;

	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_FREQUENCY_MSRS;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_REENLIGHTENMENT;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
		return hv_vcpu->cpuid_cache.features_edx &
			HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return hv_vcpu->cpuid_cache.features_edx &
			HV_FEATURE_DEBUG_MSRS_AVAILABLE;
	default:
		break;
	}

	return false;
}
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);

	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u8 instructions[9];
		int i = 0;
		u64 addr;

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}

		/*
		 * If Xen and Hyper-V hypercalls are both enabled, disambiguate
		 * the same way Xen itself does, by setting bit 31 of EAX,
		 * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and just
		 * going to be clobbered on 64-bit.
		 */
		if (kvm_xen_hypercall_enabled(kvm)) {
			/* orl $0x80000000, %eax */
			instructions[i++] = 0x0d;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x80;
		}

		/* vmcall/vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
		i += 3;

		/* ret */
		((unsigned char *)instructions)[i++] = 0xc3;

		addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
		if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
			return 1;
		hv->hv_hypercall = data;
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
			if (!host)
				hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
			else
				hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		} else {
			hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
		}
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(kvm,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		if (host)
			return kvm_hv_msr_set_crash_ctl(kvm, data);

		if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
			vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
				   hv->hv_crash_param[0],
				   hv->hv_crash_param[1],
				   hv->hv_crash_param[2],
				   hv->hv_crash_param[3],
				   hv->hv_crash_param[4]);

			/* Send notification about crash to user space */
			kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
		}
		break;
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		hv->hv_reenlightenment_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		hv->hv_tsc_emulation_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		if (data && !host)
			return 1;

		hv->hv_tsc_emulation_status = data;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_set_msr(vcpu, msr, data, host);
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}
/* Calculate CPU time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);

	return div_u64(utime + stime, 100);
}
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
		u32 new_vp_index = (u32)data;

		if (!host || new_vp_index >= KVM_MAX_VCPUS)
			return 1;

		if (new_vp_index == hv_vcpu->vp_index)
			return 0;

		/*
		 * The VP index is initialized to vcpu_idx by
		 * kvm_hv_vcpu_init(), so they initially match. Now that the
		 * VP index is changing, adjust num_mismatched_vp_indexes if
		 * it now matches or no longer matches vcpu_idx.
		 */
		if (hv_vcpu->vp_index == vcpu->vcpu_idx)
			atomic_inc(&hv->num_mismatched_vp_indexes);
		else if (new_vp_index == vcpu->vcpu_idx)
			atomic_dec(&hv->num_mismatched_vp_indexes);

		hv_vcpu->vp_index = new_vp_index;
		break;
	}
	case HV_X64_MSR_VP_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
			hv_vcpu->hv_vapic = data;
			if (kvm_lapic_set_pv_eoi(vcpu, 0, 0))
				return 1;
			break;
		}

		gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;

		/*
		 * Clear the apic_assist portion of struct hv_vp_assist_page
		 * only; there can be valuable data in the rest which needs
		 * to be preserved, e.g. on migration.
		 */
		if (__put_user(0, (u32 __user *)addr))
			return 1;
		hv_vcpu->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_set_pv_eoi(vcpu,
					 gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
					 sizeof(struct hv_vp_assist_page)))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_set_config(to_hv_stimer(vcpu, timer_index),
					 data, host);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_set_count(to_hv_stimer(vcpu, timer_index),
					data, host);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			     bool host)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);

	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = get_time_ref_counter(kvm);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(kvm,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(kvm, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		data = hv->hv_reenlightenment_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		data = hv->hv_tsc_emulation_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		data = hv->hv_tsc_emulation_status;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_get_msr(vcpu, msr, pdata, host);
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			  bool host)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX:
		data = hv_vcpu->vp_index;
		break;
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_VP_ASSIST_PAGE:
		data = hv_vcpu->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_get_config(to_hv_stimer(vcpu, timer_index),
					 pdata);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_get_count(to_hv_stimer(vcpu, timer_index),
					pdata);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
		data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
		break;
	case HV_X64_MSR_APIC_FREQUENCY:
		data = APIC_BUS_FREQUENCY;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (!host && !vcpu->arch.hyperv_enabled)
		return 1;

	if (!to_hv_vcpu(vcpu)) {
		if (kvm_hv_vcpu_init(vcpu))
			return 1;
	}

	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&hv->hv_lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&hv->hv_lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (!host && !vcpu->arch.hyperv_enabled)
		return 1;

	if (!to_hv_vcpu(vcpu)) {
		if (kvm_hv_vcpu_init(vcpu))
			return 1;
	}

	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&hv->hv_lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
		mutex_unlock(&hv->hv_lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata, host);
}
static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
				    u64 valid_bank_mask, unsigned long *vcpu_mask)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes);
	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
	struct kvm_vcpu *vcpu;
	int bank, sbank = 0;
	unsigned long i;
	u64 *bitmap;

	BUILD_BUG_ON(sizeof(vp_bitmap) >
		     sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS));

	/*
	 * If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else
	 * fill a temporary buffer and manually test each vCPU's VP index.
	 */
	if (likely(!has_mismatch))
		bitmap = (u64 *)vcpu_mask;
	else
		bitmap = vp_bitmap;

	/*
	 * Each set of 64 VPs is packed into sparse_banks, with valid_bank_mask
	 * having a '1' for each bank that exists in sparse_banks. Sets must
	 * be in ascending order, i.e. bank0..bankN.
	 */
	memset(bitmap, 0, sizeof(vp_bitmap));
	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
			 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
		bitmap[bank] = sparse_banks[sbank++];

	if (likely(!has_mismatch))
		return;

	bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
			__set_bit(i, vcpu_mask);
	}
}
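/*
 * Illustrative sketch, not upstream code: building the sparse-set encoding
 * consumed above. To address VP indices 3 and 130, banks 0 (VPs 0-63) and
 * 2 (VPs 128-191) are present, so valid_bank_mask has bits 0 and 2 set and
 * sparse_banks holds only the two present banks, back to back, in ascending
 * bank order.
 */
static void __maybe_unused example_build_sparse_set(u64 *valid_bank_mask,
						    u64 sparse_banks[2])
{
	*valid_bank_mask = BIT_ULL(0) | BIT_ULL(2);
	sparse_banks[0] = BIT_ULL(3);		/* VP 3, bit 3 of bank 0 */
	sparse_banks[1] = BIT_ULL(130 - 128);	/* VP 130, bit 2 of bank 2 */
}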
struct kvm_hv_hcall {
	u64 param;
	u64 ingpa;
	u64 outgpa;
	u16 code;
	u16 var_cnt;
	u16 rep_cnt;
	u16 rep_idx;
	bool fast;
	bool rep;
	sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
};

static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
				 u64 *sparse_banks, gpa_t offset)
{
	u16 var_cnt;

	if (hc->var_cnt > 64)
		return -EINVAL;

	/* Ignore banks that cannot possibly contain a legal VP index. */
	var_cnt = min_t(u16, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS);

	return kvm_read_guest(kvm, hc->ingpa + offset, sparse_banks,
			      var_cnt * sizeof(*sparse_banks));
}
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
{
	int i;
	struct kvm *kvm = vcpu->kvm;
	struct hv_tlb_flush_ex flush_ex;
	struct hv_tlb_flush flush;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
	u64 valid_bank_mask;
	u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
	bool all_cpus;

	/*
	 * The Hyper-V TLFS doesn't allow more than 64 sparse banks, e.g. the
	 * valid mask is a u64. Fail the build if KVM's max allowed number of
	 * vCPUs (>4096) exceeds this limit; KVM will need additional changes
	 * for Hyper-V support to avoid setting the guest up to fail.
	 */
	BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > 64);

	if (!ex) {
		if (hc->fast) {
			flush.address_space = hc->ingpa;
			flush.flags = hc->outgpa;
			flush.processor_mask = sse128_lo(hc->xmm[0]);
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa,
						    &flush, sizeof(flush))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
		}

		trace_kvm_hv_flush_tlb(flush.processor_mask,
				       flush.address_space, flush.flags);

		valid_bank_mask = BIT_ULL(0);
		sparse_banks[0] = flush.processor_mask;

		/*
		 * Work around possible WS2012 bug: it sends hypercalls
		 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
		 * while also expecting us to flush something and crashing if
		 * we don't. Let's treat processor_mask == 0 the same as
		 * HV_FLUSH_ALL_PROCESSORS.
		 */
		all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
			flush.processor_mask == 0;
	} else {
		if (hc->fast) {
			flush_ex.address_space = hc->ingpa;
			flush_ex.flags = hc->outgpa;
			memcpy(&flush_ex.hv_vp_set,
			       &hc->xmm[0], sizeof(hc->xmm[0]));
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
						    sizeof(flush_ex))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
		}

		trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
					  flush_ex.hv_vp_set.format,
					  flush_ex.address_space,
					  flush_ex.flags);

		valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
		all_cpus = flush_ex.hv_vp_set.format !=
			HV_GENERIC_SET_SPARSE_4K;

		if (hc->var_cnt != bitmap_weight((unsigned long *)&valid_bank_mask, 64))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		if (all_cpus)
			goto do_flush;

		if (!hc->var_cnt)
			goto ret_success;

		if (hc->fast) {
			if (hc->var_cnt > HV_HYPERCALL_MAX_XMM_REGISTERS - 1)
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			for (i = 0; i < hc->var_cnt; i += 2) {
				sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]);
				sparse_banks[i + 1] = sse128_hi(hc->xmm[i / 2 + 1]);
			}
			goto do_flush;
		}

		if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks,
					  offsetof(struct hv_tlb_flush_ex,
						   hv_vp_set.bank_contents)))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

do_flush:
	/*
	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
	 * analyze it here, flush TLB regardless of the specified address space.
	 */
	if (all_cpus) {
		kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH_GUEST);
	} else {
		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);

		kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST, vcpu_mask);
	}

ret_success:
	/* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
	return (u64)HV_STATUS_SUCCESS |
		((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}
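/*
 * Illustrative sketch, not upstream code: the hypercall result layout used
 * above. The low 16 bits carry the status code; for rep hypercalls the
 * 'reps completed' field lives at HV_HYPERCALL_REP_COMP_OFFSET, and
 * reporting rep_cnt there tells the guest every repetition was processed,
 * so it will not re-issue the call.
 */
static u64 __maybe_unused example_rep_result(u16 status, u16 reps_completed)
{
	return (u64)status |
		((u64)reps_completed << HV_HYPERCALL_REP_COMP_OFFSET);
}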
static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
				 unsigned long *vcpu_bitmap)
{
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = vector
	};
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
			continue;

		/* We fail only when APIC is disabled */
		kvm_apic_set_irq(vcpu, &irq, NULL);
	}
}
static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
{
	struct kvm *kvm = vcpu->kvm;
	struct hv_send_ipi_ex send_ipi_ex;
	struct hv_send_ipi send_ipi;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
	unsigned long valid_bank_mask;
	u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
	u32 vector;
	bool all_cpus;

	if (!ex) {
		if (!hc->fast) {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
						    sizeof(send_ipi))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = send_ipi.cpu_mask;
			vector = send_ipi.vector;
		} else {
			/* 'reserved' part of hv_send_ipi should be 0 */
			if (unlikely(hc->ingpa >> 32 != 0))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = hc->outgpa;
			vector = (u32)hc->ingpa;
		}
		all_cpus = false;
		valid_bank_mask = BIT_ULL(0);

		trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
	} else {
		if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
					    sizeof(send_ipi_ex))))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
					 send_ipi_ex.vp_set.format,
					 send_ipi_ex.vp_set.valid_bank_mask);

		vector = send_ipi_ex.vector;
		valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
		all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;

		if (hc->var_cnt != bitmap_weight(&valid_bank_mask, 64))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		if (all_cpus)
			goto check_and_send_ipi;

		if (!hc->var_cnt)
			goto ret_success;

		if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks,
					  offsetof(struct hv_send_ipi_ex,
						   vp_set.bank_contents)))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

check_and_send_ipi:
	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	if (all_cpus) {
		kvm_send_ipi_to_many(kvm, vector, NULL);
	} else {
		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);

		kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
	}

ret_success:
	return HV_STATUS_SUCCESS;
}
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *entry;
	struct kvm_vcpu_hv *hv_vcpu;

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
	if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
		vcpu->arch.hyperv_enabled = true;
	} else {
		vcpu->arch.hyperv_enabled = false;
		return;
	}

	if (!to_hv_vcpu(vcpu) && kvm_hv_vcpu_init(vcpu))
		return;

	hv_vcpu = to_hv_vcpu(vcpu);

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES, 0);
	if (entry) {
		hv_vcpu->cpuid_cache.features_eax = entry->eax;
		hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
		hv_vcpu->cpuid_cache.features_edx = entry->edx;
	} else {
		hv_vcpu->cpuid_cache.features_eax = 0;
		hv_vcpu->cpuid_cache.features_ebx = 0;
		hv_vcpu->cpuid_cache.features_edx = 0;
	}

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO, 0);
	if (entry) {
		hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
		hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
	} else {
		hv_vcpu->cpuid_cache.enlightenments_eax = 0;
		hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
	}

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0);
	if (entry)
		hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
	else
		hv_vcpu->cpuid_cache.syndbg_cap_eax = 0;
}
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
{
	struct kvm_vcpu_hv *hv_vcpu;
	int ret = 0;

	if (!to_hv_vcpu(vcpu)) {
		if (enforce) {
			ret = kvm_hv_vcpu_init(vcpu);
			if (ret)
				return ret;
		} else {
			return 0;
		}
	}

	hv_vcpu = to_hv_vcpu(vcpu);
	hv_vcpu->enforce_cpuid = enforce;

	return ret;
}
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_hypercall(vcpu);
	if (longmode)
		kvm_rax_write(vcpu, result);
	else {
		kvm_rdx_write(vcpu, result >> 32);
		kvm_rax_write(vcpu, result & 0xffffffff);
	}
}

static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
	trace_kvm_hv_hypercall_done(result);
	kvm_hv_hypercall_set_result(vcpu, result);
	++vcpu->stat.hypercalls;
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	struct eventfd_ctx *eventfd;

	if (unlikely(!hc->fast)) {
		int ret;
		gpa_t gpa = hc->ingpa;

		if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
		    offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
			return HV_STATUS_INVALID_ALIGNMENT;

		ret = kvm_vcpu_read_guest(vcpu, gpa,
					  &hc->ingpa, sizeof(hc->ingpa));
		if (ret < 0)
			return HV_STATUS_INVALID_ALIGNMENT;
	}

	/*
	 * Per spec, bits 32-47 contain the extra "flag number". However, we
	 * have no use for it, and in all known use cases it is zero, so just
	 * report lookup failure if it isn't.
	 */
	if (hc->ingpa & 0xffff00000000ULL)
		return HV_STATUS_INVALID_PORT_ID;
	/* remaining bits are reserved-zero */
	if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
	rcu_read_lock();
	eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
	rcu_read_unlock();
	if (!eventfd)
		return HV_STATUS_INVALID_PORT_ID;

	eventfd_signal(eventfd, 1);
	return HV_STATUS_SUCCESS;
}
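/*
 * Illustrative sketch, not upstream code: the parameter checks above split a
 * HvSignalEvent parameter into a connection ID (the low bits covered by
 * KVM_HYPERV_CONN_ID_MASK), a flag number (bits 32-47, unused here and
 * required to be zero) and reserved-zero bits. The distinct checks exist so
 * that a non-zero flag number yields HV_STATUS_INVALID_PORT_ID while other
 * reserved bits yield HV_STATUS_INVALID_HYPERCALL_INPUT.
 */
static bool __maybe_unused example_valid_signal_param(u64 param, u32 *conn_id)
{
	if (param & 0xffff00000000ULL)		/* flag number unsupported */
		return false;
	if (param & ~KVM_HYPERV_CONN_ID_MASK)	/* reserved bits set */
		return false;
	*conn_id = param;
	return true;
}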
static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
{
	switch (hc->code) {
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		return true;
	}

	return false;
}

static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
{
	int reg;

	kvm_fpu_get();
	for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
		_kvm_read_sse_reg(reg, &hc->xmm[reg]);
	kvm_fpu_put();
}
static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		return hv_vcpu->cpuid_cache.enlightenments_ebx &&
			hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
	case HVCALL_POST_MESSAGE:
		return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
	case HVCALL_SIGNAL_EVENT:
		return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
	case HVCALL_RESET_DEBUG_SESSION:
		/*
		 * Return 'true' when SynDBG is disabled so the resulting code
		 * will be HV_STATUS_INVALID_HYPERCALL_CODE.
		 */
		return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
			hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
			return false;
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		return hv_vcpu->cpuid_cache.enlightenments_eax &
			HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
	case HVCALL_SEND_IPI_EX:
		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
			return false;
		fallthrough;
	case HVCALL_SEND_IPI:
		return hv_vcpu->cpuid_cache.enlightenments_eax &
			HV_X64_CLUSTER_IPI_RECOMMENDED;
	default:
		break;
	}

	return true;
}
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_hv_hcall hc;
	u64 ret = HV_STATUS_SUCCESS;

	/*
	 * hypercall generates #UD from non-zero CPL and real mode
	 * per HYPER-V spec
	 */
	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

#ifdef CONFIG_X86_64
	if (is_64_bit_hypercall(vcpu)) {
		hc.param = kvm_rcx_read(vcpu);
		hc.ingpa = kvm_rdx_read(vcpu);
		hc.outgpa = kvm_r8_read(vcpu);
	} else
#endif
	{
		hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
			    (kvm_rax_read(vcpu) & 0xffffffff);
		hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
			    (kvm_rcx_read(vcpu) & 0xffffffff);
		hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
			     (kvm_rsi_read(vcpu) & 0xffffffff);
	}

	hc.code = hc.param & 0xffff;
	hc.var_cnt = (hc.param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET;
	hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
	hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
	hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
	hc.rep = !!(hc.rep_cnt || hc.rep_idx);

	trace_kvm_hv_hypercall(hc.code, hc.fast, hc.var_cnt, hc.rep_cnt,
			       hc.rep_idx, hc.ingpa, hc.outgpa);

	if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
		ret = HV_STATUS_ACCESS_DENIED;
		goto hypercall_complete;
	}

	if (unlikely(hc.param & HV_HYPERCALL_RSVD_MASK)) {
		ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
		goto hypercall_complete;
	}

	if (hc.fast && is_xmm_fast_hypercall(&hc)) {
		if (unlikely(hv_vcpu->enforce_cpuid &&
			     !(hv_vcpu->cpuid_cache.features_edx &
			       HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}

		kvm_hv_hypercall_read_xmm(&hc);
	}

	switch (hc.code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		kvm_vcpu_on_spin(vcpu, true);
		break;
	case HVCALL_SIGNAL_EVENT:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hvcall_signal_event(vcpu, &hc);
		if (ret != HV_STATUS_INVALID_PORT_ID)
			break;
		fallthrough;	/* maybe userspace knows this conn_id */
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = hc.param;
		vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
		if (unlikely(!hc.rep_cnt || hc.rep_idx || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
		if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc, true);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc, true);
		break;
	case HVCALL_SEND_IPI:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, &hc, false);
		break;
	case HVCALL_SEND_IPI_EX:
		if (unlikely(hc.fast || hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, &hc, true);
		break;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
		if (unlikely(hc.fast)) {
			ret = HV_STATUS_INVALID_PARAMETER;
			break;
		}
		fallthrough;
	case HVCALL_RESET_DEBUG_SESSION: {
		struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

		if (!kvm_hv_is_syndbg_enabled(vcpu)) {
			ret = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}

		if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
			ret = HV_STATUS_OPERATION_DENIED;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = hc.param;
		vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	}
	default:
		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

hypercall_complete:
	return kvm_hv_hypercall_complete(vcpu, ret);
}
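/*
 * Illustrative decode, not upstream code: the input-value parsing done at the
 * top of kvm_hv_hypercall(). For example, param == 0x0000000100000002 decodes
 * as code 0x2 (HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE), slow (memory-based, fast
 * bit clear), no variable header, rep_cnt = 1 and rep_idx = 0.
 */
static void __maybe_unused example_decode_hypercall(u64 param)
{
	u16 code = param & 0xffff;
	bool fast = param & HV_HYPERCALL_FAST_BIT;
	u16 var_cnt = (param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET;
	u16 rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
	u16 rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;

	pr_debug("code 0x%x fast %d var_cnt %u rep %u/%u\n",
		 code, fast, var_cnt, rep_idx, rep_cnt);
}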
void kvm_hv_init_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	mutex_init(&hv->hv_lock);
	idr_init(&hv->conn_to_evt);
}

void kvm_hv_destroy_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int i;

	idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
		eventfd_ctx_put(eventfd);
	idr_destroy(&hv->conn_to_evt);
}
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int ret;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&hv->hv_lock);
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL_ACCOUNT);
	mutex_unlock(&hv->hv_lock);

	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
	eventfd_ctx_put(eventfd);
	return ret;
}

static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;

	mutex_lock(&hv->hv_lock);
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);

	if (!eventfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
	return 0;
}
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
		return -EINVAL;

	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}
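/*
 * Usage sketch (hypothetical userspace code, shown for illustration only):
 * connecting an eventfd to Hyper-V connection ID 1 via the KVM_HYPERV_EVENTFD
 * VM ioctl, so that guest HvSignalEvent calls on that connection are handled
 * entirely in the kernel via kvm_hvcall_signal_event():
 *
 *	struct kvm_hyperv_eventfd hvevfd = {
 *		.conn_id = 1,
 *		.fd = eventfd(0, EFD_CLOEXEC),
 *		.flags = 0,
 *	};
 *	ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd);
 *
 * Passing KVM_HYPERV_EVENTFD_DEASSIGN in .flags removes the binding again.
 */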
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries)
{
	uint16_t evmcs_ver = 0;
	struct kvm_cpuid_entry2 cpuid_entries[] = {
		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_INTERFACE },
		{ .function = HYPERV_CPUID_VERSION },
		{ .function = HYPERV_CPUID_FEATURES },
		{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
		{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
		{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
		{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
		{ .function = HYPERV_CPUID_NESTED_FEATURES },
	};
	int i, nent = ARRAY_SIZE(cpuid_entries);

	if (kvm_x86_ops.nested_ops->get_evmcs_version)
		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);

	if (cpuid->nent < nent)
		return -E2BIG;

	if (cpuid->nent > nent)
		cpuid->nent = nent;

	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
		u32 signature[3];

		switch (ent->function) {
		case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_INTERFACE:
			ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
			break;

		case HYPERV_CPUID_VERSION:
			/*
			 * We implement some Hyper-V 2016 functions so let's use
			 * a matching build number (0x3839 == 14393).
			 */
			ent->eax = 0x00003839;
			ent->ebx = 0x000A0000;
			break;

		case HYPERV_CPUID_FEATURES:
			ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
			ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			ent->eax |= HV_MSR_SYNIC_AVAILABLE;
			ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
			ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
			ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
			ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
			ent->eax |= HV_MSR_RESET_AVAILABLE;
			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
			ent->eax |= HV_ACCESS_REENLIGHTENMENT;

			ent->ebx |= HV_POST_MESSAGES;
			ent->ebx |= HV_SIGNAL_EVENTS;

			ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

			ent->ebx |= HV_DEBUGGING;
			ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
			ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;

			/*
			 * Direct Synthetic timers only make sense with in-kernel
			 * LAPIC
			 */
			if (!vcpu || lapic_in_kernel(vcpu))
				ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;

			break;

		case HYPERV_CPUID_ENLIGHTMENT_INFO:
			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			if (evmcs_ver)
				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
			if (!cpu_smt_possible())
				ent->eax |= HV_X64_NO_NONARCH_CORESHARING;

			ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED;
			/*
			 * Default number of spinlock retry attempts, matches
			 * HyperV 2016.
			 */
			ent->ebx = 0x00000FFF;

			break;

		case HYPERV_CPUID_IMPLEMENT_LIMITS:
			/* Maximum number of virtual processors */
			ent->eax = KVM_MAX_VCPUS;
			/*
			 * Maximum number of logical processors, matches
			 * HyperV 2016.
			 */
			ent->ebx = 64;

			break;

		case HYPERV_CPUID_NESTED_FEATURES:
			ent->eax = evmcs_ver;
			ent->eax |= HV_X64_NESTED_MSR_BITMAP;

			break;

		case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = 0;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_SYNDBG_INTERFACE:
			memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
			ent->eax = signature[0];
			break;

		case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
			ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			break;

		default:
			break;
		}
	}

	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	return 0;
}
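/*
 * Usage sketch (hypothetical userspace code, for illustration only): the
 * entries built above are exposed through the KVM_GET_SUPPORTED_HV_CPUID
 * ioctl, and userspace typically copies them into the vCPU's CPUID before
 * enabling Hyper-V enlightenments:
 *
 *	struct {
 *		struct kvm_cpuid2 hdr;
 *		struct kvm_cpuid_entry2 ent[10];
 *	} c = { .hdr.nent = 10 };
 *
 *	ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, &c);
 *
 * An nent smaller than the ten leaves above fails with -E2BIG, matching the
 * check at the top of this function.
 */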