// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */
29 #include <linux/cpu.h>
30 #include <linux/kvm_host.h>
31 #include <linux/highmem.h>
32 #include <linux/sched/cputime.h>
33 #include <linux/spinlock.h>
34 #include <linux/eventfd.h>
36 #include <asm/apicdef.h>
37 #include <asm/mshyperv.h>
38 #include <trace/events/kvm.h>
44 #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, HV_VCPUS_PER_SPARSE_BANK)
46 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
49 static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
51 return atomic64_read(&synic->sint[sint]);
54 static inline int synic_get_sint_vector(u64 sint_value)
56 if (sint_value & HV_SYNIC_SINT_MASKED)
58 return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
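/*
 * Illustrative sketch (not part of this file): how a SINTx MSR value is
 * decoded by the helpers above. In the TLFS-derived definitions used here,
 * the vector lives in the low 8 bits (HV_SYNIC_SINT_VECTOR_MASK), the
 * "masked" flag is bit 16 (HV_SYNIC_SINT_MASKED) and the auto-EOI flag is
 * bit 17 (HV_SYNIC_SINT_AUTO_EOI); the exact bit positions are taken from
 * the hyperv-tlfs headers and are assumptions, not defined in this file.
 */
#if 0	/* standalone example, not compiled as part of KVM */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sint = 0x10000;	/* the reset value: vector 0, masked */

	printf("vector %llu, masked %d, auto-eoi %d\n",
	       (unsigned long long)(sint & 0xff),
	       !!(sint & (1ULL << 16)),
	       !!(sint & (1ULL << 17)));
	return 0;
}
#endif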
61 static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
66 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
67 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
73 static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
79 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
80 sint_value = synic_read_sint(synic, i);
81 if (synic_get_sint_vector(sint_value) == vector &&
82 sint_value & HV_SYNIC_SINT_AUTO_EOI)
88 static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
91 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
92 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
93 bool auto_eoi_old, auto_eoi_new;
95 if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
98 if (synic_has_vector_connected(synic, vector))
99 __set_bit(vector, synic->vec_bitmap);
101 __clear_bit(vector, synic->vec_bitmap);
103 auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256);
105 if (synic_has_vector_auto_eoi(synic, vector))
106 __set_bit(vector, synic->auto_eoi_bitmap);
108 __clear_bit(vector, synic->auto_eoi_bitmap);
110 auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256);
112 if (auto_eoi_old == auto_eoi_new)
118 down_write(&vcpu->kvm->arch.apicv_update_lock);
121 hv->synic_auto_eoi_used++;
123 hv->synic_auto_eoi_used--;
126 * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
127 * the hypervisor to manually inject IRQs.
129 __kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
130 APICV_INHIBIT_REASON_HYPERV,
131 !!hv->synic_auto_eoi_used);
133 up_write(&vcpu->kvm->arch.apicv_update_lock);
136 static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
139 int vector, old_vector;
142 vector = data & HV_SYNIC_SINT_VECTOR_MASK;
143 masked = data & HV_SYNIC_SINT_MASKED;
146 * Valid vectors are 16-255, however, nested Hyper-V attempts to write
147 * default '0x10000' value on boot and this should not #GP. We need to
148 * allow zero-initing the register from host as well.
150 if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
153 * Guest may configure multiple SINTs to use the same vector, so
154 * we maintain a bitmap of vectors handled by synic, and a
155 * bitmap of vectors with auto-eoi behavior. The bitmaps are
156 * updated here, and atomically queried on fast paths.
158 old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;
160 atomic64_set(&synic->sint[sint], data);
162 synic_update_vector(synic, old_vector);
164 synic_update_vector(synic, vector);
166 /* Load SynIC vectors into EOI exit bitmap */
167 kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
171 static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
173 struct kvm_vcpu *vcpu = NULL;
176 if (vpidx >= KVM_MAX_VCPUS)
179 vcpu = kvm_get_vcpu(kvm, vpidx);
180 if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
182 kvm_for_each_vcpu(i, vcpu, kvm)
183 if (kvm_hv_get_vpindex(vcpu) == vpidx)
188 static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
190 struct kvm_vcpu *vcpu;
191 struct kvm_vcpu_hv_synic *synic;
193 vcpu = get_vcpu_by_vpidx(kvm, vpidx);
194 if (!vcpu || !to_hv_vcpu(vcpu))
196 synic = to_hv_synic(vcpu);
197 return (synic->active) ? synic : NULL;
200 static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
202 struct kvm *kvm = vcpu->kvm;
203 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
204 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
205 struct kvm_vcpu_hv_stimer *stimer;
208 trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
/* Try to deliver pending Hyper-V SynIC timer messages */
211 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
212 stimer = &hv_vcpu->stimer[idx];
213 if (stimer->msg_pending && stimer->config.enable &&
214 !stimer->config.direct_mode &&
215 stimer->config.sintx == sint)
216 stimer_mark_pending(stimer, false);
219 idx = srcu_read_lock(&kvm->irq_srcu);
220 gsi = atomic_read(&synic->sint_to_gsi[sint]);
222 kvm_notify_acked_gsi(kvm, gsi);
223 srcu_read_unlock(&kvm->irq_srcu, idx);
226 static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
228 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
229 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
231 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
232 hv_vcpu->exit.u.synic.msr = msr;
233 hv_vcpu->exit.u.synic.control = synic->control;
234 hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
235 hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
237 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
240 static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
241 u32 msr, u64 data, bool host)
243 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
246 if (!synic->active && (!host || data))
249 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
253 case HV_X64_MSR_SCONTROL:
254 synic->control = data;
256 synic_exit(synic, msr);
258 case HV_X64_MSR_SVERSION:
263 synic->version = data;
265 case HV_X64_MSR_SIEFP:
266 if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
267 !synic->dont_zero_synic_pages)
268 if (kvm_clear_guest(vcpu->kvm,
269 data & PAGE_MASK, PAGE_SIZE)) {
273 synic->evt_page = data;
275 synic_exit(synic, msr);
277 case HV_X64_MSR_SIMP:
278 if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
279 !synic->dont_zero_synic_pages)
280 if (kvm_clear_guest(vcpu->kvm,
281 data & PAGE_MASK, PAGE_SIZE)) {
285 synic->msg_page = data;
287 synic_exit(synic, msr);
289 case HV_X64_MSR_EOM: {
295 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
296 kvm_hv_notify_acked_sint(vcpu, i);
299 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
300 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
309 static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
311 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
313 return hv_vcpu->cpuid_cache.syndbg_cap_eax &
314 HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
317 static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
319 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
321 if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
322 hv->hv_syndbg.control.status =
323 vcpu->run->hyperv.u.syndbg.status;
327 static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
329 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
330 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
332 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
333 hv_vcpu->exit.u.syndbg.msr = msr;
334 hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
335 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
336 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
337 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
338 vcpu->arch.complete_userspace_io =
339 kvm_hv_syndbg_complete_userspace;
341 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
344 static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
346 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
348 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
351 trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
352 to_hv_vcpu(vcpu)->vp_index, msr, data);
354 case HV_X64_MSR_SYNDBG_CONTROL:
355 syndbg->control.control = data;
357 syndbg_exit(vcpu, msr);
359 case HV_X64_MSR_SYNDBG_STATUS:
360 syndbg->control.status = data;
362 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
363 syndbg->control.send_page = data;
365 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
366 syndbg->control.recv_page = data;
368 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
369 syndbg->control.pending_page = data;
371 syndbg_exit(vcpu, msr);
373 case HV_X64_MSR_SYNDBG_OPTIONS:
374 syndbg->options = data;
383 static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
385 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
387 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
391 case HV_X64_MSR_SYNDBG_CONTROL:
392 *pdata = syndbg->control.control;
394 case HV_X64_MSR_SYNDBG_STATUS:
395 *pdata = syndbg->control.status;
397 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
398 *pdata = syndbg->control.send_page;
400 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
401 *pdata = syndbg->control.recv_page;
403 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
404 *pdata = syndbg->control.pending_page;
406 case HV_X64_MSR_SYNDBG_OPTIONS:
407 *pdata = syndbg->options;
413 trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
418 static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
423 if (!synic->active && !host)
428 case HV_X64_MSR_SCONTROL:
429 *pdata = synic->control;
431 case HV_X64_MSR_SVERSION:
432 *pdata = synic->version;
434 case HV_X64_MSR_SIEFP:
435 *pdata = synic->evt_page;
437 case HV_X64_MSR_SIMP:
438 *pdata = synic->msg_page;
443 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
444 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
453 static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
455 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
456 struct kvm_lapic_irq irq;
459 if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
462 if (sint >= ARRAY_SIZE(synic->sint))
465 vector = synic_get_sint_vector(synic_read_sint(synic, sint));
469 memset(&irq, 0, sizeof(irq));
470 irq.shorthand = APIC_DEST_SELF;
471 irq.dest_mode = APIC_DEST_PHYSICAL;
472 irq.delivery_mode = APIC_DM_FIXED;
476 ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
477 trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
481 int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
483 struct kvm_vcpu_hv_synic *synic;
485 synic = synic_get(kvm, vpidx);
489 return synic_set_irq(synic, sint);
492 void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
494 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
497 trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
499 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
500 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
501 kvm_hv_notify_acked_sint(vcpu, i);
504 static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
506 struct kvm_vcpu_hv_synic *synic;
508 synic = synic_get(kvm, vpidx);
512 if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
515 atomic_set(&synic->sint_to_gsi[sint], gsi);
519 void kvm_hv_irq_routing_update(struct kvm *kvm)
521 struct kvm_irq_routing_table *irq_rt;
522 struct kvm_kernel_irq_routing_entry *e;
525 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
526 lockdep_is_held(&kvm->irq_lock));
528 for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
529 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
530 if (e->type == KVM_IRQ_ROUTING_HV_SINT)
531 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
532 e->hv_sint.sint, gsi);
537 static void synic_init(struct kvm_vcpu_hv_synic *synic)
541 memset(synic, 0, sizeof(*synic));
542 synic->version = HV_SYNIC_VERSION_1;
543 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
544 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
545 atomic_set(&synic->sint_to_gsi[i], -1);
549 static u64 get_time_ref_counter(struct kvm *kvm)
551 struct kvm_hv *hv = to_kvm_hv(kvm);
552 struct kvm_vcpu *vcpu;
 * Fall back to get_kvmclock_ns() when the TSC page hasn't been set up,
 * is broken, is disabled, or is being updated.
559 if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
560 return div_u64(get_kvmclock_ns(kvm), 100);
562 vcpu = kvm_get_vcpu(kvm, 0);
563 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
564 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
565 + hv->tsc_ref.tsc_offset;
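/*
 * Illustrative sketch (not part of this file): the reference time counter
 * is expressed in 100ns units. When the TSC page is usable, the value is
 * (tsc * tsc_scale) >> 64 + tsc_offset, i.e. a 64.64 fixed-point multiply;
 * otherwise it falls back to the kvmclock value in nanoseconds divided by
 * 100. The sketch below reproduces the fixed-point step with __int128.
 */
#if 0	/* standalone example, not compiled as part of KVM */
#include <stdint.h>

static uint64_t ref_counter(uint64_t tsc, uint64_t scale, int64_t offset)
{
	/* Equivalent of mul_u64_u64_shr(tsc, scale, 64) + offset. */
	return (uint64_t)(((unsigned __int128)tsc * scale) >> 64) + offset;
}
#endif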
568 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
571 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
573 set_bit(stimer->index,
574 to_hv_vcpu(vcpu)->stimer_pending_bitmap);
575 kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
580 static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
582 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
584 trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
587 hrtimer_cancel(&stimer->timer);
588 clear_bit(stimer->index,
589 to_hv_vcpu(vcpu)->stimer_pending_bitmap);
590 stimer->msg_pending = false;
591 stimer->exp_time = 0;
594 static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
596 struct kvm_vcpu_hv_stimer *stimer;
598 stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
599 trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
601 stimer_mark_pending(stimer, true);
603 return HRTIMER_NORESTART;
607 * stimer_start() assumptions:
608 * a) stimer->count is not equal to 0
609 * b) stimer->config has HV_STIMER_ENABLE flag
611 static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
616 time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
617 ktime_now = ktime_get();
619 if (stimer->config.periodic) {
620 if (stimer->exp_time) {
621 if (time_now >= stimer->exp_time) {
624 div64_u64_rem(time_now - stimer->exp_time,
625 stimer->count, &remainder);
627 time_now + (stimer->count - remainder);
630 stimer->exp_time = time_now + stimer->count;
632 trace_kvm_hv_stimer_start_periodic(
633 hv_stimer_to_vcpu(stimer)->vcpu_id,
635 time_now, stimer->exp_time);
637 hrtimer_start(&stimer->timer,
638 ktime_add_ns(ktime_now,
639 100 * (stimer->exp_time - time_now)),
643 stimer->exp_time = stimer->count;
644 if (time_now >= stimer->count) {
646 * Expire timer according to Hypervisor Top-Level Functional
647 * specification v4(15.3.1):
648 * "If a one shot is enabled and the specified count is in
649 * the past, it will expire immediately."
651 stimer_mark_pending(stimer, false);
655 trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
657 time_now, stimer->count);
659 hrtimer_start(&stimer->timer,
660 ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
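/*
 * Illustrative sketch (not part of this file): stimer expiration values are
 * in 100ns units, so the distance to the deadline is multiplied by 100
 * before being handed to the hrtimer (which works in nanoseconds). For a
 * periodic timer that is re-armed late, the next expiration is aligned to
 * the period with a 64-bit remainder, mirroring the div64_u64_rem() use
 * above.
 */
#if 0	/* standalone example, not compiled as part of KVM */
#include <stdint.h>

static uint64_t next_periodic_exp(uint64_t time_now, uint64_t exp_time,
				  uint64_t count /* period, 100ns units */)
{
	if (time_now < exp_time)
		return exp_time;
	/* Skip the periods that were missed and stay phase-aligned. */
	return time_now + (count - (time_now - exp_time) % count);
}

static uint64_t hrtimer_delta_ns(uint64_t time_now, uint64_t exp_time)
{
	return 100 * (exp_time - time_now);	/* 100ns ticks -> ns */
}
#endif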
665 static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
668 union hv_stimer_config new_config = {.as_uint64 = config},
669 old_config = {.as_uint64 = stimer->config.as_uint64};
670 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
671 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
672 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
674 if (!synic->active && (!host || config))
677 if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
678 !(hv_vcpu->cpuid_cache.features_edx &
679 HV_STIMER_DIRECT_MODE_AVAILABLE)))
682 trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
683 stimer->index, config, host);
685 stimer_cleanup(stimer);
686 if (old_config.enable &&
687 !new_config.direct_mode && new_config.sintx == 0)
688 new_config.enable = 0;
689 stimer->config.as_uint64 = new_config.as_uint64;
691 if (stimer->config.enable)
692 stimer_mark_pending(stimer, false);
697 static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
700 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
701 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
703 if (!synic->active && (!host || count))
706 trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
707 stimer->index, count, host);
709 stimer_cleanup(stimer);
710 stimer->count = count;
711 if (stimer->count == 0)
712 stimer->config.enable = 0;
713 else if (stimer->config.auto_enable)
714 stimer->config.enable = 1;
716 if (stimer->config.enable)
717 stimer_mark_pending(stimer, false);
722 static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
724 *pconfig = stimer->config.as_uint64;
728 static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
730 *pcount = stimer->count;
734 static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
735 struct hv_message *src_msg, bool no_retry)
737 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
738 int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
740 struct hv_message_header hv_hdr;
743 if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
746 msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
749 * Strictly following the spec-mandated ordering would assume setting
750 * .msg_pending before checking .message_type. However, this function
751 * is only called in vcpu context so the entire update is atomic from
752 * guest POV and thus the exact order here doesn't matter.
754 r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
755 msg_off + offsetof(struct hv_message,
756 header.message_type),
757 sizeof(hv_hdr.message_type));
761 if (hv_hdr.message_type != HVMSG_NONE) {
765 hv_hdr.message_flags.msg_pending = 1;
766 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
767 &hv_hdr.message_flags,
769 offsetof(struct hv_message,
770 header.message_flags),
771 sizeof(hv_hdr.message_flags));
777 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
778 sizeof(src_msg->header) +
779 src_msg->header.payload_size);
783 r = synic_set_irq(synic, sint);
791 static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
793 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
794 struct hv_message *msg = &stimer->msg;
795 struct hv_timer_message_payload *payload =
796 (struct hv_timer_message_payload *)&msg->u.payload;
799 * To avoid piling up periodic ticks, don't retry message
800 * delivery for them (within "lazy" lost ticks policy).
802 bool no_retry = stimer->config.periodic;
804 payload->expiration_time = stimer->exp_time;
805 payload->delivery_time = get_time_ref_counter(vcpu->kvm);
806 return synic_deliver_msg(to_hv_synic(vcpu),
807 stimer->config.sintx, msg,
811 static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
813 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
814 struct kvm_lapic_irq irq = {
815 .delivery_mode = APIC_DM_FIXED,
816 .vector = stimer->config.apic_vector
819 if (lapic_in_kernel(vcpu))
820 return !kvm_apic_set_irq(vcpu, &irq, NULL);
824 static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
826 int r, direct = stimer->config.direct_mode;
828 stimer->msg_pending = true;
830 r = stimer_send_msg(stimer);
832 r = stimer_notify_direct(stimer);
833 trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
834 stimer->index, direct, r);
836 stimer->msg_pending = false;
837 if (!(stimer->config.periodic))
838 stimer->config.enable = 0;
842 void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
844 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
845 struct kvm_vcpu_hv_stimer *stimer;
846 u64 time_now, exp_time;
852 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
853 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
854 stimer = &hv_vcpu->stimer[i];
855 if (stimer->config.enable) {
856 exp_time = stimer->exp_time;
860 get_time_ref_counter(vcpu->kvm);
861 if (time_now >= exp_time)
862 stimer_expiration(stimer);
865 if ((stimer->config.enable) &&
867 if (!stimer->msg_pending)
868 stimer_start(stimer);
870 stimer_cleanup(stimer);
875 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
877 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
883 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
884 stimer_cleanup(&hv_vcpu->stimer[i]);
887 vcpu->arch.hyperv = NULL;
890 bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
892 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
897 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
899 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
901 EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
903 int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
905 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
907 if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu))
910 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
911 &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page));
913 EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
915 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
917 struct hv_message *msg = &stimer->msg;
918 struct hv_timer_message_payload *payload =
919 (struct hv_timer_message_payload *)&msg->u.payload;
921 memset(&msg->header, 0, sizeof(msg->header));
922 msg->header.message_type = HVMSG_TIMER_EXPIRED;
923 msg->header.payload_size = sizeof(*payload);
925 payload->timer_index = stimer->index;
926 payload->expiration_time = 0;
927 payload->delivery_time = 0;
930 static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
932 memset(stimer, 0, sizeof(*stimer));
933 stimer->index = timer_index;
934 hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
935 stimer->timer.function = stimer_timer_callback;
936 stimer_prepare_msg(stimer);
939 int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
941 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
947 hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
951 vcpu->arch.hyperv = hv_vcpu;
952 hv_vcpu->vcpu = vcpu;
954 synic_init(&hv_vcpu->synic);
956 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
957 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
958 stimer_init(&hv_vcpu->stimer[i], i);
960 hv_vcpu->vp_index = vcpu->vcpu_idx;
962 for (i = 0; i < HV_NR_TLB_FLUSH_FIFOS; i++) {
963 INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries);
964 spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock);
970 int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
972 struct kvm_vcpu_hv_synic *synic;
975 r = kvm_hv_vcpu_init(vcpu);
979 synic = to_hv_synic(vcpu);
981 synic->active = true;
982 synic->dont_zero_synic_pages = dont_zero_synic_pages;
983 synic->control = HV_SYNIC_CONTROL_ENABLE;
987 static bool kvm_hv_msr_partition_wide(u32 msr)
992 case HV_X64_MSR_GUEST_OS_ID:
993 case HV_X64_MSR_HYPERCALL:
994 case HV_X64_MSR_REFERENCE_TSC:
995 case HV_X64_MSR_TIME_REF_COUNT:
996 case HV_X64_MSR_CRASH_CTL:
997 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
998 case HV_X64_MSR_RESET:
999 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1000 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1001 case HV_X64_MSR_TSC_EMULATION_STATUS:
1002 case HV_X64_MSR_TSC_INVARIANT_CONTROL:
1003 case HV_X64_MSR_SYNDBG_OPTIONS:
1004 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1012 static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
1014 struct kvm_hv *hv = to_kvm_hv(kvm);
1015 size_t size = ARRAY_SIZE(hv->hv_crash_param);
1017 if (WARN_ON_ONCE(index >= size))
1020 *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
1024 static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
1026 struct kvm_hv *hv = to_kvm_hv(kvm);
1028 *pdata = hv->hv_crash_ctl;
1032 static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
1034 struct kvm_hv *hv = to_kvm_hv(kvm);
1036 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
1041 static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
1043 struct kvm_hv *hv = to_kvm_hv(kvm);
1044 size_t size = ARRAY_SIZE(hv->hv_crash_param);
1046 if (WARN_ON_ONCE(index >= size))
1049 hv->hv_crash_param[array_index_nospec(index, size)] = data;
/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
1088 static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
1089 struct ms_hyperv_tsc_page *tsc_ref)
1093 if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
1097 * check if scale would overflow, if so we use the time ref counter
1098 * tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
1099 * tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
1100 * tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
1102 max_mul = 100ull << (32 - hv_clock->tsc_shift);
1103 if (hv_clock->tsc_to_system_mul >= max_mul)
1107 * Otherwise compute the scale and offset according to the formulas
1110 tsc_ref->tsc_scale =
1111 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
1112 hv_clock->tsc_to_system_mul,
1115 tsc_ref->tsc_offset = hv_clock->system_time;
1116 do_div(tsc_ref->tsc_offset, 100);
1117 tsc_ref->tsc_offset -=
1118 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
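/*
 * Worked example for the conversion above (illustrative only, assumed
 * numbers): for a 1 GHz guest TSC one possible kvmclock parameterization is
 * tsc_shift = 1 and tsc_to_system_mul = 2^31, since 2^31 * 2^(1-32) == 1
 * (one tick == one ns). Then
 *
 *    scale = 2^31 * 2^(32+1) / 100 = 2^64 / 100 ~= 184467440737095516
 *
 * and, with tsc_timestamp = system_time = 0, offset = 0. Feeding
 * ticks = 3e9 (three seconds) through ticks * scale / 2^64 gives roughly
 * 30000000, i.e. three seconds expressed in 100ns units, give or take the
 * flooring inherent in the integer math.
 */
#if 0	/* standalone check of the arithmetic, not compiled as part of KVM */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* scale = 2^64 / 100, computed via a 128-bit intermediate. */
	uint64_t scale = (uint64_t)(((unsigned __int128)1 << 64) / 100);
	uint64_t ticks = 3000000000ULL;			/* 3 s at 1 GHz */
	uint64_t ref = (uint64_t)(((unsigned __int128)ticks * scale) >> 64);

	/* Prints ~30000000 (3 s in 100ns units, modulo rounding down). */
	printf("%llu\n", (unsigned long long)ref);
	return 0;
}
#endif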
1123 * Don't touch TSC page values if the guest has opted for TSC emulation after
1124 * migration. KVM doesn't fully support reenlightenment notifications and TSC
1125 * access emulation and Hyper-V is known to expect the values in TSC page to
1126 * stay constant before TSC access emulation is disabled from guest side
1127 * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
1128 * frequency and guest visible TSC value across migration (and prevent it when
1129 * TSC scaling is unsupported).
1131 static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
1133 return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
1134 hv->hv_tsc_emulation_control;
1137 void kvm_hv_setup_tsc_page(struct kvm *kvm,
1138 struct pvclock_vcpu_time_info *hv_clock)
1140 struct kvm_hv *hv = to_kvm_hv(kvm);
1144 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
1145 BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
1147 mutex_lock(&hv->hv_lock);
1149 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
1150 hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
1151 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
1154 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1157 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
1159 * Because the TSC parameters only vary when there is a
1160 * change in the master clock, do not bother with caching.
1162 if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
1163 &tsc_seq, sizeof(tsc_seq))))
1166 if (tsc_seq && tsc_page_update_unsafe(hv)) {
1167 if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1170 hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1175 * While we're computing and writing the parameters, force the
1176 * guest to use the time reference count MSR.
1178 hv->tsc_ref.tsc_sequence = 0;
1179 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1180 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1183 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
1186 /* Ensure sequence is zero before writing the rest of the struct. */
1188 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1192 * Now switch to the TSC page mechanism by writing the sequence.
1195 if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
1198 /* Write the struct entirely before the non-zero sequence. */
1201 hv->tsc_ref.tsc_sequence = tsc_seq;
1202 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1203 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1206 hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1210 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
1212 mutex_unlock(&hv->hv_lock);
1215 void kvm_hv_request_tsc_page_update(struct kvm *kvm)
1217 struct kvm_hv *hv = to_kvm_hv(kvm);
1219 mutex_lock(&hv->hv_lock);
1221 if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
1222 !tsc_page_update_unsafe(hv))
1223 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
1225 mutex_unlock(&hv->hv_lock);
1228 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
1230 if (!hv_vcpu->enforce_cpuid)
1234 case HV_X64_MSR_GUEST_OS_ID:
1235 case HV_X64_MSR_HYPERCALL:
1236 return hv_vcpu->cpuid_cache.features_eax &
1237 HV_MSR_HYPERCALL_AVAILABLE;
1238 case HV_X64_MSR_VP_RUNTIME:
1239 return hv_vcpu->cpuid_cache.features_eax &
1240 HV_MSR_VP_RUNTIME_AVAILABLE;
1241 case HV_X64_MSR_TIME_REF_COUNT:
1242 return hv_vcpu->cpuid_cache.features_eax &
1243 HV_MSR_TIME_REF_COUNT_AVAILABLE;
1244 case HV_X64_MSR_VP_INDEX:
1245 return hv_vcpu->cpuid_cache.features_eax &
1246 HV_MSR_VP_INDEX_AVAILABLE;
1247 case HV_X64_MSR_RESET:
1248 return hv_vcpu->cpuid_cache.features_eax &
1249 HV_MSR_RESET_AVAILABLE;
1250 case HV_X64_MSR_REFERENCE_TSC:
1251 return hv_vcpu->cpuid_cache.features_eax &
1252 HV_MSR_REFERENCE_TSC_AVAILABLE;
1253 case HV_X64_MSR_SCONTROL:
1254 case HV_X64_MSR_SVERSION:
1255 case HV_X64_MSR_SIEFP:
1256 case HV_X64_MSR_SIMP:
1257 case HV_X64_MSR_EOM:
1258 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1259 return hv_vcpu->cpuid_cache.features_eax &
1260 HV_MSR_SYNIC_AVAILABLE;
1261 case HV_X64_MSR_STIMER0_CONFIG:
1262 case HV_X64_MSR_STIMER1_CONFIG:
1263 case HV_X64_MSR_STIMER2_CONFIG:
1264 case HV_X64_MSR_STIMER3_CONFIG:
1265 case HV_X64_MSR_STIMER0_COUNT:
1266 case HV_X64_MSR_STIMER1_COUNT:
1267 case HV_X64_MSR_STIMER2_COUNT:
1268 case HV_X64_MSR_STIMER3_COUNT:
1269 return hv_vcpu->cpuid_cache.features_eax &
1270 HV_MSR_SYNTIMER_AVAILABLE;
1271 case HV_X64_MSR_EOI:
1272 case HV_X64_MSR_ICR:
1273 case HV_X64_MSR_TPR:
1274 case HV_X64_MSR_VP_ASSIST_PAGE:
1275 return hv_vcpu->cpuid_cache.features_eax &
1276 HV_MSR_APIC_ACCESS_AVAILABLE;
1278 case HV_X64_MSR_TSC_FREQUENCY:
1279 case HV_X64_MSR_APIC_FREQUENCY:
1280 return hv_vcpu->cpuid_cache.features_eax &
1281 HV_ACCESS_FREQUENCY_MSRS;
1282 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1283 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1284 case HV_X64_MSR_TSC_EMULATION_STATUS:
1285 return hv_vcpu->cpuid_cache.features_eax &
1286 HV_ACCESS_REENLIGHTENMENT;
1287 case HV_X64_MSR_TSC_INVARIANT_CONTROL:
1288 return hv_vcpu->cpuid_cache.features_eax &
1289 HV_ACCESS_TSC_INVARIANT;
1290 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1291 case HV_X64_MSR_CRASH_CTL:
1292 return hv_vcpu->cpuid_cache.features_edx &
1293 HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
1294 case HV_X64_MSR_SYNDBG_OPTIONS:
1295 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1296 return hv_vcpu->cpuid_cache.features_edx &
1297 HV_FEATURE_DEBUG_MSRS_AVAILABLE;
1305 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
1308 struct kvm *kvm = vcpu->kvm;
1309 struct kvm_hv *hv = to_kvm_hv(kvm);
1311 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1315 case HV_X64_MSR_GUEST_OS_ID:
1316 hv->hv_guest_os_id = data;
1317 /* setting guest os id to zero disables hypercall page */
1318 if (!hv->hv_guest_os_id)
1319 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1321 case HV_X64_MSR_HYPERCALL: {
1326 /* if guest os id is not set hypercall should remain disabled */
1327 if (!hv->hv_guest_os_id)
1329 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1330 hv->hv_hypercall = data;
 * If Xen and Hyper-V hypercalls are both enabled, disambiguate
 * the same way Xen itself does, by setting bit 31 of EAX, which
 * is RsvdZ in the 32-bit Hyper-V hypercall ABI and is just going
 * to be clobbered on 64-bit.
1340 if (kvm_xen_hypercall_enabled(kvm)) {
1341 /* orl $0x80000000, %eax */
1342 instructions[i++] = 0x0d;
1343 instructions[i++] = 0x00;
1344 instructions[i++] = 0x00;
1345 instructions[i++] = 0x00;
1346 instructions[i++] = 0x80;
1349 /* vmcall/vmmcall */
1350 static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
1354 ((unsigned char *)instructions)[i++] = 0xc3;
1356 addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
1357 if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
1359 hv->hv_hypercall = data;
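/*
 * Illustrative note (not from the original source): the patched hypercall
 * page therefore starts with, at most,
 *
 *   0d 00 00 00 80        orl    $0x80000000, %eax   (only when Xen hypercalls
 *                                                      are also enabled)
 *   0f 01 c1 / 0f 01 d9   vmcall / vmmcall           (from kvm_x86_patch_hypercall)
 *   c3                    ret
 *
 * The vmcall vs. vmmcall choice depends on the CPU vendor; the exact bytes
 * emitted by kvm_x86_patch_hypercall() are an assumption here, not something
 * spelled out in this file.
 */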
1362 case HV_X64_MSR_REFERENCE_TSC:
1363 hv->hv_tsc_page = data;
1364 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
1366 hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
1368 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
1369 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1371 hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
1374 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1375 return kvm_hv_msr_set_crash_data(kvm,
1376 msr - HV_X64_MSR_CRASH_P0,
1378 case HV_X64_MSR_CRASH_CTL:
1380 return kvm_hv_msr_set_crash_ctl(kvm, data);
1382 if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
1383 vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
1384 hv->hv_crash_param[0],
1385 hv->hv_crash_param[1],
1386 hv->hv_crash_param[2],
1387 hv->hv_crash_param[3],
1388 hv->hv_crash_param[4]);
1390 /* Send notification about crash to user space */
1391 kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
1394 case HV_X64_MSR_RESET:
1396 vcpu_debug(vcpu, "hyper-v reset requested\n");
1397 kvm_make_request(KVM_REQ_HV_RESET, vcpu);
1400 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1401 hv->hv_reenlightenment_control = data;
1403 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1404 hv->hv_tsc_emulation_control = data;
1406 case HV_X64_MSR_TSC_EMULATION_STATUS:
1410 hv->hv_tsc_emulation_status = data;
1412 case HV_X64_MSR_TIME_REF_COUNT:
1413 /* read-only, but still ignore it if host-initiated */
1417 case HV_X64_MSR_TSC_INVARIANT_CONTROL:
1418 /* Only bit 0 is supported */
1419 if (data & ~HV_EXPOSE_INVARIANT_TSC)
1422 /* The feature can't be disabled from the guest */
1423 if (!host && hv->hv_invtsc_control && !data)
1426 hv->hv_invtsc_control = data;
1428 case HV_X64_MSR_SYNDBG_OPTIONS:
1429 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1430 return syndbg_set_msr(vcpu, msr, data, host);
1432 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1439 /* Calculate cpu time spent by current task in 100ns units */
1440 static u64 current_task_runtime_100ns(void)
1444 task_cputime_adjusted(current, &utime, &stime);
1446 return div_u64(utime + stime, 100);
1449 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1451 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1453 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1457 case HV_X64_MSR_VP_INDEX: {
1458 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1459 u32 new_vp_index = (u32)data;
1461 if (!host || new_vp_index >= KVM_MAX_VCPUS)
1464 if (new_vp_index == hv_vcpu->vp_index)
 * The VP index is initialized to vcpu_idx by
 * kvm_hv_vcpu_postcreate so they initially match. Now that the
 * VP index is changing, adjust num_mismatched_vp_indexes if
 * it now matches or no longer matches vcpu_idx.
1473 if (hv_vcpu->vp_index == vcpu->vcpu_idx)
1474 atomic_inc(&hv->num_mismatched_vp_indexes);
1475 else if (new_vp_index == vcpu->vcpu_idx)
1476 atomic_dec(&hv->num_mismatched_vp_indexes);
1478 hv_vcpu->vp_index = new_vp_index;
1481 case HV_X64_MSR_VP_ASSIST_PAGE: {
1485 if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
1486 hv_vcpu->hv_vapic = data;
1487 if (kvm_lapic_set_pv_eoi(vcpu, 0, 0))
1491 gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
1492 addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
1493 if (kvm_is_error_hva(addr))
 * Clear only the apic_assist portion of struct hv_vp_assist_page;
 * there can be valuable data in the rest which needs to be
 * preserved, e.g. on migration.
1501 if (__put_user(0, (u32 __user *)addr))
1503 hv_vcpu->hv_vapic = data;
1504 kvm_vcpu_mark_page_dirty(vcpu, gfn);
1505 if (kvm_lapic_set_pv_eoi(vcpu,
1506 gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
1507 sizeof(struct hv_vp_assist_page)))
1511 case HV_X64_MSR_EOI:
1512 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1513 case HV_X64_MSR_ICR:
1514 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1515 case HV_X64_MSR_TPR:
1516 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1517 case HV_X64_MSR_VP_RUNTIME:
1520 hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1522 case HV_X64_MSR_SCONTROL:
1523 case HV_X64_MSR_SVERSION:
1524 case HV_X64_MSR_SIEFP:
1525 case HV_X64_MSR_SIMP:
1526 case HV_X64_MSR_EOM:
1527 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1528 return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
1529 case HV_X64_MSR_STIMER0_CONFIG:
1530 case HV_X64_MSR_STIMER1_CONFIG:
1531 case HV_X64_MSR_STIMER2_CONFIG:
1532 case HV_X64_MSR_STIMER3_CONFIG: {
1533 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1535 return stimer_set_config(to_hv_stimer(vcpu, timer_index),
1538 case HV_X64_MSR_STIMER0_COUNT:
1539 case HV_X64_MSR_STIMER1_COUNT:
1540 case HV_X64_MSR_STIMER2_COUNT:
1541 case HV_X64_MSR_STIMER3_COUNT: {
1542 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1544 return stimer_set_count(to_hv_stimer(vcpu, timer_index),
1547 case HV_X64_MSR_TSC_FREQUENCY:
1548 case HV_X64_MSR_APIC_FREQUENCY:
1549 /* read-only, but still ignore it if host-initiated */
1554 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1562 static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1566 struct kvm *kvm = vcpu->kvm;
1567 struct kvm_hv *hv = to_kvm_hv(kvm);
1569 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1573 case HV_X64_MSR_GUEST_OS_ID:
1574 data = hv->hv_guest_os_id;
1576 case HV_X64_MSR_HYPERCALL:
1577 data = hv->hv_hypercall;
1579 case HV_X64_MSR_TIME_REF_COUNT:
1580 data = get_time_ref_counter(kvm);
1582 case HV_X64_MSR_REFERENCE_TSC:
1583 data = hv->hv_tsc_page;
1585 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1586 return kvm_hv_msr_get_crash_data(kvm,
1587 msr - HV_X64_MSR_CRASH_P0,
1589 case HV_X64_MSR_CRASH_CTL:
1590 return kvm_hv_msr_get_crash_ctl(kvm, pdata);
1591 case HV_X64_MSR_RESET:
1594 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1595 data = hv->hv_reenlightenment_control;
1597 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1598 data = hv->hv_tsc_emulation_control;
1600 case HV_X64_MSR_TSC_EMULATION_STATUS:
1601 data = hv->hv_tsc_emulation_status;
1603 case HV_X64_MSR_TSC_INVARIANT_CONTROL:
1604 data = hv->hv_invtsc_control;
1606 case HV_X64_MSR_SYNDBG_OPTIONS:
1607 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1608 return syndbg_get_msr(vcpu, msr, pdata, host);
1610 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1618 static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1622 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1624 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1628 case HV_X64_MSR_VP_INDEX:
1629 data = hv_vcpu->vp_index;
1631 case HV_X64_MSR_EOI:
1632 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1633 case HV_X64_MSR_ICR:
1634 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1635 case HV_X64_MSR_TPR:
1636 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1637 case HV_X64_MSR_VP_ASSIST_PAGE:
1638 data = hv_vcpu->hv_vapic;
1640 case HV_X64_MSR_VP_RUNTIME:
1641 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1643 case HV_X64_MSR_SCONTROL:
1644 case HV_X64_MSR_SVERSION:
1645 case HV_X64_MSR_SIEFP:
1646 case HV_X64_MSR_SIMP:
1647 case HV_X64_MSR_EOM:
1648 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1649 return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
1650 case HV_X64_MSR_STIMER0_CONFIG:
1651 case HV_X64_MSR_STIMER1_CONFIG:
1652 case HV_X64_MSR_STIMER2_CONFIG:
1653 case HV_X64_MSR_STIMER3_CONFIG: {
1654 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1656 return stimer_get_config(to_hv_stimer(vcpu, timer_index),
1659 case HV_X64_MSR_STIMER0_COUNT:
1660 case HV_X64_MSR_STIMER1_COUNT:
1661 case HV_X64_MSR_STIMER2_COUNT:
1662 case HV_X64_MSR_STIMER3_COUNT: {
1663 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1665 return stimer_get_count(to_hv_stimer(vcpu, timer_index),
1668 case HV_X64_MSR_TSC_FREQUENCY:
1669 data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
1671 case HV_X64_MSR_APIC_FREQUENCY:
1672 data = APIC_BUS_FREQUENCY;
1675 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1682 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1684 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1686 if (!host && !vcpu->arch.hyperv_enabled)
1689 if (kvm_hv_vcpu_init(vcpu))
1692 if (kvm_hv_msr_partition_wide(msr)) {
1695 mutex_lock(&hv->hv_lock);
1696 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
1697 mutex_unlock(&hv->hv_lock);
1700 return kvm_hv_set_msr(vcpu, msr, data, host);
1703 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1705 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1707 if (!host && !vcpu->arch.hyperv_enabled)
1710 if (kvm_hv_vcpu_init(vcpu))
1713 if (kvm_hv_msr_partition_wide(msr)) {
1716 mutex_lock(&hv->hv_lock);
1717 r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
1718 mutex_unlock(&hv->hv_lock);
1721 return kvm_hv_get_msr(vcpu, msr, pdata, host);
1724 static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
1725 u64 valid_bank_mask, unsigned long *vcpu_mask)
1727 struct kvm_hv *hv = to_kvm_hv(kvm);
1728 bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes);
1729 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1730 struct kvm_vcpu *vcpu;
1731 int bank, sbank = 0;
1735 BUILD_BUG_ON(sizeof(vp_bitmap) >
1736 sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS));
1739 * If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else
1740 * fill a temporary buffer and manually test each vCPU's VP index.
1742 if (likely(!has_mismatch))
1743 bitmap = (u64 *)vcpu_mask;
1748 * Each set of 64 VPs is packed into sparse_banks, with valid_bank_mask
1749 * having a '1' for each bank that exists in sparse_banks. Sets must
1750 * be in ascending order, i.e. bank0..bankN.
1752 memset(bitmap, 0, sizeof(vp_bitmap));
1753 for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1754 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
1755 bitmap[bank] = sparse_banks[sbank++];
1757 if (likely(!has_mismatch))
1760 bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
1761 kvm_for_each_vcpu(i, vcpu, kvm) {
1762 if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
1763 __set_bit(i, vcpu_mask);
1767 static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[])
1769 int valid_bit_nr = vp_id / HV_VCPUS_PER_SPARSE_BANK;
1770 unsigned long sbank;
1772 if (!test_bit(valid_bit_nr, (unsigned long *)&valid_bank_mask))
1776 * The index into the sparse bank is the number of preceding bits in
1777 * the valid mask. Optimize for VMs with <64 vCPUs by skipping the
1778 * fancy math if there can't possibly be preceding bits.
1781 sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0));
1785 return test_bit(vp_id % HV_VCPUS_PER_SPARSE_BANK,
1786 (unsigned long *)&sparse_banks[sbank]);
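/*
 * Illustrative sketch (not part of this file): how a VP index maps into the
 * sparse bank representation used above. Each bank covers 64 VPs; the bank
 * number is vp_id / 64 and the bit within the bank is vp_id % 64, while the
 * index into sparse_banks[] is the number of valid banks *below* that bank
 * (a popcount of the lower bits of valid_bank_mask).
 */
#if 0	/* standalone example, not compiled as part of KVM */
#include <stdint.h>
#include <stdbool.h>

static bool vp_in_sparse_set(uint32_t vp_id, uint64_t valid_bank_mask,
			     const uint64_t *sparse_banks)
{
	uint32_t bank = vp_id / 64, bit = vp_id % 64;
	uint64_t below;

	if (bank >= 64 || !(valid_bank_mask & (1ULL << bank)))
		return false;
	/* Index into sparse_banks[] == number of valid banks below 'bank'. */
	below = valid_bank_mask & ((1ULL << bank) - 1);
	return (sparse_banks[__builtin_popcountll(below)] >> bit) & 1;
}
#endif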
1789 struct kvm_hv_hcall {
1790 /* Hypercall input data */
1800 sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
1803 * Current read offset when KVM reads hypercall input data gradually,
1804 * either offset in bytes from 'ingpa' for regular hypercalls or the
1805 * number of already consumed 'XMM halves' for 'fast' hypercalls.
1809 int consumed_xmm_halves;
1814 static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
1815 u16 orig_cnt, u16 cnt_cap, u64 *data)
1818 * Preserve the original count when ignoring entries via a "cap", KVM
1819 * still needs to validate the guest input (though the non-XMM path
1820 * punts on the checks).
1822 u16 cnt = min(orig_cnt, cnt_cap);
1827 * Each XMM holds two sparse banks, but do not count halves that
1828 * have already been consumed for hypercall parameters.
1830 if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves)
1831 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1833 for (i = 0; i < cnt; i++) {
1834 j = i + hc->consumed_xmm_halves;
1836 data[i] = sse128_hi(hc->xmm[j / 2]);
1838 data[i] = sse128_lo(hc->xmm[j / 2]);
1843 return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data,
1844 cnt * sizeof(*data));
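/*
 * Illustrative sketch (not part of this file): for XMM "fast" hypercalls the
 * variable-size input lives in the XMM registers, two 64-bit halves per
 * register. Element i of the output therefore comes from logical half
 * j = i + consumed_xmm_halves: register j / 2, low half when j is even and
 * high half when j is odd, which is what the loop above does with
 * sse128_lo()/sse128_hi().
 */
#if 0	/* standalone example, not compiled as part of KVM */
#include <stdint.h>

struct xmm128 { uint64_t lo, hi; };

static uint64_t xmm_half(const struct xmm128 *xmm, int consumed, int i)
{
	int j = i + consumed;

	return (j & 1) ? xmm[j / 2].hi : xmm[j / 2].lo;
}
#endif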
1847 static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
1850 if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS)
1853 /* Cap var_cnt to ignore banks that cannot contain a legal VP index. */
1854 return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS,
1858 static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
1860 return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries);
1863 static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
1864 struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo,
1865 u64 *entries, int count)
1867 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1868 u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY;
1873 spin_lock(&tlb_flush_fifo->write_lock);
1876 * All entries should fit on the fifo leaving one free for 'flush all'
1877 * entry in case another request comes in. In case there's not enough
1878 * space, just put 'flush all' entry there.
1880 if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) {
1881 WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count);
 * Note: a full fifo always contains the 'flush all' entry, so there is no
 * need to check the return value.
1889 kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1);
1892 spin_unlock(&tlb_flush_fifo->write_lock);
1895 int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
1897 struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
1898 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1899 u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE];
1903 if (!tdp_enabled || !hv_vcpu)
1906 tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));
1908 count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE);
1910 for (i = 0; i < count; i++) {
1911 if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
 * Lower 12 bits of 'address' encode the number of additional pages to flush.
1918 gva = entries[i] & PAGE_MASK;
1919 for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
1920 static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);
1922 ++vcpu->stat.tlb_flush;
1927 kfifo_reset_out(&tlb_flush_fifo->entries);
1929 /* Fall back to full flush. */
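/*
 * Illustrative sketch (not part of this file): each non-"flush all" fifo
 * entry packs a page-aligned GVA together with a count of additional pages
 * in the low 12 bits, which is what the decode loop above relies on.
 */
#if 0	/* standalone example, not compiled as part of KVM */
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE	4096ULL
#define EXAMPLE_PAGE_MASK	(~(EXAMPLE_PAGE_SIZE - 1))

/* Pack 'gva' plus 'extra' following pages (extra <= 4095) into one entry. */
static uint64_t pack_flush_entry(uint64_t gva, uint64_t extra)
{
	return (gva & EXAMPLE_PAGE_MASK) | extra;
}

static void unpack_flush_entry(uint64_t entry, uint64_t *gva, uint64_t *npages)
{
	*gva = entry & EXAMPLE_PAGE_MASK;
	*npages = (entry & ~EXAMPLE_PAGE_MASK) + 1;	/* "additional" + itself */
}
#endif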
1933 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
1935 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1936 u64 *sparse_banks = hv_vcpu->sparse_banks;
1937 struct kvm *kvm = vcpu->kvm;
1938 struct hv_tlb_flush_ex flush_ex;
1939 struct hv_tlb_flush flush;
1940 DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
1941 struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
1943 * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE'
1944 * entries on the TLB flush fifo. The last entry, however, needs to be
1945 * always left free for 'flush all' entry which gets placed when
1946 * there is not enough space to put all the requested entries.
1948 u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1];
1949 u64 *tlb_flush_entries;
1950 u64 valid_bank_mask;
1956 * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS
1957 * sparse banks. Fail the build if KVM's max allowed number of
1958 * vCPUs (>4096) exceeds this limit.
1960 BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > HV_MAX_SPARSE_VCPU_BANKS);
1963 * 'Slow' hypercall's first parameter is the address in guest's memory
1964 * where hypercall parameters are placed. This is either a GPA or a
1965 * nested GPA when KVM is handling the call from L2 ('direct' TLB
1966 * flush). Translate the address here so the memory can be uniformly
1967 * read with kvm_read_guest().
1969 if (!hc->fast && is_guest_mode(vcpu)) {
1970 hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL);
1971 if (unlikely(hc->ingpa == INVALID_GPA))
1972 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1975 if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
1976 hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) {
1978 flush.address_space = hc->ingpa;
1979 flush.flags = hc->outgpa;
1980 flush.processor_mask = sse128_lo(hc->xmm[0]);
1981 hc->consumed_xmm_halves = 1;
1983 if (unlikely(kvm_read_guest(kvm, hc->ingpa,
1984 &flush, sizeof(flush))))
1985 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1986 hc->data_offset = sizeof(flush);
1989 trace_kvm_hv_flush_tlb(flush.processor_mask,
1990 flush.address_space, flush.flags,
1991 is_guest_mode(vcpu));
1993 valid_bank_mask = BIT_ULL(0);
1994 sparse_banks[0] = flush.processor_mask;
1997 * Work around possible WS2012 bug: it sends hypercalls
1998 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
1999 * while also expecting us to flush something and crashing if
2000 * we don't. Let's treat processor_mask == 0 same as
2001 * HV_FLUSH_ALL_PROCESSORS.
2003 all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
2004 flush.processor_mask == 0;
2007 flush_ex.address_space = hc->ingpa;
2008 flush_ex.flags = hc->outgpa;
2009 memcpy(&flush_ex.hv_vp_set,
2010 &hc->xmm[0], sizeof(hc->xmm[0]));
2011 hc->consumed_xmm_halves = 2;
2013 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
2015 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2016 hc->data_offset = sizeof(flush_ex);
2019 trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
2020 flush_ex.hv_vp_set.format,
2021 flush_ex.address_space,
2022 flush_ex.flags, is_guest_mode(vcpu));
2024 valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
2025 all_cpus = flush_ex.hv_vp_set.format !=
2026 HV_GENERIC_SET_SPARSE_4K;
2028 if (hc->var_cnt != hweight64(valid_bank_mask))
2029 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2035 if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
2036 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2040 * Hyper-V TLFS doesn't explicitly forbid non-empty sparse vCPU
2041 * banks (and, thus, non-zero 'var_cnt') for the 'all vCPUs'
2042 * case (HV_GENERIC_SET_ALL). Always adjust data_offset and
2043 * consumed_xmm_halves to make sure TLB flush entries are read
2044 * from the correct offset.
2047 hc->consumed_xmm_halves += hc->var_cnt;
2049 hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
2052 if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
2053 hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
2054 hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
2055 tlb_flush_entries = NULL;
2057 if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
2058 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2059 tlb_flush_entries = __tlb_flush_entries;
2063 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
2064 * analyze it here, flush TLB regardless of the specified address space.
2066 if (all_cpus && !is_guest_mode(vcpu)) {
2067 kvm_for_each_vcpu(i, v, kvm) {
2068 tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
2069 hv_tlb_flush_enqueue(v, tlb_flush_fifo,
2070 tlb_flush_entries, hc->rep_cnt);
2073 kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
2074 } else if (!is_guest_mode(vcpu)) {
2075 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
2077 for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) {
2078 v = kvm_get_vcpu(kvm, i);
2081 tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
2082 hv_tlb_flush_enqueue(v, tlb_flush_fifo,
2083 tlb_flush_entries, hc->rep_cnt);
2086 kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
2088 struct kvm_vcpu_hv *hv_v;
2090 bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
2092 kvm_for_each_vcpu(i, v, kvm) {
2093 hv_v = to_hv_vcpu(v);
2096 * The following check races with nested vCPUs entering/exiting
2097 * and/or migrating between L1's vCPUs, however the only case when
2098 * KVM *must* flush the TLB is when the target L2 vCPU keeps
2099 * running on the same L1 vCPU from the moment of the request until
2100 * kvm_hv_flush_tlb() returns. TLB is fully flushed in all other
2101 * cases, e.g. when the target L2 vCPU migrates to a different L1
 * vCPU or when the corresponding L1 vCPU temporarily switches to a
2103 * different L2 vCPU while the request is being processed.
2105 if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id)
2109 !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask,
2113 __set_bit(i, vcpu_mask);
2114 tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, true);
2115 hv_tlb_flush_enqueue(v, tlb_flush_fifo,
2116 tlb_flush_entries, hc->rep_cnt);
2119 kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
2123 /* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
2124 return (u64)HV_STATUS_SUCCESS |
2125 ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
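/*
 * Illustrative sketch (not part of this file): the hypercall result value
 * returned to the guest carries the status code in its low 16 bits and the
 * number of completed repetitions in a higher field. The shift used above,
 * HV_HYPERCALL_REP_COMP_OFFSET, is 32 in the TLFS-derived headers; that
 * exact value is an assumption here, not defined in this file.
 */
#if 0	/* standalone example, not compiled as part of KVM */
#include <stdint.h>

static uint64_t hv_result(uint16_t status, uint16_t reps_completed)
{
	return (uint64_t)status | ((uint64_t)reps_completed << 32);
}
#endif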
2128 static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector,
2129 u64 *sparse_banks, u64 valid_bank_mask)
2131 struct kvm_lapic_irq irq = {
2132 .delivery_mode = APIC_DM_FIXED,
2135 struct kvm_vcpu *vcpu;
2138 kvm_for_each_vcpu(i, vcpu, kvm) {
2140 !hv_is_vp_in_sparse_set(kvm_hv_get_vpindex(vcpu),
2141 valid_bank_mask, sparse_banks))
2144 /* We fail only when APIC is disabled */
2145 kvm_apic_set_irq(vcpu, &irq, NULL);
2149 static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2151 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2152 u64 *sparse_banks = hv_vcpu->sparse_banks;
2153 struct kvm *kvm = vcpu->kvm;
2154 struct hv_send_ipi_ex send_ipi_ex;
2155 struct hv_send_ipi send_ipi;
2156 u64 valid_bank_mask;
2160 if (hc->code == HVCALL_SEND_IPI) {
2162 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
2164 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2165 sparse_banks[0] = send_ipi.cpu_mask;
2166 vector = send_ipi.vector;
2168 /* 'reserved' part of hv_send_ipi should be 0 */
2169 if (unlikely(hc->ingpa >> 32 != 0))
2170 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2171 sparse_banks[0] = hc->outgpa;
2172 vector = (u32)hc->ingpa;
2175 valid_bank_mask = BIT_ULL(0);
2177 trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
2180 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
2181 sizeof(send_ipi_ex))))
2182 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2184 send_ipi_ex.vector = (u32)hc->ingpa;
2185 send_ipi_ex.vp_set.format = hc->outgpa;
2186 send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]);
2189 trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
2190 send_ipi_ex.vp_set.format,
2191 send_ipi_ex.vp_set.valid_bank_mask);
2193 vector = send_ipi_ex.vector;
2194 valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
2195 all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
2197 if (hc->var_cnt != hweight64(valid_bank_mask))
2198 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2201 goto check_and_send_ipi;
2207 hc->data_offset = offsetof(struct hv_send_ipi_ex,
2208 vp_set.bank_contents);
2210 hc->consumed_xmm_halves = 1;
2212 if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
2213 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2217 if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
2218 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2221 kvm_hv_send_ipi_to_many(kvm, vector, NULL, 0);
2223 kvm_hv_send_ipi_to_many(kvm, vector, sparse_banks, valid_bank_mask);
2226 return HV_STATUS_SUCCESS;
2229 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled)
2231 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2232 struct kvm_cpuid_entry2 *entry;
2234 vcpu->arch.hyperv_enabled = hyperv_enabled;
 * KVM should have already allocated kvm_vcpu_hv if Hyper-V is
 * enabled in CPUID.
2241 WARN_ON_ONCE(vcpu->arch.hyperv_enabled);
2245 memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache));
2247 if (!vcpu->arch.hyperv_enabled)
2250 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
2252 hv_vcpu->cpuid_cache.features_eax = entry->eax;
2253 hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
2254 hv_vcpu->cpuid_cache.features_edx = entry->edx;
2257 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
2259 hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
2260 hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
2263 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
2265 hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
2267 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_NESTED_FEATURES);
2269 hv_vcpu->cpuid_cache.nested_eax = entry->eax;
2270 hv_vcpu->cpuid_cache.nested_ebx = entry->ebx;
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
{
	struct kvm_vcpu_hv *hv_vcpu;
	int ret = 0;

	if (!to_hv_vcpu(vcpu)) {
		if (enforce) {
			ret = kvm_hv_vcpu_init(vcpu);
			if (ret)
				return ret;
		} else {
			return 0;
		}
	}

	hv_vcpu = to_hv_vcpu(vcpu);
	hv_vcpu->enforce_cpuid = enforce;

	return ret;
}
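/*
 * Write the hypercall status back to the guest: RAX in 64-bit mode, split
 * across EDX:EAX in 32-bit mode.
 */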
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_hypercall(vcpu);
	if (longmode)
		kvm_rax_write(vcpu, result);
	else {
		kvm_rdx_write(vcpu, result >> 32);
		kvm_rax_write(vcpu, result & 0xffffffff);
	}
}
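/*
 * Common hypercall completion: record the result, bump the stats counter and
 * skip the vmcall/vmmcall instruction.  For nested TLB flush hypercalls, a
 * non-zero "TLB locked" count read from the partition assist page triggers a
 * synthetic nested VM-exit after the flush.
 */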
static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
	u32 tlb_lock_count = 0;
	int ret;

	if (hv_result_success(result) && is_guest_mode(vcpu) &&
	    kvm_hv_is_tlb_flush_hcall(vcpu) &&
	    kvm_read_guest(vcpu->kvm, to_hv_vcpu(vcpu)->nested.pa_page_gpa,
			   &tlb_lock_count, sizeof(tlb_lock_count)))
		result = HV_STATUS_INVALID_HYPERCALL_INPUT;

	trace_kvm_hv_hypercall_done(result);
	kvm_hv_hypercall_set_result(vcpu, result);
	++vcpu->stat.hypercalls;

	ret = kvm_skip_emulated_instruction(vcpu);

	if (tlb_lock_count)
		kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu);

	return ret;
}
static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}
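/*
 * HVCALL_SIGNAL_EVENT: look up the connection ID in the VM's conn_to_evt IDR
 * and signal the eventfd that userspace registered for it.  For slow calls
 * the parameter is first read from guest memory.
 */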
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	struct eventfd_ctx *eventfd;

	if (unlikely(!hc->fast)) {
		int ret;
		gpa_t gpa = hc->ingpa;

		if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
		    offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
			return HV_STATUS_INVALID_ALIGNMENT;

		ret = kvm_vcpu_read_guest(vcpu, gpa,
					  &hc->ingpa, sizeof(hc->ingpa));
		if (ret < 0)
			return HV_STATUS_INVALID_ALIGNMENT;
	}

	/*
	 * Per spec, bits 32-47 contain the extra "flag number".  However, we
	 * have no use for it, and in all known usecases it is zero, so just
	 * report lookup failure if it isn't.
	 */
	if (hc->ingpa & 0xffff00000000ULL)
		return HV_STATUS_INVALID_PORT_ID;
	/* remaining bits are reserved-zero */
	if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
	rcu_read_lock();
	eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
	rcu_read_unlock();
	if (!eventfd)
		return HV_STATUS_INVALID_PORT_ID;

	eventfd_signal(eventfd, 1);
	return HV_STATUS_SUCCESS;
}
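/* Hypercalls that may pass part of their input in XMM registers when 'fast'. */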
static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
{
	switch (hc->code) {
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
	case HVCALL_SEND_IPI_EX:
		return true;
	}

	return false;
}
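/* Snapshot the guest XMM registers that carry XMM fast hypercall input. */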
static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
{
	int reg;

	kvm_fpu_get();
	for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
		_kvm_read_sse_reg(reg, &hc->xmm[reg]);
	kvm_fpu_put();
}
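/*
 * When CPUID enforcement is enabled, map each hypercall code to the Hyper-V
 * CPUID feature/recommendation bit that must have been exposed to the guest
 * for the call to be permitted.
 */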
static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		return hv_vcpu->cpuid_cache.enlightenments_ebx &&
			hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
	case HVCALL_POST_MESSAGE:
		return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
	case HVCALL_SIGNAL_EVENT:
		return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
	case HVCALL_RESET_DEBUG_SESSION:
		/*
		 * Return 'true' when SynDBG is disabled so the resulting code
		 * will be HV_STATUS_INVALID_HYPERCALL_CODE.
		 */
		return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
			hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
			return false;
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		return hv_vcpu->cpuid_cache.enlightenments_eax &
			HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
	case HVCALL_SEND_IPI_EX:
		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
			return false;
		fallthrough;
	case HVCALL_SEND_IPI:
		return hv_vcpu->cpuid_cache.enlightenments_eax &
			HV_X64_CLUSTER_IPI_RECOMMENDED;
	default:
		break;
	}

	return true;
}
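/*
 * Hyper-V hypercall entry point.  The guest's hypercall input value packs the
 * call code into the low 16 bits plus a 'fast' flag, a variable-header size
 * and 12-bit rep count/start index fields; they are unpacked below into
 * struct kvm_hv_hcall before dispatching on the call code.
 */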
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_hv_hcall hc;
	u64 ret = HV_STATUS_SUCCESS;

	/*
	 * hypercall generates UD from non zero cpl and real mode
	 * per HYPER-V spec
	 */
	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

#ifdef CONFIG_X86_64
	if (is_64_bit_hypercall(vcpu)) {
		hc.param = kvm_rcx_read(vcpu);
		hc.ingpa = kvm_rdx_read(vcpu);
		hc.outgpa = kvm_r8_read(vcpu);
	} else
#endif
	{
		hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
			    (kvm_rax_read(vcpu) & 0xffffffff);
		hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
			    (kvm_rcx_read(vcpu) & 0xffffffff);
		hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
			     (kvm_rsi_read(vcpu) & 0xffffffff);
	}

	hc.code = hc.param & 0xffff;
	hc.var_cnt = (hc.param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET;
	hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
	hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
	hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
	hc.rep = !!(hc.rep_cnt || hc.rep_idx);

	trace_kvm_hv_hypercall(hc.code, hc.fast, hc.var_cnt, hc.rep_cnt,
			       hc.rep_idx, hc.ingpa, hc.outgpa);

	if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
		ret = HV_STATUS_ACCESS_DENIED;
		goto hypercall_complete;
	}

	if (unlikely(hc.param & HV_HYPERCALL_RSVD_MASK)) {
		ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
		goto hypercall_complete;
	}

	if (hc.fast && is_xmm_fast_hypercall(&hc)) {
		if (unlikely(hv_vcpu->enforce_cpuid &&
			     !(hv_vcpu->cpuid_cache.features_edx &
			       HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}

		kvm_hv_hypercall_read_xmm(&hc);
	}

	switch (hc.code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		kvm_vcpu_on_spin(vcpu, true);
		break;
	case HVCALL_SIGNAL_EVENT:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hvcall_signal_event(vcpu, &hc);
		if (ret != HV_STATUS_INVALID_PORT_ID)
			break;
		fallthrough;	/* maybe userspace knows this conn_id */
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = hc.param;
		vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
		if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc);
		break;
	case HVCALL_SEND_IPI:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_SEND_IPI_EX:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, &hc);
		break;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
		if (unlikely(hc.fast)) {
			ret = HV_STATUS_INVALID_PARAMETER;
			break;
		}
		fallthrough;
	case HVCALL_RESET_DEBUG_SESSION: {
		struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

		if (!kvm_hv_is_syndbg_enabled(vcpu)) {
			ret = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}

		if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
			ret = HV_STATUS_OPERATION_DENIED;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = hc.param;
		vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	}
	default:
		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

hypercall_complete:
	return kvm_hv_hypercall_complete(vcpu, ret);
}
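/*
 * VM-wide Hyper-V initialization/teardown: hv_lock guards the conn_to_evt
 * IDR that maps SIGNAL_EVENT connection IDs to userspace eventfds.
 */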
void kvm_hv_init_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	mutex_init(&hv->hv_lock);
	idr_init(&hv->conn_to_evt);
}
void kvm_hv_destroy_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int i;

	idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
		eventfd_ctx_put(eventfd);
	idr_destroy(&hv->conn_to_evt);
}
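/*
 * Associate an eventfd with a Hyper-V connection ID; kvm_hvcall_signal_event()
 * signals it when the guest calls HVCALL_SIGNAL_EVENT with that ID.  Each
 * connection ID can only be bound once.
 */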
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int ret;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&hv->hv_lock);
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL_ACCOUNT);
	mutex_unlock(&hv->hv_lock);

	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
	eventfd_ctx_put(eventfd);
	return ret;
}
static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;

	mutex_lock(&hv->hv_lock);
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);

	if (!eventfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
	return 0;
}
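/*
 * Dispatcher for the KVM_HYPERV_EVENTFD VM ioctl.  Illustrative userspace
 * usage (a sketch, not part of this file; 'vm_fd' and 'efd' are assumed to be
 * an open VM file descriptor and an eventfd created by the VMM):
 *
 *	struct kvm_hyperv_eventfd hvevt = {
 *		.conn_id = 1,
 *		.fd      = efd,
 *		.flags   = 0,	// or KVM_HYPERV_EVENTFD_DEASSIGN to remove
 *	};
 *	ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevt);
 */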
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
		return -EINVAL;

	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}
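/*
 * KVM_GET_SUPPORTED_HV_CPUID: report the Hyper-V CPUID leaves KVM can emulate
 * so userspace can expose them (possibly filtered) to the guest.
 */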
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries)
{
	uint16_t evmcs_ver = 0;
	struct kvm_cpuid_entry2 cpuid_entries[] = {
		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_INTERFACE },
		{ .function = HYPERV_CPUID_VERSION },
		{ .function = HYPERV_CPUID_FEATURES },
		{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
		{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
		{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
		{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
		{ .function = HYPERV_CPUID_NESTED_FEATURES },
	};
	int i, nent = ARRAY_SIZE(cpuid_entries);

	if (kvm_x86_ops.nested_ops->get_evmcs_version)
		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);

	if (cpuid->nent < nent)
		return -E2BIG;

	if (cpuid->nent > nent)
		cpuid->nent = nent;

	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
		u32 signature[3];

		switch (ent->function) {
		case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_INTERFACE:
			ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
			break;

		case HYPERV_CPUID_VERSION:
			/*
			 * We implement some Hyper-V 2016 functions so let's use
			 * this version.
			 */
			ent->eax = 0x00003839;
			ent->ebx = 0x000A0000;
			break;

		case HYPERV_CPUID_FEATURES:
			ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
			ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			ent->eax |= HV_MSR_SYNIC_AVAILABLE;
			ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
			ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
			ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
			ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
			ent->eax |= HV_MSR_RESET_AVAILABLE;
			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
			ent->eax |= HV_ACCESS_REENLIGHTENMENT;
			ent->eax |= HV_ACCESS_TSC_INVARIANT;

			ent->ebx |= HV_POST_MESSAGES;
			ent->ebx |= HV_SIGNAL_EVENTS;

			ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

			ent->ebx |= HV_DEBUGGING;
			ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
			ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_EXT_GVA_RANGES_FLUSH;

			/*
			 * Direct Synthetic timers only make sense with in-kernel
			 * LAPIC
			 */
			if (!vcpu || lapic_in_kernel(vcpu))
				ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;

			break;

		case HYPERV_CPUID_ENLIGHTMENT_INFO:
			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			if (evmcs_ver)
				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
			if (!cpu_smt_possible())
				ent->eax |= HV_X64_NO_NONARCH_CORESHARING;

			ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED;
			/*
			 * Default number of spinlock retry attempts, matches
			 * HyperV 2016.
			 */
			ent->ebx = 0x00000FFF;

			break;

		case HYPERV_CPUID_IMPLEMENT_LIMITS:
			/* Maximum number of virtual processors */
			ent->eax = KVM_MAX_VCPUS;
			/*
			 * Maximum number of logical processors, matches
			 * HyperV 2016.
			 */
			ent->ebx = 64;

			break;

		case HYPERV_CPUID_NESTED_FEATURES:
			ent->eax = evmcs_ver;
			ent->eax |= HV_X64_NESTED_DIRECT_FLUSH;
			ent->eax |= HV_X64_NESTED_MSR_BITMAP;
			ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
			break;

		case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = 0;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_SYNDBG_INTERFACE:
			memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
			ent->eax = signature[0];
			break;

		case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
			ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			break;

		default:
			break;
		}
	}

	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))