1 // SPDX-License-Identifier: GPL-2.0-only
3 * KVM Microsoft Hyper-V emulation
5 * derived from arch/x86/kvm/x86.c
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright (C) 2008 Qumranet, Inc.
9 * Copyright IBM Corporation, 2008
10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
16 * Amit Shah <amit.shah@qumranet.com>
17 * Ben-Ami Yassour <benami@il.ibm.com>
18 * Andrey Smetanin <asmetanin@virtuozzo.com>
28 #include <linux/cpu.h>
29 #include <linux/kvm_host.h>
30 #include <linux/highmem.h>
31 #include <linux/sched/cputime.h>
32 #include <linux/eventfd.h>
34 #include <asm/apicdef.h>
35 #include <trace/events/kvm.h>
41 /* "Hv#1" signature */
42 #define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
44 #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
46 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
49 static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
51 return atomic64_read(&synic->sint[sint]);
54 static inline int synic_get_sint_vector(u64 sint_value)
56 if (sint_value & HV_SYNIC_SINT_MASKED)
58 return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
61 static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
66 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
67 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
73 static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
79 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
80 sint_value = synic_read_sint(synic, i);
81 if (synic_get_sint_vector(sint_value) == vector &&
82 sint_value & HV_SYNIC_SINT_AUTO_EOI)
88 static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
91 if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
94 if (synic_has_vector_connected(synic, vector))
95 __set_bit(vector, synic->vec_bitmap);
97 __clear_bit(vector, synic->vec_bitmap);
99 if (synic_has_vector_auto_eoi(synic, vector))
100 __set_bit(vector, synic->auto_eoi_bitmap);
102 __clear_bit(vector, synic->auto_eoi_bitmap);
105 static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
108 int vector, old_vector;
111 vector = data & HV_SYNIC_SINT_VECTOR_MASK;
112 masked = data & HV_SYNIC_SINT_MASKED;
115 * Valid vectors are 16-255, but nested Hyper-V attempts to write the
116 * default '0x10000' value on boot and this should not #GP. We also need
117 * to allow zero-initing the register from the host.
119 if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
122 * Guest may configure multiple SINTs to use the same vector, so
123 * we maintain a bitmap of vectors handled by synic, and a
124 * bitmap of vectors with auto-eoi behavior. The bitmaps are
125 * updated here, and atomically queried on fast paths.
127 old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;
129 atomic64_set(&synic->sint[sint], data);
131 synic_update_vector(synic, old_vector);
133 synic_update_vector(synic, vector);
135 /* Load SynIC vectors into EOI exit bitmap */
136 kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
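/*
 * Illustrative note (not from the original source): the bitmaps updated
 * above are what keeps the interrupt fast paths cheap.  A consumer such as
 * the local APIC EOI handling only needs something along the lines of
 *
 *	if (test_bit(vector, to_hv_synic(vcpu)->vec_bitmap))
 *		kvm_hv_synic_send_eoi(vcpu, vector);
 *
 * instead of scanning all 16 SINT registers; kvm_hv_synic_send_eoi()
 * (further down in this file) then walks the SINTs that use the vector.
 */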
140 static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
142 struct kvm_vcpu *vcpu = NULL;
145 if (vpidx >= KVM_MAX_VCPUS)
148 vcpu = kvm_get_vcpu(kvm, vpidx);
149 if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
151 kvm_for_each_vcpu(i, vcpu, kvm)
152 if (kvm_hv_get_vpindex(vcpu) == vpidx)
157 static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
159 struct kvm_vcpu *vcpu;
160 struct kvm_vcpu_hv_synic *synic;
162 vcpu = get_vcpu_by_vpidx(kvm, vpidx);
163 if (!vcpu || !to_hv_vcpu(vcpu))
165 synic = to_hv_synic(vcpu);
166 return (synic->active) ? synic : NULL;
169 static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
171 struct kvm *kvm = vcpu->kvm;
172 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
173 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
174 struct kvm_vcpu_hv_stimer *stimer;
177 trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
179 /* Try to deliver pending Hyper-V SynIC timer messages */
180 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
181 stimer = &hv_vcpu->stimer[idx];
182 if (stimer->msg_pending && stimer->config.enable &&
183 !stimer->config.direct_mode &&
184 stimer->config.sintx == sint)
185 stimer_mark_pending(stimer, false);
188 idx = srcu_read_lock(&kvm->irq_srcu);
189 gsi = atomic_read(&synic->sint_to_gsi[sint]);
191 kvm_notify_acked_gsi(kvm, gsi);
192 srcu_read_unlock(&kvm->irq_srcu, idx);
195 static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
197 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
198 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
200 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
201 hv_vcpu->exit.u.synic.msr = msr;
202 hv_vcpu->exit.u.synic.control = synic->control;
203 hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
204 hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
206 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
209 static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
210 u32 msr, u64 data, bool host)
212 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
215 if (!synic->active && !host)
218 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
222 case HV_X64_MSR_SCONTROL:
223 synic->control = data;
225 synic_exit(synic, msr);
227 case HV_X64_MSR_SVERSION:
232 synic->version = data;
234 case HV_X64_MSR_SIEFP:
235 if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
236 !synic->dont_zero_synic_pages)
237 if (kvm_clear_guest(vcpu->kvm,
238 data & PAGE_MASK, PAGE_SIZE)) {
242 synic->evt_page = data;
244 synic_exit(synic, msr);
246 case HV_X64_MSR_SIMP:
247 if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
248 !synic->dont_zero_synic_pages)
249 if (kvm_clear_guest(vcpu->kvm,
250 data & PAGE_MASK, PAGE_SIZE)) {
254 synic->msg_page = data;
256 synic_exit(synic, msr);
258 case HV_X64_MSR_EOM: {
261 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
262 kvm_hv_notify_acked_sint(vcpu, i);
265 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
266 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
275 static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
277 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
279 return hv_vcpu->cpuid_cache.syndbg_cap_eax &
280 HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
283 static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
285 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
287 if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
288 hv->hv_syndbg.control.status =
289 vcpu->run->hyperv.u.syndbg.status;
293 static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
295 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
296 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
298 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
299 hv_vcpu->exit.u.syndbg.msr = msr;
300 hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
301 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
302 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
303 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
304 vcpu->arch.complete_userspace_io =
305 kvm_hv_syndbg_complete_userspace;
307 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
310 static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
312 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
314 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
317 trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
318 to_hv_vcpu(vcpu)->vp_index, msr, data);
320 case HV_X64_MSR_SYNDBG_CONTROL:
321 syndbg->control.control = data;
323 syndbg_exit(vcpu, msr);
325 case HV_X64_MSR_SYNDBG_STATUS:
326 syndbg->control.status = data;
328 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
329 syndbg->control.send_page = data;
331 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
332 syndbg->control.recv_page = data;
334 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
335 syndbg->control.pending_page = data;
337 syndbg_exit(vcpu, msr);
339 case HV_X64_MSR_SYNDBG_OPTIONS:
340 syndbg->options = data;
349 static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
351 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
353 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
357 case HV_X64_MSR_SYNDBG_CONTROL:
358 *pdata = syndbg->control.control;
360 case HV_X64_MSR_SYNDBG_STATUS:
361 *pdata = syndbg->control.status;
363 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
364 *pdata = syndbg->control.send_page;
366 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
367 *pdata = syndbg->control.recv_page;
369 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
370 *pdata = syndbg->control.pending_page;
372 case HV_X64_MSR_SYNDBG_OPTIONS:
373 *pdata = syndbg->options;
379 trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
384 static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
389 if (!synic->active && !host)
394 case HV_X64_MSR_SCONTROL:
395 *pdata = synic->control;
397 case HV_X64_MSR_SVERSION:
398 *pdata = synic->version;
400 case HV_X64_MSR_SIEFP:
401 *pdata = synic->evt_page;
403 case HV_X64_MSR_SIMP:
404 *pdata = synic->msg_page;
409 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
410 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
419 static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
421 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
422 struct kvm_lapic_irq irq;
425 if (sint >= ARRAY_SIZE(synic->sint))
428 vector = synic_get_sint_vector(synic_read_sint(synic, sint));
432 memset(&irq, 0, sizeof(irq));
433 irq.shorthand = APIC_DEST_SELF;
434 irq.dest_mode = APIC_DEST_PHYSICAL;
435 irq.delivery_mode = APIC_DM_FIXED;
439 ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
440 trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
444 int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
446 struct kvm_vcpu_hv_synic *synic;
448 synic = synic_get(kvm, vpidx);
452 return synic_set_irq(synic, sint);
455 void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
457 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
460 trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
462 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
463 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
464 kvm_hv_notify_acked_sint(vcpu, i);
467 static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
469 struct kvm_vcpu_hv_synic *synic;
471 synic = synic_get(kvm, vpidx);
475 if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
478 atomic_set(&synic->sint_to_gsi[sint], gsi);
482 void kvm_hv_irq_routing_update(struct kvm *kvm)
484 struct kvm_irq_routing_table *irq_rt;
485 struct kvm_kernel_irq_routing_entry *e;
488 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
489 lockdep_is_held(&kvm->irq_lock));
491 for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
492 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
493 if (e->type == KVM_IRQ_ROUTING_HV_SINT)
494 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
495 e->hv_sint.sint, gsi);
500 static void synic_init(struct kvm_vcpu_hv_synic *synic)
504 memset(synic, 0, sizeof(*synic));
505 synic->version = HV_SYNIC_VERSION_1;
506 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
507 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
508 atomic_set(&synic->sint_to_gsi[i], -1);
512 static u64 get_time_ref_counter(struct kvm *kvm)
514 struct kvm_hv *hv = to_kvm_hv(kvm);
515 struct kvm_vcpu *vcpu;
519 * Fall back to get_kvmclock_ns() when the TSC page hasn't been set up,
520 * or is broken, disabled or being updated.
522 if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
523 return div_u64(get_kvmclock_ns(kvm), 100);
525 vcpu = kvm_get_vcpu(kvm, 0);
526 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
527 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
528 + hv->tsc_ref.tsc_offset;
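/*
 * Worked example (illustrative, not part of the original file): the value
 * returned above is in 100ns units.  With a 1 GHz guest TSC the scale
 * computed by compute_tsc_page_parameters() below is 2^64 / 100, so for
 * tsc = 10^9 (one second worth of ticks):
 *
 *	mul_u64_u64_shr(10^9, 2^64 / 100, 64) = 10^9 / 100 = 10^7
 *
 * i.e. exactly one second expressed in 100ns units, plus tsc_offset.
 */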
531 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
534 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
536 set_bit(stimer->index,
537 to_hv_vcpu(vcpu)->stimer_pending_bitmap);
538 kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
543 static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
545 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
547 trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
550 hrtimer_cancel(&stimer->timer);
551 clear_bit(stimer->index,
552 to_hv_vcpu(vcpu)->stimer_pending_bitmap);
553 stimer->msg_pending = false;
554 stimer->exp_time = 0;
557 static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
559 struct kvm_vcpu_hv_stimer *stimer;
561 stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
562 trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
564 stimer_mark_pending(stimer, true);
566 return HRTIMER_NORESTART;
570 * stimer_start() assumptions:
571 * a) stimer->count is not equal to 0
572 * b) stimer->config has HV_STIMER_ENABLE flag
574 static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
579 time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
580 ktime_now = ktime_get();
582 if (stimer->config.periodic) {
583 if (stimer->exp_time) {
584 if (time_now >= stimer->exp_time) {
587 div64_u64_rem(time_now - stimer->exp_time,
588 stimer->count, &remainder);
590 time_now + (stimer->count - remainder);
593 stimer->exp_time = time_now + stimer->count;
595 trace_kvm_hv_stimer_start_periodic(
596 hv_stimer_to_vcpu(stimer)->vcpu_id,
598 time_now, stimer->exp_time);
600 hrtimer_start(&stimer->timer,
601 ktime_add_ns(ktime_now,
602 100 * (stimer->exp_time - time_now)),
606 stimer->exp_time = stimer->count;
607 if (time_now >= stimer->count) {
609 * Expire the timer according to the Hypervisor Top-Level Functional
610 * Specification v4 (15.3.1):
611 * "If a one shot is enabled and the specified count is in
612 * the past, it will expire immediately."
614 stimer_mark_pending(stimer, false);
618 trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
620 time_now, stimer->count);
622 hrtimer_start(&stimer->timer,
623 ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
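/*
 * Worked example (illustrative): stimer->count and the reference time are
 * both in 100ns units, while hrtimers take nanoseconds, hence the "100 *"
 * above.  A one-shot timer with count = 100000 (10 ms) armed when
 * time_now = 60000 is programmed 100 * (100000 - 60000) = 4,000,000 ns,
 * i.e. 4 ms, into the future.
 */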
628 static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
631 union hv_stimer_config new_config = {.as_uint64 = config},
632 old_config = {.as_uint64 = stimer->config.as_uint64};
633 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
634 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
635 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
637 if (!synic->active && !host)
640 if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
641 !(hv_vcpu->cpuid_cache.features_edx &
642 HV_STIMER_DIRECT_MODE_AVAILABLE)))
645 trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
646 stimer->index, config, host);
648 stimer_cleanup(stimer);
649 if (old_config.enable &&
650 !new_config.direct_mode && new_config.sintx == 0)
651 new_config.enable = 0;
652 stimer->config.as_uint64 = new_config.as_uint64;
654 if (stimer->config.enable)
655 stimer_mark_pending(stimer, false);
660 static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
663 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
664 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
666 if (!synic->active && !host)
669 trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
670 stimer->index, count, host);
672 stimer_cleanup(stimer);
673 stimer->count = count;
674 if (stimer->count == 0)
675 stimer->config.enable = 0;
676 else if (stimer->config.auto_enable)
677 stimer->config.enable = 1;
679 if (stimer->config.enable)
680 stimer_mark_pending(stimer, false);
685 static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
687 *pconfig = stimer->config.as_uint64;
691 static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
693 *pcount = stimer->count;
697 static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
698 struct hv_message *src_msg, bool no_retry)
700 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
701 int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
703 struct hv_message_header hv_hdr;
706 if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
709 msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
712 * Strictly following the spec-mandated ordering would require setting
713 * .msg_pending before checking .message_type. However, this function
714 * is only called in vCPU context, so the entire update is atomic from
715 * the guest's POV and thus the exact order here doesn't matter.
717 r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
718 msg_off + offsetof(struct hv_message,
719 header.message_type),
720 sizeof(hv_hdr.message_type));
724 if (hv_hdr.message_type != HVMSG_NONE) {
728 hv_hdr.message_flags.msg_pending = 1;
729 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
730 &hv_hdr.message_flags,
732 offsetof(struct hv_message,
733 header.message_flags),
734 sizeof(hv_hdr.message_flags));
740 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
741 sizeof(src_msg->header) +
742 src_msg->header.payload_size);
746 r = synic_set_irq(synic, sint);
754 static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
756 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
757 struct hv_message *msg = &stimer->msg;
758 struct hv_timer_message_payload *payload =
759 (struct hv_timer_message_payload *)&msg->u.payload;
762 * To avoid piling up periodic ticks, don't retry message
763 * delivery for them (in line with the "lazy" lost-ticks policy).
765 bool no_retry = stimer->config.periodic;
767 payload->expiration_time = stimer->exp_time;
768 payload->delivery_time = get_time_ref_counter(vcpu->kvm);
769 return synic_deliver_msg(to_hv_synic(vcpu),
770 stimer->config.sintx, msg,
774 static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
776 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
777 struct kvm_lapic_irq irq = {
778 .delivery_mode = APIC_DM_FIXED,
779 .vector = stimer->config.apic_vector
782 if (lapic_in_kernel(vcpu))
783 return !kvm_apic_set_irq(vcpu, &irq, NULL);
787 static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
789 int r, direct = stimer->config.direct_mode;
791 stimer->msg_pending = true;
793 r = stimer_send_msg(stimer);
795 r = stimer_notify_direct(stimer);
796 trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
797 stimer->index, direct, r);
799 stimer->msg_pending = false;
800 if (!(stimer->config.periodic))
801 stimer->config.enable = 0;
805 void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
807 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
808 struct kvm_vcpu_hv_stimer *stimer;
809 u64 time_now, exp_time;
815 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
816 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
817 stimer = &hv_vcpu->stimer[i];
818 if (stimer->config.enable) {
819 exp_time = stimer->exp_time;
823 get_time_ref_counter(vcpu->kvm);
824 if (time_now >= exp_time)
825 stimer_expiration(stimer);
828 if ((stimer->config.enable) &&
830 if (!stimer->msg_pending)
831 stimer_start(stimer);
833 stimer_cleanup(stimer);
838 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
840 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
846 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
847 stimer_cleanup(&hv_vcpu->stimer[i]);
850 vcpu->arch.hyperv = NULL;
853 bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
855 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
860 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
862 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
864 EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
866 bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
867 struct hv_vp_assist_page *assist_page)
869 if (!kvm_hv_assist_page_enabled(vcpu))
871 return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
872 assist_page, sizeof(*assist_page));
874 EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
876 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
878 struct hv_message *msg = &stimer->msg;
879 struct hv_timer_message_payload *payload =
880 (struct hv_timer_message_payload *)&msg->u.payload;
882 memset(&msg->header, 0, sizeof(msg->header));
883 msg->header.message_type = HVMSG_TIMER_EXPIRED;
884 msg->header.payload_size = sizeof(*payload);
886 payload->timer_index = stimer->index;
887 payload->expiration_time = 0;
888 payload->delivery_time = 0;
891 static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
893 memset(stimer, 0, sizeof(*stimer));
894 stimer->index = timer_index;
895 hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
896 stimer->timer.function = stimer_timer_callback;
897 stimer_prepare_msg(stimer);
900 static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
902 struct kvm_vcpu_hv *hv_vcpu;
905 hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
909 vcpu->arch.hyperv = hv_vcpu;
910 hv_vcpu->vcpu = vcpu;
912 synic_init(&hv_vcpu->synic);
914 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
915 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
916 stimer_init(&hv_vcpu->stimer[i], i);
918 hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
923 int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
925 struct kvm_vcpu_hv_synic *synic;
928 if (!to_hv_vcpu(vcpu)) {
929 r = kvm_hv_vcpu_init(vcpu);
934 synic = to_hv_synic(vcpu);
937 * Hyper-V SynIC auto-EOI SINTs are
938 * not compatible with APICv, so request
939 * to deactivate APICv permanently.
941 kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_HYPERV);
942 synic->active = true;
943 synic->dont_zero_synic_pages = dont_zero_synic_pages;
944 synic->control = HV_SYNIC_CONTROL_ENABLE;
948 static bool kvm_hv_msr_partition_wide(u32 msr)
953 case HV_X64_MSR_GUEST_OS_ID:
954 case HV_X64_MSR_HYPERCALL:
955 case HV_X64_MSR_REFERENCE_TSC:
956 case HV_X64_MSR_TIME_REF_COUNT:
957 case HV_X64_MSR_CRASH_CTL:
958 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
959 case HV_X64_MSR_RESET:
960 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
961 case HV_X64_MSR_TSC_EMULATION_CONTROL:
962 case HV_X64_MSR_TSC_EMULATION_STATUS:
963 case HV_X64_MSR_SYNDBG_OPTIONS:
964 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
972 static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
974 struct kvm_hv *hv = to_kvm_hv(kvm);
975 size_t size = ARRAY_SIZE(hv->hv_crash_param);
977 if (WARN_ON_ONCE(index >= size))
980 *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
984 static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
986 struct kvm_hv *hv = to_kvm_hv(kvm);
988 *pdata = hv->hv_crash_ctl;
992 static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
994 struct kvm_hv *hv = to_kvm_hv(kvm);
996 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
1001 static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
1003 struct kvm_hv *hv = to_kvm_hv(kvm);
1004 size_t size = ARRAY_SIZE(hv->hv_crash_param);
1006 if (WARN_ON_ONCE(index >= size))
1009 hv->hv_crash_param[array_index_nospec(index, size)] = data;
1014 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
1015 * between them is possible:
1018 * nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
1022 * nsec/100 = ticks * scale / 2^64 + offset
1024 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
1025 * By dividing the kvmclock formula by 100 and equating what's left we get:
1026 * ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1027 * scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
1028 * scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
1030 * Now expand the kvmclock formula and divide by 100:
1031 * nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
1032 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
1034 * nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1035 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1036 * + system_time / 100
1038 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
1039 * nsec/100 = ticks * scale / 2^64
1040 * - tsc_timestamp * scale / 2^64
1041 * + system_time / 100
1043 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
1044 * offset = system_time / 100 - tsc_timestamp * scale / 2^64
1046 * These two equivalencies are implemented in this function.
1048 static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
1049 struct ms_hyperv_tsc_page *tsc_ref)
1053 if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
1057 * Check if the scale would overflow; if so, we use the time ref counter:
1058 * tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
1059 * tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
1060 * tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
1062 max_mul = 100ull << (32 - hv_clock->tsc_shift);
1063 if (hv_clock->tsc_to_system_mul >= max_mul)
1067 * Otherwise compute the scale and offset according to the formulas
1070 tsc_ref->tsc_scale =
1071 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
1072 hv_clock->tsc_to_system_mul,
1075 tsc_ref->tsc_offset = hv_clock->system_time;
1076 do_div(tsc_ref->tsc_offset, 100);
1077 tsc_ref->tsc_offset -=
1078 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
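/*
 * Worked example (illustrative, assuming a 1 GHz guest TSC): kvmclock
 * would describe such a clock with e.g. tsc_shift = 1 and
 * tsc_to_system_mul = 0x80000000, since 0x80000000 * 2^(1-32) = 1 ns per
 * tick.  Plugging those into the code above gives:
 *
 *	max_mul    = 100 << 31, no overflow since 2^31 < 100 * 2^31
 *	tsc_scale  = 2^(32+1) * 0x80000000 / 100 = 2^64 / 100
 *	tsc_offset = system_time / 100 - tsc_timestamp / 100
 *
 * which feeds straight into the Hyper-V formula used by
 * get_time_ref_counter() above.
 */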
1083 * Don't touch TSC page values if the guest has opted for TSC emulation after
1084 * migration. KVM doesn't fully support reenlightenment notifications and TSC
1085 * access emulation and Hyper-V is known to expect the values in TSC page to
1086 * stay constant before TSC access emulation is disabled from guest side
1087 * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
1088 * frequency and guest visible TSC value across migration (and prevent it when
1089 * TSC scaling is unsupported).
1091 static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
1093 return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
1094 hv->hv_tsc_emulation_control;
1097 void kvm_hv_setup_tsc_page(struct kvm *kvm,
1098 struct pvclock_vcpu_time_info *hv_clock)
1100 struct kvm_hv *hv = to_kvm_hv(kvm);
1104 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
1105 BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
1107 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
1108 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
1111 mutex_lock(&hv->hv_lock);
1112 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1115 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
1117 * Because the TSC parameters only vary when there is a
1118 * change in the master clock, do not bother with caching.
1120 if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
1121 &tsc_seq, sizeof(tsc_seq))))
1124 if (tsc_seq && tsc_page_update_unsafe(hv)) {
1125 if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1128 hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1133 * While we're computing and writing the parameters, force the
1134 * guest to use the time reference count MSR.
1136 hv->tsc_ref.tsc_sequence = 0;
1137 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1138 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1141 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
1144 /* Ensure sequence is zero before writing the rest of the struct. */
1146 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1150 * Now switch to the TSC page mechanism by writing the sequence.
1153 if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
1156 /* Write the struct entirely before the non-zero sequence. */
1159 hv->tsc_ref.tsc_sequence = tsc_seq;
1160 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1161 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1164 hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1168 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
1170 mutex_unlock(&hv->hv_lock);
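/*
 * Illustrative sketch (not part of the original file): a guest consumes the
 * page written above with a sequence-lock style loop, falling back to
 * HV_X64_MSR_TIME_REF_COUNT while the sequence is 0 or 0xFFFFFFFF.
 * Roughly (names such as 'tsc_page' and 'read_time_ref_count_msr()' are
 * assumptions made for the example):
 *
 *	do {
 *		seq = READ_ONCE(tsc_page->tsc_sequence);
 *		if (seq == 0 || seq == 0xFFFFFFFF)
 *			return read_time_ref_count_msr();
 *		smp_rmb();
 *		scale = tsc_page->tsc_scale;
 *		offset = tsc_page->tsc_offset;
 *		tsc = rdtsc();
 *		smp_rmb();
 *	} while (READ_ONCE(tsc_page->tsc_sequence) != seq);
 *	return mul_u64_u64_shr(tsc, scale, 64) + offset;
 *
 * This is why the code above clears tsc_sequence before rewriting the
 * struct and only publishes a non-zero sequence afterwards.
 */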
1173 void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
1175 struct kvm_hv *hv = to_kvm_hv(kvm);
1179 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
1180 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
1181 tsc_page_update_unsafe(hv))
1184 mutex_lock(&hv->hv_lock);
1186 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1189 /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
1190 if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
1191 hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
1193 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
1195 hv->tsc_ref.tsc_sequence = 0;
1198 * Take the srcu lock as memslots will be accessed to check the gfn
1199 * cache generation against the memslots generation.
1201 idx = srcu_read_lock(&kvm->srcu);
1202 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1203 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1204 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
1205 srcu_read_unlock(&kvm->srcu, idx);
1208 mutex_unlock(&hv->hv_lock);
1212 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
1214 if (!hv_vcpu->enforce_cpuid)
1218 case HV_X64_MSR_GUEST_OS_ID:
1219 case HV_X64_MSR_HYPERCALL:
1220 return hv_vcpu->cpuid_cache.features_eax &
1221 HV_MSR_HYPERCALL_AVAILABLE;
1222 case HV_X64_MSR_VP_RUNTIME:
1223 return hv_vcpu->cpuid_cache.features_eax &
1224 HV_MSR_VP_RUNTIME_AVAILABLE;
1225 case HV_X64_MSR_TIME_REF_COUNT:
1226 return hv_vcpu->cpuid_cache.features_eax &
1227 HV_MSR_TIME_REF_COUNT_AVAILABLE;
1228 case HV_X64_MSR_VP_INDEX:
1229 return hv_vcpu->cpuid_cache.features_eax &
1230 HV_MSR_VP_INDEX_AVAILABLE;
1231 case HV_X64_MSR_RESET:
1232 return hv_vcpu->cpuid_cache.features_eax &
1233 HV_MSR_RESET_AVAILABLE;
1234 case HV_X64_MSR_REFERENCE_TSC:
1235 return hv_vcpu->cpuid_cache.features_eax &
1236 HV_MSR_REFERENCE_TSC_AVAILABLE;
1237 case HV_X64_MSR_SCONTROL:
1238 case HV_X64_MSR_SVERSION:
1239 case HV_X64_MSR_SIEFP:
1240 case HV_X64_MSR_SIMP:
1241 case HV_X64_MSR_EOM:
1242 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1243 return hv_vcpu->cpuid_cache.features_eax &
1244 HV_MSR_SYNIC_AVAILABLE;
1245 case HV_X64_MSR_STIMER0_CONFIG:
1246 case HV_X64_MSR_STIMER1_CONFIG:
1247 case HV_X64_MSR_STIMER2_CONFIG:
1248 case HV_X64_MSR_STIMER3_CONFIG:
1249 case HV_X64_MSR_STIMER0_COUNT:
1250 case HV_X64_MSR_STIMER1_COUNT:
1251 case HV_X64_MSR_STIMER2_COUNT:
1252 case HV_X64_MSR_STIMER3_COUNT:
1253 return hv_vcpu->cpuid_cache.features_eax &
1254 HV_MSR_SYNTIMER_AVAILABLE;
1255 case HV_X64_MSR_EOI:
1256 case HV_X64_MSR_ICR:
1257 case HV_X64_MSR_TPR:
1258 case HV_X64_MSR_VP_ASSIST_PAGE:
1259 return hv_vcpu->cpuid_cache.features_eax &
1260 HV_MSR_APIC_ACCESS_AVAILABLE;
1262 case HV_X64_MSR_TSC_FREQUENCY:
1263 case HV_X64_MSR_APIC_FREQUENCY:
1264 return hv_vcpu->cpuid_cache.features_eax &
1265 HV_ACCESS_FREQUENCY_MSRS;
1266 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1267 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1268 case HV_X64_MSR_TSC_EMULATION_STATUS:
1269 return hv_vcpu->cpuid_cache.features_eax &
1270 HV_ACCESS_REENLIGHTENMENT;
1271 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1272 case HV_X64_MSR_CRASH_CTL:
1273 return hv_vcpu->cpuid_cache.features_edx &
1274 HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
1275 case HV_X64_MSR_SYNDBG_OPTIONS:
1276 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1277 return hv_vcpu->cpuid_cache.features_edx &
1278 HV_FEATURE_DEBUG_MSRS_AVAILABLE;
1286 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
1289 struct kvm *kvm = vcpu->kvm;
1290 struct kvm_hv *hv = to_kvm_hv(kvm);
1292 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1296 case HV_X64_MSR_GUEST_OS_ID:
1297 hv->hv_guest_os_id = data;
1298 /* setting guest os id to zero disables hypercall page */
1299 if (!hv->hv_guest_os_id)
1300 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1302 case HV_X64_MSR_HYPERCALL: {
1307 /* if the guest OS ID is not set, the hypercall page should remain disabled */
1308 if (!hv->hv_guest_os_id)
1310 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1311 hv->hv_hypercall = data;
1316 * If Xen and Hyper-V hypercalls are both enabled, disambiguate
1317 * the same way Xen itself does, by setting bit 31 of EAX,
1318 * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and is just
1319 * going to be clobbered on 64-bit.
1321 if (kvm_xen_hypercall_enabled(kvm)) {
1322 /* orl $0x80000000, %eax */
1323 instructions[i++] = 0x0d;
1324 instructions[i++] = 0x00;
1325 instructions[i++] = 0x00;
1326 instructions[i++] = 0x00;
1327 instructions[i++] = 0x80;
1330 /* vmcall/vmmcall */
1331 static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
1335 ((unsigned char *)instructions)[i++] = 0xc3;
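/*
 * Illustrative note (not from the original source): with the Xen quirk
 * above the hypercall page ends up containing, byte for byte,
 *
 *	0d 00 00 00 80         orl    $0x80000000, %eax
 *	0f 01 c1 / 0f 01 d9    vmcall (Intel) / vmmcall (AMD)
 *	c3                     ret
 *
 * without Xen only the vmcall/vmmcall + ret part is written.
 */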
1337 addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
1338 if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
1340 hv->hv_hypercall = data;
1343 case HV_X64_MSR_REFERENCE_TSC:
1344 hv->hv_tsc_page = data;
1345 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
1347 hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
1349 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
1350 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1352 hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
1355 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1356 return kvm_hv_msr_set_crash_data(kvm,
1357 msr - HV_X64_MSR_CRASH_P0,
1359 case HV_X64_MSR_CRASH_CTL:
1361 return kvm_hv_msr_set_crash_ctl(kvm, data);
1363 if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
1364 vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
1365 hv->hv_crash_param[0],
1366 hv->hv_crash_param[1],
1367 hv->hv_crash_param[2],
1368 hv->hv_crash_param[3],
1369 hv->hv_crash_param[4]);
1371 /* Send notification about crash to user space */
1372 kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
1375 case HV_X64_MSR_RESET:
1377 vcpu_debug(vcpu, "hyper-v reset requested\n");
1378 kvm_make_request(KVM_REQ_HV_RESET, vcpu);
1381 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1382 hv->hv_reenlightenment_control = data;
1384 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1385 hv->hv_tsc_emulation_control = data;
1387 case HV_X64_MSR_TSC_EMULATION_STATUS:
1391 hv->hv_tsc_emulation_status = data;
1393 case HV_X64_MSR_TIME_REF_COUNT:
1394 /* read-only, but still ignore it if host-initiated */
1398 case HV_X64_MSR_SYNDBG_OPTIONS:
1399 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1400 return syndbg_set_msr(vcpu, msr, data, host);
1402 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1410 /* Calculate CPU time spent by the current task, in 100ns units */
1410 static u64 current_task_runtime_100ns(void)
1414 task_cputime_adjusted(current, &utime, &stime);
1416 return div_u64(utime + stime, 100);
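/*
 * Illustrative note (not from the original source): HV_X64_MSR_VP_RUNTIME
 * reads return current_task_runtime_100ns() + runtime_offset, and a
 * host-initiated write of 'data' simply stores
 * runtime_offset = data - current_task_runtime_100ns(), so subsequent
 * reads start at 'data' and keep growing with the vCPU task's CPU time.
 */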
1419 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1421 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1423 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1427 case HV_X64_MSR_VP_INDEX: {
1428 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1429 int vcpu_idx = kvm_vcpu_get_idx(vcpu);
1430 u32 new_vp_index = (u32)data;
1432 if (!host || new_vp_index >= KVM_MAX_VCPUS)
1435 if (new_vp_index == hv_vcpu->vp_index)
1439 * The VP index is initialized to vcpu_idx by
1440 * kvm_hv_vcpu_init() so they initially match. Now that the
1441 * VP index is changing, adjust num_mismatched_vp_indexes if
1442 * it now matches or no longer matches vcpu_idx.
1444 if (hv_vcpu->vp_index == vcpu_idx)
1445 atomic_inc(&hv->num_mismatched_vp_indexes);
1446 else if (new_vp_index == vcpu_idx)
1447 atomic_dec(&hv->num_mismatched_vp_indexes);
1449 hv_vcpu->vp_index = new_vp_index;
1452 case HV_X64_MSR_VP_ASSIST_PAGE: {
1456 if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
1457 hv_vcpu->hv_vapic = data;
1458 if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
1462 gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
1463 addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
1464 if (kvm_is_error_hva(addr))
1468 * Clear only the apic_assist portion of struct hv_vp_assist_page;
1469 * there can be valuable data in the rest which needs
1470 * to be preserved, e.g. on migration.
1472 if (__put_user(0, (u32 __user *)addr))
1474 hv_vcpu->hv_vapic = data;
1475 kvm_vcpu_mark_page_dirty(vcpu, gfn);
1476 if (kvm_lapic_enable_pv_eoi(vcpu,
1477 gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
1478 sizeof(struct hv_vp_assist_page)))
1482 case HV_X64_MSR_EOI:
1483 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1484 case HV_X64_MSR_ICR:
1485 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1486 case HV_X64_MSR_TPR:
1487 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1488 case HV_X64_MSR_VP_RUNTIME:
1491 hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1493 case HV_X64_MSR_SCONTROL:
1494 case HV_X64_MSR_SVERSION:
1495 case HV_X64_MSR_SIEFP:
1496 case HV_X64_MSR_SIMP:
1497 case HV_X64_MSR_EOM:
1498 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1499 return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
1500 case HV_X64_MSR_STIMER0_CONFIG:
1501 case HV_X64_MSR_STIMER1_CONFIG:
1502 case HV_X64_MSR_STIMER2_CONFIG:
1503 case HV_X64_MSR_STIMER3_CONFIG: {
1504 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1506 return stimer_set_config(to_hv_stimer(vcpu, timer_index),
1509 case HV_X64_MSR_STIMER0_COUNT:
1510 case HV_X64_MSR_STIMER1_COUNT:
1511 case HV_X64_MSR_STIMER2_COUNT:
1512 case HV_X64_MSR_STIMER3_COUNT: {
1513 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1515 return stimer_set_count(to_hv_stimer(vcpu, timer_index),
1518 case HV_X64_MSR_TSC_FREQUENCY:
1519 case HV_X64_MSR_APIC_FREQUENCY:
1520 /* read-only, but still ignore it if host-initiated */
1525 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1533 static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1537 struct kvm *kvm = vcpu->kvm;
1538 struct kvm_hv *hv = to_kvm_hv(kvm);
1540 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1544 case HV_X64_MSR_GUEST_OS_ID:
1545 data = hv->hv_guest_os_id;
1547 case HV_X64_MSR_HYPERCALL:
1548 data = hv->hv_hypercall;
1550 case HV_X64_MSR_TIME_REF_COUNT:
1551 data = get_time_ref_counter(kvm);
1553 case HV_X64_MSR_REFERENCE_TSC:
1554 data = hv->hv_tsc_page;
1556 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1557 return kvm_hv_msr_get_crash_data(kvm,
1558 msr - HV_X64_MSR_CRASH_P0,
1560 case HV_X64_MSR_CRASH_CTL:
1561 return kvm_hv_msr_get_crash_ctl(kvm, pdata);
1562 case HV_X64_MSR_RESET:
1565 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1566 data = hv->hv_reenlightenment_control;
1568 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1569 data = hv->hv_tsc_emulation_control;
1571 case HV_X64_MSR_TSC_EMULATION_STATUS:
1572 data = hv->hv_tsc_emulation_status;
1574 case HV_X64_MSR_SYNDBG_OPTIONS:
1575 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1576 return syndbg_get_msr(vcpu, msr, pdata, host);
1578 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1586 static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1590 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1592 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1596 case HV_X64_MSR_VP_INDEX:
1597 data = hv_vcpu->vp_index;
1599 case HV_X64_MSR_EOI:
1600 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1601 case HV_X64_MSR_ICR:
1602 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1603 case HV_X64_MSR_TPR:
1604 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1605 case HV_X64_MSR_VP_ASSIST_PAGE:
1606 data = hv_vcpu->hv_vapic;
1608 case HV_X64_MSR_VP_RUNTIME:
1609 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1611 case HV_X64_MSR_SCONTROL:
1612 case HV_X64_MSR_SVERSION:
1613 case HV_X64_MSR_SIEFP:
1614 case HV_X64_MSR_SIMP:
1615 case HV_X64_MSR_EOM:
1616 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1617 return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
1618 case HV_X64_MSR_STIMER0_CONFIG:
1619 case HV_X64_MSR_STIMER1_CONFIG:
1620 case HV_X64_MSR_STIMER2_CONFIG:
1621 case HV_X64_MSR_STIMER3_CONFIG: {
1622 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1624 return stimer_get_config(to_hv_stimer(vcpu, timer_index),
1627 case HV_X64_MSR_STIMER0_COUNT:
1628 case HV_X64_MSR_STIMER1_COUNT:
1629 case HV_X64_MSR_STIMER2_COUNT:
1630 case HV_X64_MSR_STIMER3_COUNT: {
1631 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1633 return stimer_get_count(to_hv_stimer(vcpu, timer_index),
1636 case HV_X64_MSR_TSC_FREQUENCY:
1637 data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
1639 case HV_X64_MSR_APIC_FREQUENCY:
1640 data = APIC_BUS_FREQUENCY;
1643 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1650 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1652 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1654 if (!host && !vcpu->arch.hyperv_enabled)
1657 if (!to_hv_vcpu(vcpu)) {
1658 if (kvm_hv_vcpu_init(vcpu))
1662 if (kvm_hv_msr_partition_wide(msr)) {
1665 mutex_lock(&hv->hv_lock);
1666 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
1667 mutex_unlock(&hv->hv_lock);
1670 return kvm_hv_set_msr(vcpu, msr, data, host);
1673 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1675 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1677 if (!host && !vcpu->arch.hyperv_enabled)
1680 if (!to_hv_vcpu(vcpu)) {
1681 if (kvm_hv_vcpu_init(vcpu))
1685 if (kvm_hv_msr_partition_wide(msr)) {
1688 mutex_lock(&hv->hv_lock);
1689 r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
1690 mutex_unlock(&hv->hv_lock);
1693 return kvm_hv_get_msr(vcpu, msr, pdata, host);
1696 static __always_inline unsigned long *sparse_set_to_vcpu_mask(
1697 struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
1698 u64 *vp_bitmap, unsigned long *vcpu_bitmap)
1700 struct kvm_hv *hv = to_kvm_hv(kvm);
1701 struct kvm_vcpu *vcpu;
1702 int i, bank, sbank = 0;
1704 memset(vp_bitmap, 0,
1705 KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
1706 for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1707 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
1708 vp_bitmap[bank] = sparse_banks[sbank++];
1710 if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
1711 /* for all vcpus vp_index == vcpu_idx */
1712 return (unsigned long *)vp_bitmap;
1715 bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
1716 kvm_for_each_vcpu(i, vcpu, kvm) {
1717 if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
1718 __set_bit(i, vcpu_bitmap);
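/*
 * Worked example (illustrative): with valid_bank_mask = 0x5 the guest
 * provided two 64-bit banks; bank 0 covers VP_INDEX 0-63 and bank 2
 * covers VP_INDEX 128-191, so sparse_banks[0] lands in vp_bitmap[0] and
 * sparse_banks[1] in vp_bitmap[2].  The vp_bitmap can be used directly
 * only while every vCPU's VP index equals its vcpu_idx.
 */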
1723 struct kvm_hv_hcall {
1732 sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
1735 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
1739 struct kvm *kvm = vcpu->kvm;
1740 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1741 struct hv_tlb_flush_ex flush_ex;
1742 struct hv_tlb_flush flush;
1743 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1744 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1745 unsigned long *vcpu_mask;
1746 u64 valid_bank_mask;
1747 u64 sparse_banks[64];
1748 int sparse_banks_len;
1753 flush.address_space = hc->ingpa;
1754 flush.flags = hc->outgpa;
1755 flush.processor_mask = sse128_lo(hc->xmm[0]);
1757 if (unlikely(kvm_read_guest(kvm, hc->ingpa,
1758 &flush, sizeof(flush))))
1759 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1762 trace_kvm_hv_flush_tlb(flush.processor_mask,
1763 flush.address_space, flush.flags);
1765 valid_bank_mask = BIT_ULL(0);
1766 sparse_banks[0] = flush.processor_mask;
1769 * Work around possible WS2012 bug: it sends hypercalls
1770 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
1771 * while also expecting us to flush something and crashing if
1772 * we don't. Let's treat processor_mask == 0 the same as
1773 * HV_FLUSH_ALL_PROCESSORS.
1775 all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
1776 flush.processor_mask == 0;
1779 flush_ex.address_space = hc->ingpa;
1780 flush_ex.flags = hc->outgpa;
1781 memcpy(&flush_ex.hv_vp_set,
1782 &hc->xmm[0], sizeof(hc->xmm[0]));
1784 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
1786 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1789 trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
1790 flush_ex.hv_vp_set.format,
1791 flush_ex.address_space,
1794 valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
1795 all_cpus = flush_ex.hv_vp_set.format !=
1796 HV_GENERIC_SET_SPARSE_4K;
1798 sparse_banks_len = bitmap_weight((unsigned long *)&valid_bank_mask, 64);
1800 if (!sparse_banks_len && !all_cpus)
1805 if (sparse_banks_len > HV_HYPERCALL_MAX_XMM_REGISTERS - 1)
1806 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1807 for (i = 0; i < sparse_banks_len; i += 2) {
1808 sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]);
1809 sparse_banks[i + 1] = sse128_hi(hc->xmm[i / 2 + 1]);
1812 gpa = hc->ingpa + offsetof(struct hv_tlb_flush_ex,
1813 hv_vp_set.bank_contents);
1814 if (unlikely(kvm_read_guest(kvm, gpa, sparse_banks,
1816 sizeof(sparse_banks[0]))))
1817 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1822 cpumask_clear(&hv_vcpu->tlb_flush);
1824 vcpu_mask = all_cpus ? NULL :
1825 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1826 vp_bitmap, vcpu_bitmap);
1829 * vcpu->arch.cr3 may not be up-to-date for running vCPUs, so we can't
1830 * analyze it here; flush the TLB regardless of the specified address space.
1832 kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
1833 NULL, vcpu_mask, &hv_vcpu->tlb_flush);
1836 /* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
1837 return (u64)HV_STATUS_SUCCESS |
1838 ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
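/*
 * Illustrative note (not from the original source): the hypercall result
 * value packs the status code in bits 0-15 and the "reps completed" count
 * at HV_HYPERCALL_REP_COMP_OFFSET (bits 32-43), which is why a rep
 * hypercall that is handled in full returns the whole rep_cnt shifted into
 * that field alongside HV_STATUS_SUCCESS.
 */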
1841 static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
1842 unsigned long *vcpu_bitmap)
1844 struct kvm_lapic_irq irq = {
1845 .delivery_mode = APIC_DM_FIXED,
1848 struct kvm_vcpu *vcpu;
1851 kvm_for_each_vcpu(i, vcpu, kvm) {
1852 if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
1855 /* We fail only when APIC is disabled */
1856 kvm_apic_set_irq(vcpu, &irq, NULL);
1860 static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
1862 struct kvm *kvm = vcpu->kvm;
1863 struct hv_send_ipi_ex send_ipi_ex;
1864 struct hv_send_ipi send_ipi;
1865 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1866 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1867 unsigned long *vcpu_mask;
1868 unsigned long valid_bank_mask;
1869 u64 sparse_banks[64];
1870 int sparse_banks_len;
1876 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
1878 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1879 sparse_banks[0] = send_ipi.cpu_mask;
1880 vector = send_ipi.vector;
1882 /* 'reserved' part of hv_send_ipi should be 0 */
1883 if (unlikely(hc->ingpa >> 32 != 0))
1884 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1885 sparse_banks[0] = hc->outgpa;
1886 vector = (u32)hc->ingpa;
1889 valid_bank_mask = BIT_ULL(0);
1891 trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
1893 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
1894 sizeof(send_ipi_ex))))
1895 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1897 trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
1898 send_ipi_ex.vp_set.format,
1899 send_ipi_ex.vp_set.valid_bank_mask);
1901 vector = send_ipi_ex.vector;
1902 valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
1903 sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
1904 sizeof(sparse_banks[0]);
1906 all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
1908 if (!sparse_banks_len)
1913 hc->ingpa + offsetof(struct hv_send_ipi_ex,
1914 vp_set.bank_contents),
1917 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1920 if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
1921 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1923 vcpu_mask = all_cpus ? NULL :
1924 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1925 vp_bitmap, vcpu_bitmap);
1927 kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
1930 return HV_STATUS_SUCCESS;
1933 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
1935 struct kvm_cpuid_entry2 *entry;
1936 struct kvm_vcpu_hv *hv_vcpu;
1938 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
1939 if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
1940 vcpu->arch.hyperv_enabled = true;
1942 vcpu->arch.hyperv_enabled = false;
1946 if (!to_hv_vcpu(vcpu) && kvm_hv_vcpu_init(vcpu))
1949 hv_vcpu = to_hv_vcpu(vcpu);
1951 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES, 0);
1953 hv_vcpu->cpuid_cache.features_eax = entry->eax;
1954 hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
1955 hv_vcpu->cpuid_cache.features_edx = entry->edx;
1957 hv_vcpu->cpuid_cache.features_eax = 0;
1958 hv_vcpu->cpuid_cache.features_ebx = 0;
1959 hv_vcpu->cpuid_cache.features_edx = 0;
1962 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO, 0);
1964 hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
1965 hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
1967 hv_vcpu->cpuid_cache.enlightenments_eax = 0;
1968 hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
1971 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0);
1973 hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
1975 hv_vcpu->cpuid_cache.syndbg_cap_eax = 0;
1978 int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
1980 struct kvm_vcpu_hv *hv_vcpu;
1983 if (!to_hv_vcpu(vcpu)) {
1985 ret = kvm_hv_vcpu_init(vcpu);
1993 hv_vcpu = to_hv_vcpu(vcpu);
1994 hv_vcpu->enforce_cpuid = enforce;
1999 bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
2001 return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
2004 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
2008 longmode = is_64_bit_mode(vcpu);
2010 kvm_rax_write(vcpu, result);
2012 kvm_rdx_write(vcpu, result >> 32);
2013 kvm_rax_write(vcpu, result & 0xffffffff);
2017 static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
2019 trace_kvm_hv_hypercall_done(result);
2020 kvm_hv_hypercall_set_result(vcpu, result);
2021 ++vcpu->stat.hypercalls;
2022 return kvm_skip_emulated_instruction(vcpu);
2025 static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
2027 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
2030 static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2032 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
2033 struct eventfd_ctx *eventfd;
2035 if (unlikely(!hc->fast)) {
2037 gpa_t gpa = hc->ingpa;
2039 if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
2040 offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
2041 return HV_STATUS_INVALID_ALIGNMENT;
2043 ret = kvm_vcpu_read_guest(vcpu, gpa,
2044 &hc->ingpa, sizeof(hc->ingpa));
2046 return HV_STATUS_INVALID_ALIGNMENT;
2050 * Per spec, bits 32-47 contain the extra "flag number". However, we
2051 * have no use for it, and in all known use cases it is zero, so just
2052 * report lookup failure if it isn't.
2054 if (hc->ingpa & 0xffff00000000ULL)
2055 return HV_STATUS_INVALID_PORT_ID;
2056 /* remaining bits are reserved-zero */
2057 if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
2058 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2060 /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
2062 eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
2065 return HV_STATUS_INVALID_PORT_ID;
2067 eventfd_signal(eventfd, 1);
2068 return HV_STATUS_SUCCESS;
2071 static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
2074 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2075 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2076 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2077 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2084 static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
2089 for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
2090 _kvm_read_sse_reg(reg, &hc->xmm[reg]);
2094 static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
2096 if (!hv_vcpu->enforce_cpuid)
2100 case HVCALL_NOTIFY_LONG_SPIN_WAIT:
2101 return hv_vcpu->cpuid_cache.enlightenments_ebx &&
2102 hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
2103 case HVCALL_POST_MESSAGE:
2104 return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
2105 case HVCALL_SIGNAL_EVENT:
2106 return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
2107 case HVCALL_POST_DEBUG_DATA:
2108 case HVCALL_RETRIEVE_DEBUG_DATA:
2109 case HVCALL_RESET_DEBUG_SESSION:
2111 * Return 'true' when SynDBG is disabled so the resulting code
2112 * will be HV_STATUS_INVALID_HYPERCALL_CODE.
2114 return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
2115 hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
2116 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2117 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2118 if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2119 HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2122 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2123 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2124 return hv_vcpu->cpuid_cache.enlightenments_eax &
2125 HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
2126 case HVCALL_SEND_IPI_EX:
2127 if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2128 HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2131 case HVCALL_SEND_IPI:
2132 return hv_vcpu->cpuid_cache.enlightenments_eax &
2133 HV_X64_CLUSTER_IPI_RECOMMENDED;
2141 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
2143 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2144 struct kvm_hv_hcall hc;
2145 u64 ret = HV_STATUS_SUCCESS;
2148 * The hypercall instruction generates a #UD from non-zero CPL and in real mode
2151 if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
2152 kvm_queue_exception(vcpu, UD_VECTOR);
2156 #ifdef CONFIG_X86_64
2157 if (is_64_bit_mode(vcpu)) {
2158 hc.param = kvm_rcx_read(vcpu);
2159 hc.ingpa = kvm_rdx_read(vcpu);
2160 hc.outgpa = kvm_r8_read(vcpu);
2164 hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
2165 (kvm_rax_read(vcpu) & 0xffffffff);
2166 hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
2167 (kvm_rcx_read(vcpu) & 0xffffffff);
2168 hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
2169 (kvm_rsi_read(vcpu) & 0xffffffff);
2172 hc.code = hc.param & 0xffff;
2173 hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
2174 hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
2175 hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
2176 hc.rep = !!(hc.rep_cnt || hc.rep_idx);
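/*
 * Illustrative summary (not from the original source) of the hypercall
 * input value decoded above, per the Hyper-V TLFS:
 *
 *	bits  0-15	call code
 *	bit   16	fast (register-based) calling convention
 *	bits 32-43	rep count
 *	bits 48-59	rep start index
 *
 * rep_cnt/rep_idx are only meaningful for rep hypercalls, hence hc.rep.
 */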
2178 trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
2179 hc.ingpa, hc.outgpa);
2181 if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
2182 ret = HV_STATUS_ACCESS_DENIED;
2183 goto hypercall_complete;
2186 if (hc.fast && is_xmm_fast_hypercall(&hc)) {
2187 if (unlikely(hv_vcpu->enforce_cpuid &&
2188 !(hv_vcpu->cpuid_cache.features_edx &
2189 HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
2190 kvm_queue_exception(vcpu, UD_VECTOR);
2194 kvm_hv_hypercall_read_xmm(&hc);
2198 case HVCALL_NOTIFY_LONG_SPIN_WAIT:
2199 if (unlikely(hc.rep)) {
2200 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2203 kvm_vcpu_on_spin(vcpu, true);
2205 case HVCALL_SIGNAL_EVENT:
2206 if (unlikely(hc.rep)) {
2207 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2210 ret = kvm_hvcall_signal_event(vcpu, &hc);
2211 if (ret != HV_STATUS_INVALID_PORT_ID)
2213 fallthrough; /* maybe userspace knows this conn_id */
2214 case HVCALL_POST_MESSAGE:
2215 /* don't bother userspace if it has no way to handle it */
2216 if (unlikely(hc.rep || !to_hv_synic(vcpu)->active)) {
2217 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2220 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
2221 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
2222 vcpu->run->hyperv.u.hcall.input = hc.param;
2223 vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
2224 vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
2225 vcpu->arch.complete_userspace_io =
2226 kvm_hv_hypercall_complete_userspace;
2228 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2229 if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
2230 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2233 ret = kvm_hv_flush_tlb(vcpu, &hc, false);
2235 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2236 if (unlikely(hc.rep)) {
2237 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2240 ret = kvm_hv_flush_tlb(vcpu, &hc, false);
2242 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2243 if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
2244 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2247 ret = kvm_hv_flush_tlb(vcpu, &hc, true);
2249 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2250 if (unlikely(hc.rep)) {
2251 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2254 ret = kvm_hv_flush_tlb(vcpu, &hc, true);
2256 case HVCALL_SEND_IPI:
2257 if (unlikely(hc.rep)) {
2258 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2261 ret = kvm_hv_send_ipi(vcpu, &hc, false);
2263 case HVCALL_SEND_IPI_EX:
2264 if (unlikely(hc.fast || hc.rep)) {
2265 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2268 ret = kvm_hv_send_ipi(vcpu, &hc, true);
2270 case HVCALL_POST_DEBUG_DATA:
2271 case HVCALL_RETRIEVE_DEBUG_DATA:
2272 if (unlikely(hc.fast)) {
2273 ret = HV_STATUS_INVALID_PARAMETER;
2277 case HVCALL_RESET_DEBUG_SESSION: {
2278 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
2280 if (!kvm_hv_is_syndbg_enabled(vcpu)) {
2281 ret = HV_STATUS_INVALID_HYPERCALL_CODE;
2285 if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
2286 ret = HV_STATUS_OPERATION_DENIED;
2289 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
2290 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
2291 vcpu->run->hyperv.u.hcall.input = hc.param;
2292 vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
2293 vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
2294 vcpu->arch.complete_userspace_io =
2295 kvm_hv_hypercall_complete_userspace;
2299 ret = HV_STATUS_INVALID_HYPERCALL_CODE;
2304 return kvm_hv_hypercall_complete(vcpu, ret);
2307 void kvm_hv_init_vm(struct kvm *kvm)
2309 struct kvm_hv *hv = to_kvm_hv(kvm);
2311 mutex_init(&hv->hv_lock);
2312 idr_init(&hv->conn_to_evt);
2315 void kvm_hv_destroy_vm(struct kvm *kvm)
2317 struct kvm_hv *hv = to_kvm_hv(kvm);
2318 struct eventfd_ctx *eventfd;
2321 idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
2322 eventfd_ctx_put(eventfd);
2323 idr_destroy(&hv->conn_to_evt);
2326 static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
2328 struct kvm_hv *hv = to_kvm_hv(kvm);
2329 struct eventfd_ctx *eventfd;
2332 eventfd = eventfd_ctx_fdget(fd);
2333 if (IS_ERR(eventfd))
2334 return PTR_ERR(eventfd);
2336 mutex_lock(&hv->hv_lock);
2337 ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
2338 GFP_KERNEL_ACCOUNT);
2339 mutex_unlock(&hv->hv_lock);
2346 eventfd_ctx_put(eventfd);
2350 static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
2352 struct kvm_hv *hv = to_kvm_hv(kvm);
2353 struct eventfd_ctx *eventfd;
2355 mutex_lock(&hv->hv_lock);
2356 eventfd = idr_remove(&hv->conn_to_evt, conn_id);
2357 mutex_unlock(&hv->hv_lock);
2362 synchronize_srcu(&kvm->srcu);
2363 eventfd_ctx_put(eventfd);
2367 int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
2369 if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
2370 (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
2373 if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
2374 return kvm_hv_eventfd_deassign(kvm, args->conn_id);
2375 return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
2378 int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
2379 struct kvm_cpuid_entry2 __user *entries)
2381 uint16_t evmcs_ver = 0;
2382 struct kvm_cpuid_entry2 cpuid_entries[] = {
2383 { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
2384 { .function = HYPERV_CPUID_INTERFACE },
2385 { .function = HYPERV_CPUID_VERSION },
2386 { .function = HYPERV_CPUID_FEATURES },
2387 { .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
2388 { .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
2389 { .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
2390 { .function = HYPERV_CPUID_SYNDBG_INTERFACE },
2391 { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
2392 { .function = HYPERV_CPUID_NESTED_FEATURES },
2394 int i, nent = ARRAY_SIZE(cpuid_entries);
2396 if (kvm_x86_ops.nested_ops->get_evmcs_version)
2397 evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);
2399 /* Skip NESTED_FEATURES if eVMCS is not supported */
2403 if (cpuid->nent < nent)
2406 if (cpuid->nent > nent)
2409 for (i = 0; i < nent; i++) {
2410 struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
2413 switch (ent->function) {
2414 case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
2415 memcpy(signature, "Linux KVM Hv", 12);
2417 ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
2418 ent->ebx = signature[0];
2419 ent->ecx = signature[1];
2420 ent->edx = signature[2];
2423 case HYPERV_CPUID_INTERFACE:
2424 ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
2427 case HYPERV_CPUID_VERSION:
2429 * We implement some Hyper-V 2016 functions so let's use
2432 ent->eax = 0x00003839;
2433 ent->ebx = 0x000A0000;
2436 case HYPERV_CPUID_FEATURES:
2437 ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
2438 ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
2439 ent->eax |= HV_MSR_SYNIC_AVAILABLE;
2440 ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
2441 ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
2442 ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
2443 ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
2444 ent->eax |= HV_MSR_RESET_AVAILABLE;
2445 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
2446 ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
2447 ent->eax |= HV_ACCESS_REENLIGHTENMENT;
2449 ent->ebx |= HV_POST_MESSAGES;
2450 ent->ebx |= HV_SIGNAL_EVENTS;
2452 ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
2453 ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
2454 ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
2456 ent->ebx |= HV_DEBUGGING;
2457 ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
2458 ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
2461 * Direct Synthetic timers only make sense with in-kernel
2464 if (!vcpu || lapic_in_kernel(vcpu))
2465 ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
2469 case HYPERV_CPUID_ENLIGHTMENT_INFO:
2470 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
2471 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
2472 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
2473 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
2474 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
2476 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
2477 if (!cpu_smt_possible())
2478 ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
2480 * Default number of spinlock retry attempts, matches
2483 ent->ebx = 0x00000FFF;
2487 case HYPERV_CPUID_IMPLEMENT_LIMITS:
2488 /* Maximum number of virtual processors */
2489 ent->eax = KVM_MAX_VCPUS;
2491 * Maximum number of logical processors, matches
2498 case HYPERV_CPUID_NESTED_FEATURES:
2499 ent->eax = evmcs_ver;
2503 case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
2504 memcpy(signature, "Linux KVM Hv", 12);
2507 ent->ebx = signature[0];
2508 ent->ecx = signature[1];
2509 ent->edx = signature[2];
2512 case HYPERV_CPUID_SYNDBG_INTERFACE:
2513 memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
2514 ent->eax = signature[0];
2517 case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
2518 ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
2526 if (copy_to_user(entries, cpuid_entries,
2527 nent * sizeof(struct kvm_cpuid_entry2)))