/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
        return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
        if (sint_value & HV_SYNIC_SINT_MASKED)
                return -1;
        return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
                                       int vector)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        return true;
        }
        return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
                                      int vector)
{
        int i;
        u64 sint_value;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                sint_value = synic_read_sint(synic, i);
                if (synic_get_sint_vector(sint_value) == vector &&
                    sint_value & HV_SYNIC_SINT_AUTO_EOI)
                        return true;
        }
        return false;
}

static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
                                int vector)
{
        if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
                return;

        if (synic_has_vector_connected(synic, vector))
                __set_bit(vector, synic->vec_bitmap);
        else
                __clear_bit(vector, synic->vec_bitmap);

        if (synic_has_vector_auto_eoi(synic, vector))
                __set_bit(vector, synic->auto_eoi_bitmap);
        else
                __clear_bit(vector, synic->auto_eoi_bitmap);
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
                          u64 data, bool host)
{
        int vector, old_vector;
        bool masked;

        vector = data & HV_SYNIC_SINT_VECTOR_MASK;
        masked = data & HV_SYNIC_SINT_MASKED;

        /*
         * Valid vectors are 16-255; however, nested Hyper-V attempts to write
         * the default '0x10000' value on boot, and this should not #GP. We
         * also need to allow zero-initing the register from the host.
         */
        if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
                return 1;
        /*
         * Guest may configure multiple SINTs to use the same vector, so
         * we maintain a bitmap of vectors handled by synic, and a
         * bitmap of vectors with auto-eoi behavior. The bitmaps are
         * updated here, and atomically queried on fast paths.
         */
        old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;

        atomic64_set(&synic->sint[sint], data);

        synic_update_vector(synic, old_vector);

        synic_update_vector(synic, vector);

        /* Load SynIC vectors into EOI exit bitmap */
        kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
        return 0;
}

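/*
 * The VP index defaults to the vCPU index (see kvm_hv_vcpu_postcreate())
 * but the host may remap it via HV_X64_MSR_VP_INDEX, so try the identity
 * mapping first and fall back to a linear search.
 */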
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
        struct kvm_vcpu *vcpu = NULL;
        int i;

        if (vpidx >= KVM_MAX_VCPUS)
                return NULL;

        vcpu = kvm_get_vcpu(kvm, vpidx);
        if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
                        return vcpu;
        return NULL;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_hv_synic *synic;

        vcpu = get_vcpu_by_vpidx(kvm, vpidx);
        if (!vcpu)
                return NULL;
        synic = vcpu_to_synic(vcpu);
        return (synic->active) ? synic : NULL;
}

static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
                                         u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *msg;
        struct hv_message_page *msg_page;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page)) {
                vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
                         gpa);
                return;
        }
        msg_page = kmap_atomic(page);

        msg = &msg_page->sint_message[sint];
        msg->header.message_flags.msg_pending = 0;

        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int gsi, idx, stimers_pending;

        trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

        if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
                synic_clear_sint_msg_pending(synic, sint);

        /* Try to deliver pending Hyper-V SynIC timer messages */
        stimers_pending = 0;
        for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
                stimer = &hv_vcpu->stimer[idx];
                if (stimer->msg_pending &&
                    (stimer->config & HV_STIMER_ENABLE) &&
                    HV_STIMER_SINT(stimer->config) == sint) {
                        set_bit(stimer->index,
                                hv_vcpu->stimer_pending_bitmap);
                        stimers_pending++;
                }
        }
        if (stimers_pending)
                kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = atomic_read(&synic->sint_to_gsi[sint]);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
        hv_vcpu->exit.u.synic.msr = msr;
        hv_vcpu->exit.u.synic.control = synic->control;
        hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
        hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

        kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
                         u32 msr, u64 data, bool host)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        int ret;

        if (!synic->active && !host)
                return 1;

        trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                synic->control = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SVERSION:
                if (!host) {
                        ret = 1;
                        break;
                }
                synic->version = data;
                break;
        case HV_X64_MSR_SIEFP:
                if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->evt_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SIMP:
                if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->msg_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_EOM: {
                int i;

                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
        }
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
                         bool host)
{
        int ret;

        if (!synic->active && !host)
                return 1;

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                *pdata = synic->control;
                break;
        case HV_X64_MSR_SVERSION:
                *pdata = synic->version;
                break;
        case HV_X64_MSR_SIEFP:
                *pdata = synic->evt_page;
                break;
        case HV_X64_MSR_SIMP:
                *pdata = synic->msg_page;
                break;
        case HV_X64_MSR_EOM:
                *pdata = 0;
                break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}

static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_lapic_irq irq;
        int ret, vector;

        if (sint >= ARRAY_SIZE(synic->sint))
                return -EINVAL;

        vector = synic_get_sint_vector(synic_read_sint(synic, sint));
        if (vector < 0)
                return -ENOENT;

        memset(&irq, 0, sizeof(irq));
        irq.shorthand = APIC_DEST_SELF;
        irq.dest_mode = APIC_DEST_PHYSICAL;
        irq.delivery_mode = APIC_DM_FIXED;
        irq.vector = vector;
        irq.level = 1;

        ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
        trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
        return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vpidx);
        if (!synic)
                return -EINVAL;

        return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        int i;

        trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vpidx);
        if (!synic)
                return -EINVAL;

        if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
                return -EINVAL;

        atomic_set(&synic->sint_to_gsi[sint], gsi);
        return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
        struct kvm_irq_routing_table *irq_rt;
        struct kvm_kernel_irq_routing_entry *e;
        u32 gsi;

        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));

        for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        if (e->type == KVM_IRQ_ROUTING_HV_SINT)
                                kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
                                                    e->hv_sint.sint, gsi);
                }
        }
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
        int i;

        memset(synic, 0, sizeof(*synic));
        synic->version = HV_SYNIC_VERSION_1;
        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
                atomic_set(&synic->sint_to_gsi[i], -1);
        }
}

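/* Return the current partition reference time, in 100ns units. */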
static u64 get_time_ref_counter(struct kvm *kvm)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct kvm_vcpu *vcpu;
        u64 tsc;

        /*
         * The guest has not set up the TSC page or the clock isn't
         * stable, fall back to get_kvmclock_ns.
         */
        if (!hv->tsc_ref.tsc_sequence)
                return div_u64(get_kvmclock_ns(kvm), 100);

        vcpu = kvm_get_vcpu(kvm, 0);
        tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
                + hv->tsc_ref.tsc_offset;
}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
                                bool vcpu_kick)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        set_bit(stimer->index,
                vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
        if (vcpu_kick)
                kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
                                    stimer->index);

        hrtimer_cancel(&stimer->timer);
        clear_bit(stimer->index,
                  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        stimer->msg_pending = false;
        stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
        struct kvm_vcpu_hv_stimer *stimer;

        stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
        trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
                                     stimer->index);
        stimer_mark_pending(stimer, true);

        return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
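/*
 * Note: stimer->count and stimer->exp_time are in 100ns units (the Hyper-V
 * time reference format), while hrtimers are armed in nanoseconds; hence
 * the "100 *" conversions below.
 */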
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
        u64 time_now;
        ktime_t ktime_now;

        time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
        ktime_now = ktime_get();

        if (stimer->config & HV_STIMER_PERIODIC) {
                if (stimer->exp_time) {
                        if (time_now >= stimer->exp_time) {
                                u64 remainder;

                                div64_u64_rem(time_now - stimer->exp_time,
                                              stimer->count, &remainder);
                                stimer->exp_time =
                                        time_now + (stimer->count - remainder);
                        }
                } else
                        stimer->exp_time = time_now + stimer->count;

                trace_kvm_hv_stimer_start_periodic(
                                        stimer_to_vcpu(stimer)->vcpu_id,
                                        stimer->index,
                                        time_now, stimer->exp_time);

                hrtimer_start(&stimer->timer,
                              ktime_add_ns(ktime_now,
                                           100 * (stimer->exp_time - time_now)),
                              HRTIMER_MODE_ABS);
                return 0;
        }
        stimer->exp_time = stimer->count;
        if (time_now >= stimer->count) {
                /*
                 * Expire timer according to Hypervisor Top-Level Functional
                 * Specification v4 (15.3.1):
                 * "If a one shot is enabled and the specified count is in
                 * the past, it will expire immediately."
                 */
                stimer_mark_pending(stimer, false);
                return 0;
        }

        trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
                                           stimer->index,
                                           time_now, stimer->count);

        hrtimer_start(&stimer->timer,
                      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
                      HRTIMER_MODE_ABS);
        return 0;
}

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
                             bool host)
{
        trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, config, host);

        stimer_cleanup(stimer);
        if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
                config &= ~HV_STIMER_ENABLE;
        stimer->config = config;
        stimer_mark_pending(stimer, false);
        return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                            bool host)
{
        trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
                                      stimer->index, count, host);

        stimer_cleanup(stimer);
        stimer->count = count;
        if (stimer->count == 0)
                stimer->config &= ~HV_STIMER_ENABLE;
        else if (stimer->config & HV_STIMER_AUTOENABLE)
                stimer->config |= HV_STIMER_ENABLE;
        stimer_mark_pending(stimer, false);
        return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
        *pconfig = stimer->config;
        return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
        *pcount = stimer->count;
        return 0;
}

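/*
 * Write @src_msg into the SynIC message page slot for @sint.  If the slot
 * is still busy (message_type != HVMSG_NONE), only flag msg_pending so the
 * guest knows to signal EOM, and report -EAGAIN to the caller.
 */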
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
                             struct hv_message *src_msg)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *dst_msg;
        int r;
        struct hv_message_page *msg_page;

        if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
                return -ENOENT;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                return -EFAULT;

        msg_page = kmap_atomic(page);
        dst_msg = &msg_page->sint_message[sint];
        if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
                         src_msg->header.message_type) != HVMSG_NONE) {
                dst_msg->header.message_flags.msg_pending = 1;
                r = -EAGAIN;
        } else {
                memcpy(&dst_msg->u.payload, &src_msg->u.payload,
                       src_msg->header.payload_size);
                dst_msg->header.message_type = src_msg->header.message_type;
                dst_msg->header.payload_size = src_msg->header.payload_size;
                r = synic_set_irq(synic, sint);
                if (r >= 1)
                        r = 0;
                else if (r == 0)
                        r = -EFAULT;
        }
        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        return r;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        payload->expiration_time = stimer->exp_time;
        payload->delivery_time = get_time_ref_counter(vcpu->kvm);
        return synic_deliver_msg(vcpu_to_synic(vcpu),
                                 HV_STIMER_SINT(stimer->config), msg);
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
        int r;

        stimer->msg_pending = true;
        r = stimer_send_msg(stimer);
        trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, r);
        if (!r) {
                stimer->msg_pending = false;
                if (!(stimer->config & HV_STIMER_PERIODIC))
                        stimer->config &= ~HV_STIMER_ENABLE;
        }
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        u64 time_now, exp_time;
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
                        stimer = &hv_vcpu->stimer[i];
                        if (stimer->config & HV_STIMER_ENABLE) {
                                exp_time = stimer->exp_time;

                                if (exp_time) {
                                        time_now =
                                                get_time_ref_counter(vcpu->kvm);
                                        if (time_now >= exp_time)
                                                stimer_expiration(stimer);
                                }

                                if ((stimer->config & HV_STIMER_ENABLE) &&
                                    stimer->count) {
                                        if (!stimer->msg_pending)
                                                stimer_start(stimer);
                                } else
                                        stimer_cleanup(stimer);
                        }
                }
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_cleanup(&hv_vcpu->stimer[i]);
}

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        memset(&msg->header, 0, sizeof(msg->header));
        msg->header.message_type = HVMSG_TIMER_EXPIRED;
        msg->header.payload_size = sizeof(*payload);

        payload->timer_index = stimer->index;
        payload->expiration_time = 0;
        payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
        memset(stimer, 0, sizeof(*stimer));
        stimer->index = timer_index;
        hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        stimer->timer.function = stimer_timer_callback;
        stimer_prepare_msg(stimer);
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        synic_init(&hv_vcpu->synic);

        bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
}

void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);

        hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

        /*
         * Hyper-V SynIC auto-EOI SINTs are not compatible with APICv,
         * so deactivate APICv.
         */
        kvm_vcpu_deactivate_apicv(vcpu);
        synic->active = true;
        synic->dont_zero_synic_pages = dont_zero_synic_pages;
        return 0;
}

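/*
 * Partition-wide MSR state lives in struct kvm_hv and is serialized by
 * hv_lock (see kvm_hv_set_msr_common()/kvm_hv_get_msr_common() below);
 * everything else is per-vCPU state in struct kvm_vcpu_hv.
 */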
static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_RESET:
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
        case HV_X64_MSR_TSC_EMULATION_STATUS:
                r = true;
                break;
        }

        return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        *pdata = hv->hv_crash_param[index];
        return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        *pdata = hv->hv_crash_ctl;
        return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (host)
                hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

        if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

                vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
                          hv->hv_crash_param[0],
                          hv->hv_crash_param[1],
                          hv->hv_crash_param[2],
                          hv->hv_crash_param[3],
                          hv->hv_crash_param[4]);

                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
        }

        return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        hv->hv_crash_param[index] = data;
        return 0;
}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
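/*
 * Worked example (illustrative numbers): with tsc_shift = 0 and
 * tsc_to_system_mul = 2^32, kvmclock counts one nanosecond per tick,
 * i.e. a 1 GHz TSC.  The formula above then gives
 * scale = 2^32 * 2^32 / 100 = 2^64 / 100, so ticks * scale / 2^64 =
 * ticks / 100: one 100ns unit per 100 ticks, as expected at 1 GHz.
 */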
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
                                        HV_REFERENCE_TSC_PAGE *tsc_ref)
{
        u64 max_mul;

        if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
                return false;

        /*
         * Check if scale would overflow; if so, we use the time ref counter:
         *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
         *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
         *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
         */
        max_mul = 100ull << (32 - hv_clock->tsc_shift);
        if (hv_clock->tsc_to_system_mul >= max_mul)
                return false;

        /*
         * Otherwise compute the scale and offset according to the formulas
         * derived above.
         */
        tsc_ref->tsc_scale =
                mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
                                hv_clock->tsc_to_system_mul,
                                100);

        tsc_ref->tsc_offset = hv_clock->system_time;
        do_div(tsc_ref->tsc_offset, 100);
        tsc_ref->tsc_offset -=
                mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
        return true;
}

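/*
 * The TSC page is published with a seqlock-like protocol: the sequence is
 * first zeroed to force the guest onto the time reference count MSR while
 * scale/offset are rewritten, and the final non-zero sequence write makes
 * the new parameters visible.
 */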
void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        u32 tsc_seq;
        u64 gfn;

        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                return;

        mutex_lock(&kvm->arch.hyperv.hv_lock);
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;

        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
        /*
         * Because the TSC parameters only vary when there is a
         * change in the master clock, do not bother with caching.
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
                goto out_unlock;

        /*
         * While we're computing and writing the parameters, force the
         * guest to use the time reference count MSR.
         */
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
                goto out_unlock;

        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
                goto out_unlock;

        /* Ensure sequence is zero before writing the rest of the struct. */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
                goto out_unlock;

        /*
         * Now switch to the TSC page mechanism by writing the sequence.
         */
        tsc_seq++;
        if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
                tsc_seq = 1;

        /* Write the struct entirely before the non-zero sequence. */
        smp_wmb();

        hv->tsc_ref.tsc_sequence = tsc_seq;
        kvm_write_guest(kvm, gfn_to_gpa(gfn),
                        &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
out_unlock:
        mutex_unlock(&kvm->arch.hyperv.hv_lock);
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                hv->hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!hv->hv_guest_os_id)
                        hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!hv->hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        hv->hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                ((unsigned char *)instructions)[3] = 0xc3; /* ret */
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                hv->hv_hypercall = data;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_REFERENCE_TSC:
                hv->hv_tsc_page = data;
                if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 data);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        case HV_X64_MSR_RESET:
                if (data == 1) {
                        vcpu_debug(vcpu, "hyper-v reset requested\n");
                        kvm_make_request(KVM_REQ_HV_RESET, vcpu);
                }
                break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
                hv->hv_reenlightenment_control = data;
                break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
                hv->hv_tsc_emulation_control = data;
                break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
                hv->hv_tsc_emulation_status = data;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                /* read-only, but still ignore it if host-initiated */
                if (!host)
                        return 1;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }
        return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);

        return div_u64(utime + stime, 100);
}

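/*
 * HV_X64_MSR_VP_RUNTIME is backed by the host task's cputime: reads return
 * current_task_runtime_100ns() + runtime_offset, and only the host may
 * write the MSR, which adjusts the offset (presumably for save/restore).
 */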
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
                if (!host || (u32)data >= KVM_MAX_VCPUS)
                        return 1;
                hv->vp_index = (u32)data;
                break;
        case HV_X64_MSR_VP_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;

                if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
                        hv->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0))
                                return 1;
                        break;
                }
                gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
                addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                hv->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
                        return 1;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
                hv->runtime_offset = data - current_task_runtime_100ns();
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
                                         data, host);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
                                        data, host);
        }
        case HV_X64_MSR_TSC_FREQUENCY:
        case HV_X64_MSR_APIC_FREQUENCY:
                /* read-only, but still ignore it if host-initiated */
                if (!host)
                        return 1;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }

        return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                data = hv->hv_guest_os_id;
                break;
        case HV_X64_MSR_HYPERCALL:
                data = hv->hv_hypercall;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                data = get_time_ref_counter(kvm);
                break;
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_get_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 pdata);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        case HV_X64_MSR_RESET:
                data = 0;
                break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
                data = hv->hv_reenlightenment_control;
                break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
                data = hv->hv_tsc_emulation_control;
                break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
                data = hv->hv_tsc_emulation_status;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
                          bool host)
{
        u64 data = 0;
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
                data = hv->vp_index;
                break;
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_VP_ASSIST_PAGE:
                data = hv->hv_vapic;
                break;
        case HV_X64_MSR_VP_RUNTIME:
                data = current_task_runtime_100ns() + hv->runtime_offset;
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
                                         pdata);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
                                        pdata);
        }
        case HV_X64_MSR_TSC_FREQUENCY:
                data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
                break;
        case HV_X64_MSR_APIC_FREQUENCY:
                data = APIC_BUS_FREQUENCY;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata, host);
}

static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)
{
        int i = 0, j;

        if (!(valid_bank_mask & BIT_ULL(bank_no)))
                return -1;

        for (j = 0; j < bank_no; j++)
                if (valid_bank_mask & BIT_ULL(j))
                        i++;

        return i;
}

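/*
 * Illustrative example: valid_bank_mask = 0x5 means banks 0 and 2 are
 * present, covering VP indices 0-63 and 128-191.  bank_contents[] is
 * packed, so bank 2's bits live in sparse_banks[1]:
 * get_sparse_bank_no(0x5, 2) == 1, while get_sparse_bank_no(0x5, 1) == -1.
 */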
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
                            u16 rep_cnt, bool ex)
{
        struct kvm *kvm = current_vcpu->kvm;
        struct kvm_vcpu_hv *hv_current = &current_vcpu->arch.hyperv;
        struct hv_tlb_flush_ex flush_ex;
        struct hv_tlb_flush flush;
        struct kvm_vcpu *vcpu;
        unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0};
        unsigned long valid_bank_mask = 0;
        u64 sparse_banks[64];
        int sparse_banks_len, i;
        bool all_cpus;

        if (!ex) {
                if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;

                trace_kvm_hv_flush_tlb(flush.processor_mask,
                                       flush.address_space, flush.flags);

                sparse_banks[0] = flush.processor_mask;
                all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
        } else {
                if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
                                            sizeof(flush_ex))))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;

                trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
                                          flush_ex.hv_vp_set.format,
                                          flush_ex.address_space,
                                          flush_ex.flags);

                valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
                all_cpus = flush_ex.hv_vp_set.format !=
                        HV_GENERIC_SET_SPARSE_4K;

                sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
                        sizeof(sparse_banks[0]);

                if (!sparse_banks_len && !all_cpus)
                        goto ret_success;

                if (!all_cpus &&
                    kvm_read_guest(kvm,
                                   ingpa + offsetof(struct hv_tlb_flush_ex,
                                                    hv_vp_set.bank_contents),
                                   sparse_banks,
                                   sparse_banks_len))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;
        }

        cpumask_clear(&hv_current->tlb_lush);

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
                int bank = hv->vp_index / 64, sbank = 0;

                if (!all_cpus) {
                        /* Banks >64 can't be represented */
                        if (bank >= 64)
                                continue;

                        /* Non-ex hypercalls can only address first 64 vCPUs */
                        if (!ex && bank)
                                continue;

                        if (ex) {
                                /*
                                 * Check whether the bank of this vCPU is in
                                 * the sparse set and get the sparse bank
                                 * number.
                                 */
                                sbank = get_sparse_bank_no(valid_bank_mask,
                                                           bank);

                                if (sbank < 0)
                                        continue;
                        }

                        if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64)))
                                continue;
                }

                /*
                 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we
                 * can't analyze it here, flush TLB regardless of the specified
                 * address space.
                 */
                __set_bit(i, vcpu_bitmap);
        }

        kvm_make_vcpus_request_mask(kvm,
                                    KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
                                    vcpu_bitmap, &hv_current->tlb_lush);

ret_success:
        /* We always do full TLB flush, set rep_done = rep_cnt. */
        return (u64)HV_STATUS_SUCCESS |
                ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
}

static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
        bool longmode;

        longmode = is_64_bit_mode(vcpu);
        if (longmode)
                kvm_register_write(vcpu, VCPU_REGS_RAX, result);
        else {
                kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
                kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
        }
}

static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
        kvm_hv_hypercall_set_result(vcpu, result);
        ++vcpu->stat.hypercalls;
        return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
        return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}

static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
{
        struct eventfd_ctx *eventfd;

        if (unlikely(!fast)) {
                int ret;
                gpa_t gpa = param;

                if ((gpa & (__alignof__(param) - 1)) ||
                    offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
                        return HV_STATUS_INVALID_ALIGNMENT;

                ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
                if (ret < 0)
                        return HV_STATUS_INVALID_ALIGNMENT;
        }

        /*
         * Per spec, bits 32-47 contain the extra "flag number". However, we
         * have no use for it, and in all known use cases it is zero, so just
         * report lookup failure if it isn't.
         */
        if (param & 0xffff00000000ULL)
                return HV_STATUS_INVALID_PORT_ID;
        /* remaining bits are reserved-zero */
        if (param & ~KVM_HYPERV_CONN_ID_MASK)
                return HV_STATUS_INVALID_HYPERCALL_INPUT;

        /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
        rcu_read_lock();
        eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
        rcu_read_unlock();
        if (!eventfd)
                return HV_STATUS_INVALID_PORT_ID;

        eventfd_signal(eventfd, 1);
        return HV_STATUS_SUCCESS;
}

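/*
 * Hypercall calling convention (TLFS): the guest passes the input value in
 * RCX and the input/output GPAs in RDX/R8 on 64-bit, or in EDX:EAX,
 * EBX:ECX and EDI:ESI on 32-bit; the result is returned in RAX (EDX:EAX on
 * 32-bit), as implemented in kvm_hv_hypercall_set_result() above.
 */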
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
        uint16_t code, rep_idx, rep_cnt;
        bool fast, longmode, rep;

        /*
         * Per the Hyper-V spec, the hypercall instruction generates #UD
         * from non-zero CPL and real mode.
         */
        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        longmode = is_64_bit_mode(vcpu);

        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
                ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
                outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_register_read(vcpu, VCPU_REGS_RCX);
                ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
                outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
        }
#endif

        code = param & 0xffff;
        fast = !!(param & HV_HYPERCALL_FAST_BIT);
        rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
        rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
        rep = !!(rep_cnt || rep_idx);

        trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

        switch (code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                if (unlikely(rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                kvm_vcpu_on_spin(vcpu, true);
                break;
        case HVCALL_SIGNAL_EVENT:
                if (unlikely(rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
                if (ret != HV_STATUS_INVALID_PORT_ID)
                        break;
                /* maybe userspace knows this conn_id: fall through */
        case HVCALL_POST_MESSAGE:
                /* don't bother userspace if it has no way to handle it */
                if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                vcpu->run->exit_reason = KVM_EXIT_HYPERV;
                vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
                vcpu->run->hyperv.u.hcall.input = param;
                vcpu->run->hyperv.u.hcall.params[0] = ingpa;
                vcpu->run->hyperv.u.hcall.params[1] = outgpa;
                vcpu->arch.complete_userspace_io =
                                kvm_hv_hypercall_complete_userspace;
                return 0;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
                if (unlikely(fast || !rep_cnt || rep_idx)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
                break;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
                if (unlikely(fast || rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
                break;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
                if (unlikely(fast || !rep_cnt || rep_idx)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
                break;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
                if (unlikely(fast || rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
                break;
        default:
                ret = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

        return kvm_hv_hypercall_complete(vcpu, ret);
}

void kvm_hv_init_vm(struct kvm *kvm)
{
        mutex_init(&kvm->arch.hyperv.hv_lock);
        idr_init(&kvm->arch.hyperv.conn_to_evt);
}

void kvm_hv_destroy_vm(struct kvm *kvm)
{
        struct eventfd_ctx *eventfd;
        int i;

        idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
                eventfd_ctx_put(eventfd);
        idr_destroy(&kvm->arch.hyperv.conn_to_evt);
}

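/*
 * conn_to_evt maps a Hyper-V connection id to a host eventfd, so that
 * HVCALL_SIGNAL_EVENT for a registered connection can be completed in the
 * kernel (see kvm_hvcall_signal_event()) without exiting to userspace.
 */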
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct eventfd_ctx *eventfd;
        int ret;

        eventfd = eventfd_ctx_fdget(fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&hv->hv_lock);
        ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
                        GFP_KERNEL);
        mutex_unlock(&hv->hv_lock);

        if (ret >= 0)
                return 0;

        if (ret == -ENOSPC)
                ret = -EEXIST;
        eventfd_ctx_put(eventfd);
        return ret;
}

static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct eventfd_ctx *eventfd;

        mutex_lock(&hv->hv_lock);
        eventfd = idr_remove(&hv->conn_to_evt, conn_id);
        mutex_unlock(&hv->hv_lock);

        if (!eventfd)
                return -ENOENT;

        synchronize_srcu(&kvm->srcu);
        eventfd_ctx_put(eventfd);
        return 0;
}

int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
        if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
            (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
                return -EINVAL;

        if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
                return kvm_hv_eventfd_deassign(kvm, args->conn_id);
        return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}