// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#include "x86.h"
#include "xen.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/event_channel.h>

#include "trace.h"

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

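/*
 * Map the guest's shared_info page through a gfn_to_pfn cache and write the
 * Xen wall clock fields directly through that kernel mapping.  A gfn of
 * GPA_INVALID tears the mapping down instead of (re)creating it.
 */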
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct pvclock_wall_clock *wc;
	gpa_t gpa = gfn_to_gpa(gfn);
	u32 *wc_sec_hi;
	u32 wc_version;
	u64 wall_nsec;
	int ret = 0;
	int idx = srcu_read_lock(&kvm->srcu);

	if (gfn == GPA_INVALID) {
		kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
		goto out;
	}

	do {
		ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true,
						gpa, PAGE_SIZE, false);
		if (ret)
			goto out;

		/*
		 * This code mirrors kvm_write_wall_clock() except that it writes
		 * directly through the pfn cache and doesn't mark the page dirty.
		 */
		wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);

		/* It could be invalid again already, so we need to check */
		read_lock_irq(&gpc->lock);
		if (gpc->valid)
			break;
		read_unlock_irq(&gpc->lock);
	} while (1);

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->wc_sec_hi;
		wc = &shinfo->wc;
	} else
#endif
	{
		struct compat_shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->arch.wc_sec_hi;
		wc = &shinfo->wc;
	}

	/* Increment and ensure an odd value */
	wc_version = wc->version = (wc->version + 1) | 1;
	smp_wmb();

	wc->nsec = do_div(wall_nsec, 1000000000);
	wc->sec = (u32)wall_nsec;
	*wc_sec_hi = wall_nsec >> 32;
	smp_wmb();

	wc->version = wc_version + 1;
	read_unlock_irq(&gpc->lock);

	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}

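/*
 * Account the time since the last state change to the old runstate and
 * switch to the new one.  Host scheduler delay accrued while the vCPU was
 * nominally running is credited to RUNSTATE_runnable as stolen time.
 */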
static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	u64 now = get_kvmclock_ns(v->kvm);
	u64 delta_ns = now - vx->runstate_entry_time;
	u64 run_delay = current->sched_info.run_delay;

	if (unlikely(!vx->runstate_entry_time))
		vx->current_runstate = RUNSTATE_offline;

	/*
	 * Time waiting for the scheduler isn't "stolen" if the
	 * vCPU wasn't running anyway.
	 */
	if (vx->current_runstate == RUNSTATE_running) {
		u64 steal_ns = run_delay - vx->last_steal;

		delta_ns -= steal_ns;

		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
	}
	vx->last_steal = run_delay;

	vx->runstate_times[vx->current_runstate] += delta_ns;
	vx->current_runstate = state;
	vx->runstate_entry_time = now;
}

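/*
 * Propagate the updated runstate into the guest's vcpu_runstate_info.  The
 * XEN_RUNSTATE_UPDATE bit is set in state_entry_time for the duration of
 * the update so the guest can detect that the times are being rewritten.
 */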
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	uint64_t state_entry_time;
	unsigned int offset;

	kvm_xen_update_runstate(v, state);

	if (!vx->runstate_set)
		return;

	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);

	offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
#ifdef CONFIG_X86_64
	/*
	 * The only difference is alignment of uint64_t in 32-bit.
	 * So the first field 'state' is accessed directly using
	 * offsetof() (where its offset happens to be zero), while the
	 * remaining fields which are all uint64_t, start at 'offset'
	 * which we tweak here by adding 4.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);

	if (v->kvm->arch.xen.long_mode)
		offset = offsetof(struct vcpu_runstate_info, state_entry_time);
#endif
	/*
	 * First write the updated state_entry_time at the appropriate
	 * location determined by 'offset'.
	 */
	state_entry_time = vx->runstate_entry_time;
	state_entry_time |= XEN_RUNSTATE_UPDATE;

	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
		     sizeof(state_entry_time));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
		     sizeof(state_entry_time));

	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
					  &state_entry_time, offset,
					  sizeof(state_entry_time)))
		return;
	smp_wmb();

	/*
	 * Next, write the new runstate. This is in the *same* place
	 * for 32-bit and 64-bit guests, asserted here for paranoia.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));

	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
					  &vx->current_runstate,
					  offsetof(struct vcpu_runstate_info, state),
					  sizeof(vx->current_runstate)))
		return;

	/*
	 * Write the actual runstate times immediately after the
	 * runstate_entry_time.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof_field(struct compat_vcpu_runstate_info, time));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));

	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
					  &vx->runstate_times[0],
					  offset + sizeof(u64),
					  sizeof(vx->runstate_times)))
		return;
	smp_wmb();

	/*
	 * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
	 * runstate_entry_time field.
	 */
	state_entry_time &= ~XEN_RUNSTATE_UPDATE;
	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
					  &state_entry_time, offset,
					  sizeof(state_entry_time)))
		return;
}

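/*
 * Check whether the vCPU has an event channel upcall pending.  Any bits in
 * the in-kernel shadow of evtchn_pending_sel are first copied into the guest
 * vcpu_info; the common case is a single __get_user() of the guest's
 * evtchn_upcall_pending flag.
 */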
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
	bool atomic = in_atomic() || !task_is_running(current);
	int err;
	u8 rc = 0;

	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */
	struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache;
	struct kvm_memslots *slots = kvm_memslots(v->kvm);
	bool ghc_valid = slots->generation == ghc->generation &&
		!kvm_is_error_hva(ghc->hva) && ghc->memslot;

	unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending);

	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

	/*
	 * For efficiency, this mirrors the checks for using the valid
	 * cache in kvm_read_guest_offset_cached(), but just uses
	 * __get_user() instead. And falls back to the slow path.
	 */
	if (!evtchn_pending_sel && ghc_valid) {
		/* Fast path */
		pagefault_disable();
		err = __get_user(rc, (u8 __user *)ghc->hva + offset);
		pagefault_enable();
		if (!err)
			return rc;
	}

	/* Slow path */

	/*
	 * This function gets called from kvm_vcpu_block() after setting the
	 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
	 * from a HLT. So we really mustn't sleep. If the page ended up absent
	 * at that point, just return 1 in order to trigger an immediate wake,
	 * and we'll end up getting called again from a context where we *can*
	 * fault in the page and wait for it.
	 */
	if (atomic)
		return 1;

	if (!ghc_valid) {
		err = kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len);
		if (err || !ghc->memslot) {
			/*
			 * If this failed, userspace has screwed up the
			 * vcpu_info mapping. No interrupts for you.
			 */
			return 0;
		}
	}

	/*
	 * Now we have a valid (protected by srcu) userspace HVA in
	 * ghc->hva which points to the struct vcpu_info. If there
	 * are any bits in the in-kernel evtchn_pending_sel then
	 * we need to write those to the guest vcpu_info and set
	 * its evtchn_upcall_pending flag. If there aren't any bits
	 * to add, we only want to *check* evtchn_upcall_pending.
	 */
	if (evtchn_pending_sel) {
		bool long_mode = v->kvm->arch.xen.long_mode;

		if (!user_access_begin((void __user *)ghc->hva, sizeof(struct vcpu_info)))
			return 0;

		if (IS_ENABLED(CONFIG_64BIT) && long_mode) {
			struct vcpu_info __user *vi = (void __user *)ghc->hva;

			/* Attempt to set the evtchn_pending_sel bits in the
			 * guest, and if that succeeds then clear the same
			 * bits in the in-kernel version. */
			asm volatile("1:\t" LOCK_PREFIX "orq %0, %1\n"
				     "\tnotq %0\n"
				     "\t" LOCK_PREFIX "andq %0, %2\n"
				     "2:\n"
				     _ASM_EXTABLE_UA(1b, 2b)
				     : "=r" (evtchn_pending_sel),
				       "+m" (vi->evtchn_pending_sel),
				       "+m" (v->arch.xen.evtchn_pending_sel)
				     : "0" (evtchn_pending_sel));
		} else {
			struct compat_vcpu_info __user *vi = (void __user *)ghc->hva;
			u32 evtchn_pending_sel32 = evtchn_pending_sel;

			/* Attempt to set the evtchn_pending_sel bits in the
			 * guest, and if that succeeds then clear the same
			 * bits in the in-kernel version. */
			asm volatile("1:\t" LOCK_PREFIX "orl %0, %1\n"
				     "\tnotl %0\n"
				     "\t" LOCK_PREFIX "andl %0, %2\n"
				     "2:\n"
				     _ASM_EXTABLE_UA(1b, 2b)
				     : "=r" (evtchn_pending_sel32),
				       "+m" (vi->evtchn_pending_sel),
				       "+m" (v->arch.xen.evtchn_pending_sel)
				     : "0" (evtchn_pending_sel32));
		}
		rc = 1;
		unsafe_put_user(rc, (u8 __user *)ghc->hva + offset, err);

	err:
		user_access_end();

		mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
	} else {
		__get_user(rc, (u8 __user *)ghc->hva + offset);
	}

	return rc;
}

int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			r = 0;
		}
		break;
	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		break;
	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			kvm->arch.xen.upcall_vector = data->u.vector;
			r = 0;
		}
		break;
	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;
	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_cache.active)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
			data->u.shared_info.gfn = GPA_INVALID;
		r = 0;
		break;
	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;
	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

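/*
 * Per-vCPU attributes: addresses of the vcpu_info, pvclock time info and
 * runstate areas, plus direct set/adjust of the accumulated runstate times
 * so that userspace can save and restore them.
 */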
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == GPA_INVALID) {
			vcpu->arch.xen.vcpu_info_set = false;
			r = 0;
			break;
		}

		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_info_cache,
					      data->u.gpa,
					      sizeof(struct vcpu_info));
		if (!r) {
			vcpu->arch.xen.vcpu_info_set = true;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == GPA_INVALID) {
			vcpu->arch.xen.vcpu_time_info_set = false;
			r = 0;
			break;
		}

		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_time_info_cache,
					      data->u.gpa,
					      sizeof(struct pvclock_vcpu_time_info));
		if (!r) {
			vcpu->arch.xen.vcpu_time_info_set = true;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.gpa == GPA_INVALID) {
			vcpu->arch.xen.runstate_set = false;
			r = 0;
			break;
		}

		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.runstate_cache,
					      data->u.gpa,
					      sizeof(struct vcpu_runstate_info));
		if (!r)
			vcpu->arch.xen.runstate_set = true;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}

		kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}
		if (get_kvmclock_ns(vcpu->kvm) <
		    data->u.runstate.state_entry_time) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.current_runstate = data->u.runstate.state;
		vcpu->arch.xen.runstate_entry_time =
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
			data->u.runstate.time_offline;
		vcpu->arch.xen.last_steal = current->sched_info.run_delay;

		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline &&
		    data->u.runstate.state != (u64)-1) {
			r = -EINVAL;
			break;
		}
		/* The adjustment must add up */
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}

		if (get_kvmclock_ns(vcpu->kvm) <
		    (vcpu->arch.xen.runstate_entry_time +
		     data->u.runstate.state_entry_time)) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.runstate_entry_time +=
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
			data->u.runstate.time_offline;

		if (data->u.runstate.state <= RUNSTATE_offline)
			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	default:
		break;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_set)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_set)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (vcpu->arch.xen.runstate_set) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			r = 0;
		}
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		data->u.runstate.state_entry_time =
			vcpu->arch.xen.runstate_entry_time;
		data->u.runstate.time_running =
			vcpu->arch.xen.runstate_times[RUNSTATE_running];
		data->u.runstate.time_runnable =
			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
		data->u.runstate.time_blocked =
			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
		data->u.runstate.time_offline =
			vcpu->arch.xen.runstate_times[RUNSTATE_offline];
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		r = -EINVAL;
		break;
	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

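/*
 * Called when the guest writes the hypercall page MSR.  The page at the
 * given GPA is filled either with VMCALL/VMMCALL stubs (when hypercall
 * interception is enabled) or with the corresponding page of the blob that
 * userspace supplied via KVM_XEN_HVM_CONFIG.
 */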
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;

		if (page_num)
			return 1;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		kvm_x86_ops.patch_hypercall(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
			kfree(page);
			return 1;
		}
		/* The buffer from memdup_user() is no longer needed. */
		kfree(page);
	}
	return 0;
}

int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->lock);
	return 0;
}

void kvm_xen_init_vm(struct kvm *kvm)
{
}

void kvm_xen_destroy_vm(struct kvm *kvm)
{
	kvm_gfn_to_pfn_cache_destroy(kvm, &kvm->arch.xen.shinfo_cache);

	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}

static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;
	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6];

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	longmode = is_64_bit_hypercall(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = kvm_x86_ops.get_cpl(vcpu);
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}

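/*
 * With 2-level event channels the number of valid ports depends on the
 * guest's word size: the evtchn_pending/evtchn_mask bitmaps in shared_info
 * are arrays of guest-long-sized words, so a 32-bit (compat) guest has
 * fewer ports than a 64-bit one.
 */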
static inline int max_evtchn_port(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
		return EVTCHN_2L_NR_CHANNELS;
	else
		return COMPAT_EVTCHN_2L_NR_CHANNELS;
}

/*
 * This follows the kvm_set_irq() API, so it returns:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e,
			    struct kvm *kvm)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct kvm_vcpu *vcpu;
	unsigned long *pending_bits, *mask_bits;
	unsigned long flags;
	int port_word_bit;
	bool kick_vcpu = false;
	int idx;
	int rc;

	vcpu = kvm_get_vcpu_by_id(kvm, e->xen_evtchn.vcpu);
	if (!vcpu)
		return -1;

	if (!vcpu->arch.xen.vcpu_info_set)
		return -1;

	if (e->xen_evtchn.port >= max_evtchn_port(kvm))
		return -1;

	rc = -EWOULDBLOCK;
	read_lock_irqsave(&gpc->lock, flags);

	idx = srcu_read_lock(&kvm->srcu);
	if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = e->xen_evtchn.port / 64;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = e->xen_evtchn.port / 32;
	}

	/*
	 * If this port wasn't already set, and if it isn't masked, then
	 * we try to set the corresponding bit in the in-kernel shadow of
	 * evtchn_pending_sel for the target vCPU. And if *that* wasn't
	 * already set, then we kick the vCPU in question to write to the
	 * *real* evtchn_pending_sel in its own guest vcpu_info struct.
	 */
	if (test_and_set_bit(e->xen_evtchn.port, pending_bits)) {
		rc = 0; /* It was already raised */
	} else if (test_bit(e->xen_evtchn.port, mask_bits)) {
		rc = -1; /* Masked */
	} else {
		rc = 1; /* Delivered. But was the vCPU waking already? */
		if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
			kick_vcpu = true;
	}

 out_rcu:
	srcu_read_unlock(&kvm->srcu, idx);
	read_unlock_irqrestore(&gpc->lock, flags);

	if (kick_vcpu) {
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	return rc;
}

/* This is the version called from kvm_set_irq() as the .set function */
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
{
	bool mm_borrowed = false;
	int rc;

	if (!level)
		return -1;

	rc = kvm_xen_set_evtchn_fast(e, kvm);
	if (rc != -EWOULDBLOCK)
		return rc;

	if (current->mm != kvm->mm) {
		/*
		 * If not on a thread which already belongs to this KVM,
		 * we'd better be in the irqfd workqueue.
		 */
		if (WARN_ON_ONCE(current->mm))
			return -EINVAL;

		kthread_use_mm(kvm->mm);
		mm_borrowed = true;
	}

	/*
	 * For the irqfd workqueue, using the main kvm->lock mutex is
	 * fine since this function is invoked from kvm_set_irq() with
	 * no other lock held, no srcu. In future if it will be called
	 * directly from a vCPU thread (e.g. on hypercall for an IPI)
	 * then it may need to switch to using a leaf-node mutex for
	 * serializing the shared_info mapping.
	 */
	mutex_lock(&kvm->lock);

	/*
	 * It is theoretically possible for the page to be unmapped
	 * and the MMU notifier to invalidate the shared_info before
	 * we even get to use it. In that case, this looks like an
	 * infinite loop. It was tempting to do it via the userspace
	 * HVA instead... but that just *hides* the fact that it's
	 * an infinite loop, because if a fault occurs and it waits
	 * for the page to come back, it can *still* immediately
	 * fault and have to wait again, repeatedly.
	 *
	 * Conversely, the page could also have been reinstated by
	 * another thread before we even obtain the mutex above, so
	 * check again *first* before remapping it.
	 */
	do {
		struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
		int idx;

		rc = kvm_xen_set_evtchn_fast(e, kvm);
		if (rc != -EWOULDBLOCK)
			break;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa,
						  PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while (!rc);

	mutex_unlock(&kvm->lock);

	if (mm_borrowed)
		kthread_unuse_mm(kvm->mm);

	return rc;
}

int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue)
{
	if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	e->xen_evtchn.port = ue->u.xen_evtchn.port;
	e->xen_evtchn.vcpu = ue->u.xen_evtchn.vcpu;
	e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
	e->set = evtchn_set_fn;

	return 0;
}