// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 */
#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/version.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>
static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm);
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
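
/*
 * Map (or tear down) the guest's shared_info page at the given GFN and,
 * when mapping it, populate its wallclock fields from the host clock.
 */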
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct pvclock_wall_clock *wc;
	gpa_t gpa = gfn_to_gpa(gfn);
	int idx = srcu_read_lock(&kvm->srcu);

	if (gfn == KVM_XEN_INVALID_GFN) {
		kvm_gpc_deactivate(gpc);

	ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);

	/*
	 * This code mirrors kvm_write_wall_clock() except that it writes
	 * directly through the pfn cache and doesn't mark the page dirty.
	 */
	wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);

	/* It could be invalid again already, so we need to check */
	read_lock_irq(&gpc->lock);

	read_unlock_irq(&gpc->lock);

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->wc_sec_hi;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->arch.wc_sec_hi;
	}

	/* Increment and ensure an odd value */
	wc_version = wc->version = (wc->version + 1) | 1;

	wc->nsec = do_div(wall_nsec, 1000000000);
	wc->sec = (u32)wall_nsec;
	*wc_sec_hi = wall_nsec >> 32;

	wc->version = wc_version + 1;
	read_unlock_irq(&gpc->lock);

	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

	srcu_read_unlock(&kvm->srcu, idx);
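
/*
 * If the hrtimer callback has marked a timer expiry as pending, deliver
 * the timer event channel (timer_virq) and clear the pending state.
 */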
void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
	if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) {
		struct kvm_xen_evtchn e;

		e.vcpu_id = vcpu->vcpu_id;
		e.vcpu_idx = vcpu->vcpu_idx;
		e.port = vcpu->arch.xen.timer_virq;
		e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

		kvm_xen_set_evtchn(&e, vcpu->kvm);

		vcpu->arch.xen.timer_expires = 0;
		atomic_set(&vcpu->arch.xen.timer_pending, 0);
	}
}
static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
					     arch.xen.timer);

	if (atomic_read(&vcpu->arch.xen.timer_pending))
		return HRTIMER_NORESTART;

	atomic_inc(&vcpu->arch.xen.timer_pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}
static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
{
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
	vcpu->arch.xen.timer_expires = guest_abs;

	if (delta_ns <= 0) {
		xen_timer_callback(&vcpu->arch.xen.timer);
	} else {
		ktime_t ktime_now = ktime_get();

		hrtimer_start(&vcpu->arch.xen.timer,
			      ktime_add_ns(ktime_now, delta_ns),
			      HRTIMER_MODE_ABS_HARD);
	}
}

static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.xen.timer);
	vcpu->arch.xen.timer_expires = 0;
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
}

static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	vcpu->arch.xen.timer.function = xen_timer_callback;
}
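
/*
 * Write the vCPU's accumulated runstate data into the guest-supplied
 * vcpu_runstate_info area, coping with both the 64-bit and 32-bit compat
 * layouts and with an area that may straddle two pages (hence two GPCs).
 */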
static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	struct gfn_to_pfn_cache *gpc1 = &vx->runstate_cache;
	struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache;
	size_t user_len, user_len1, user_len2;
	struct vcpu_runstate_info rs;
	uint8_t *update_bit = NULL;

	/*
	 * The only difference between 32-bit and 64-bit versions of the
	 * runstate struct is the alignment of uint64_t in 32-bit, which
	 * means that the 64-bit version has an additional 4 bytes of
	 * padding after the first field 'state'. Let's be really really
	 * paranoid about that, and matching it with our internal data
	 * structures that we memcpy into it...
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);

	/*
	 * The 64-bit structure has 4 bytes of padding before 'state_entry_time'
	 * so each subsequent field is shifted by 4, and it's 4 bytes longer.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
	BUILD_BUG_ON(sizeof(struct vcpu_runstate_info) != 0x2c + 4);

	/*
	 * The state field is in the same place at the start of both structs,
	 * and is the same size (int) as vx->current_runstate.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));

	/*
	 * The state_entry_time field is 64 bits in both versions, and the
	 * XEN_RUNSTATE_UPDATE flag is in the top bit, which given that x86
	 * is little-endian means that it's in the last *byte* of the word.
	 * That detail is important later.
	 */
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
		     sizeof(uint64_t));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
		     sizeof(uint64_t));
	BUILD_BUG_ON((XEN_RUNSTATE_UPDATE >> 56) != 0x80);
	/*
	 * The time array is four 64-bit quantities in both versions, matching
	 * the vx->runstate_times and immediately following state_entry_time.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct vcpu_runstate_info, time) - sizeof(uint64_t));
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(uint64_t));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof_field(struct compat_vcpu_runstate_info, time));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		user_len = sizeof(struct vcpu_runstate_info);
		times_ofs = offsetof(struct vcpu_runstate_info,
				     state_entry_time);
	} else {
		user_len = sizeof(struct compat_vcpu_runstate_info);
		times_ofs = offsetof(struct compat_vcpu_runstate_info,
				     state_entry_time);
	}
	/*
	 * There are basically no alignment constraints. The guest can set it
	 * up so it crosses from one page to the next, and at arbitrary byte
	 * alignment (and the 32-bit ABI doesn't align the 64-bit integers
	 * anyway, even if the overall struct had been 64-bit aligned).
	 */
	if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
		user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK);
		user_len2 = user_len - user_len1;
	} else {
		user_len1 = user_len;
	}
	BUG_ON(user_len1 + user_len2 != user_len);
	/*
	 * Attempt to obtain the GPC lock on *both* (if there are two)
	 * gfn_to_pfn caches that cover the region.
	 */
	read_lock_irqsave(&gpc1->lock, flags);
	while (!kvm_gpc_check(gpc1, user_len1)) {
		read_unlock_irqrestore(&gpc1->lock, flags);

		/* When invoked from kvm_sched_out() we cannot sleep */

		if (kvm_gpc_refresh(gpc1, user_len1))
			return;

		read_lock_irqsave(&gpc1->lock, flags);
	}

	if (likely(!user_len2)) {
		/*
		 * Set up three pointers directly to the runstate_info
		 * struct in the guest (via the GPC).
		 *
		 * • @rs_state → state field
		 * • @rs_times → state_entry_time field.
		 * • @update_bit → last byte of state_entry_time, which
		 *   contains the XEN_RUNSTATE_UPDATE bit.
		 */
		rs_state = gpc1->khva;
		rs_times = gpc1->khva + times_ofs;
		if (v->kvm->arch.xen.runstate_update_flag)
			update_bit = ((void *)(&rs_times[1])) - 1;
	} else {
		/*
		 * The guest's runstate_info is split across two pages and we
		 * need to hold and validate both GPCs simultaneously. We can
		 * declare a lock ordering GPC1 > GPC2 because nothing else
		 * takes them more than one at a time.
		 */
		read_lock(&gpc2->lock);

		if (!kvm_gpc_check(gpc2, user_len2)) {
			read_unlock(&gpc2->lock);
			read_unlock_irqrestore(&gpc1->lock, flags);

			/* When invoked from kvm_sched_out() we cannot sleep */

			/*
			 * Use kvm_gpc_activate() here because if the runstate
			 * area was configured in 32-bit mode and only extends
			 * to the second page now because the guest changed to
			 * 64-bit mode, the second GPC won't have been set up.
			 */
			if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,
					     user_len2))
				return;

			/*
			 * We dropped the lock on GPC1 so we have to go all the
			 * way back and revalidate that too.
			 */
		}

		/*
		 * In this case, the runstate_info struct will be assembled on
		 * the kernel stack (compat or not as appropriate) and will
		 * be copied to GPC1/GPC2 with a dual memcpy. Set up the three
		 * rs pointers accordingly.
		 */
		rs_times = &rs.state_entry_time;

		/*
		 * The rs_state pointer points to the start of what we'll
		 * copy to the guest, which in the case of a compat guest
		 * is the 32-bit field that the compiler thinks is padding.
		 */
		rs_state = ((void *)rs_times) - times_ofs;

		/*
		 * The update_bit is still directly in the guest memory,
		 * via one GPC or the other.
		 */
		if (v->kvm->arch.xen.runstate_update_flag) {
			if (user_len1 >= times_ofs + sizeof(uint64_t))
				update_bit = gpc1->khva + times_ofs +
					sizeof(uint64_t) - 1;
			else
				update_bit = gpc2->khva + times_ofs +
					sizeof(uint64_t) - 1 - user_len1;
		}

		/*
		 * Don't leak kernel memory through the padding in the 64-bit
		 * version of the struct.
		 */
		memset(&rs, 0, offsetof(struct vcpu_runstate_info, state_entry_time));
	}
	/*
	 * First, set the XEN_RUNSTATE_UPDATE bit in the top bit of the
	 * state_entry_time field, directly in the guest. We need to set
	 * that (and write-barrier) before writing to the rest of the
	 * structure, and clear it last. Just as Xen does, we address the
	 * single *byte* in which it resides because it might be in a
	 * different cache line to the rest of the 64-bit word, due to
	 * the (lack of) alignment constraints.
	 */
	entry_time = vx->runstate_entry_time;
	if (update_bit) {
		entry_time |= XEN_RUNSTATE_UPDATE;
		*update_bit = (vx->runstate_entry_time | XEN_RUNSTATE_UPDATE) >> 56;
		smp_wmb();
	}

	/*
	 * Now assemble the actual structure, either on our kernel stack
	 * or directly in the guest according to how the rs_state and
	 * rs_times pointers were set up above.
	 */
	*rs_state = vx->current_runstate;
	rs_times[0] = entry_time;
	memcpy(rs_times + 1, vx->runstate_times, sizeof(vx->runstate_times));

	/* For the split case, we have to then copy it to the guest. */
	if (user_len2) {
		memcpy(gpc1->khva, rs_state, user_len1);
		memcpy(gpc2->khva, ((void *)rs_state) + user_len1, user_len2);
	}
	smp_wmb();

	/* Finally, clear the XEN_RUNSTATE_UPDATE bit. */
	if (update_bit) {
		entry_time &= ~XEN_RUNSTATE_UPDATE;
		*update_bit = entry_time >> 56;
		smp_wmb();
	}

	if (user_len2)
		read_unlock(&gpc2->lock);

	read_unlock_irqrestore(&gpc1->lock, flags);

	mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
	if (user_len2)
		mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
}
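
/*
 * Account the time spent in the current runstate, switch to the new
 * state, and propagate the update to the guest if a runstate area is
 * mapped.
 */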
void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	u64 now = get_kvmclock_ns(v->kvm);
	u64 delta_ns = now - vx->runstate_entry_time;
	u64 run_delay = current->sched_info.run_delay;

	if (unlikely(!vx->runstate_entry_time))
		vx->current_runstate = RUNSTATE_offline;

	/*
	 * Time waiting for the scheduler isn't "stolen" if the
	 * vCPU wasn't running anyway.
	 */
	if (vx->current_runstate == RUNSTATE_running) {
		u64 steal_ns = run_delay - vx->last_steal;

		delta_ns -= steal_ns;

		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
	}
	vx->last_steal = run_delay;

	vx->runstate_times[vx->current_runstate] += delta_ns;
	vx->current_runstate = state;
	vx->runstate_entry_time = now;

	if (vx->runstate_cache.active)
		kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
}
static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
{
	struct kvm_lapic_irq irq = { };
	int r;

	irq.dest_id = v->vcpu_id;
	irq.vector = v->arch.xen.upcall_vector;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.shorthand = APIC_DEST_NOSHORT;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.level = 1;

	/* The fast version will always work for physical unicast */
	WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
}
/*
 * On event channel delivery, the vcpu_info may not have been accessible.
 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
 * need to be marked into the vcpu_info (and evtchn_upcall_pending set).
 * Do so now that we can sleep in the context of the vCPU to bring the
 * page in, and refresh the pfn cache for it.
 */
void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
{
	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;

	if (!evtchn_pending_sel)
		return;

	/*
	 * Yes, this is an open-coded loop. But that's just what put_user()
	 * does anyway. Page it in and retry the instruction. We're just a
	 * little more honest about it.
	 */
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* Now gpc->khva is a valid kernel address for the vcpu_info */
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		struct vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orq %0, %1\n"
			     LOCK_PREFIX "andq %0, %2\n"
			     : "=r" (evtchn_pending_sel),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	} else {
		u32 evtchn_pending_sel32 = evtchn_pending_sel;
		struct compat_vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orl %0, %1\n"
			     LOCK_PREFIX "andl %0, %2\n"
			     : "=r" (evtchn_pending_sel32),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel32));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	}
	read_unlock_irqrestore(&gpc->lock, flags);

	/* For the per-vCPU lapic vector, deliver it as MSI. */
	if (v->arch.xen.upcall_vector)
		kvm_xen_inject_vcpu_vector(v);

	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}
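
/*
 * Check whether an event channel upcall is pending for this vCPU by
 * reading evtchn_upcall_pending from its vcpu_info. Called from contexts
 * which must not sleep, so returns 1 if the vcpu_info page cannot be
 * accessed without faulting it in.
 */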
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;
	u8 rc = 0;

	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */

	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/*
		 * This function gets called from kvm_vcpu_block() after setting the
		 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
		 * from a HLT. So we really mustn't sleep. If the page ended up absent
		 * at that point, just return 1 in order to trigger an immediate wake,
		 * and we'll end up getting called again from a context where we *can*
		 * fault in the page and wait for it.
		 */
		if (in_atomic() || !task_is_running(current))
			return 1;

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) {
			/*
			 * If this failed, userspace has screwed up the
			 * vcpu_info mapping. No interrupts for you.
			 */
			return 0;
		}

		read_lock_irqsave(&gpc->lock, flags);
	}

	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
	read_unlock_irqrestore(&gpc->lock, flags);

	return rc;
}
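
/* Handle the KVM_XEN_HVM_SET_ATTR ioctl for VM-wide Xen attributes. */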
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {

		mutex_lock(&kvm->lock);
		kvm->arch.xen.long_mode = !!data->u.long_mode;
		mutex_unlock(&kvm->lock);

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		mutex_lock(&kvm->lock);
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		mutex_unlock(&kvm->lock);

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)

		mutex_lock(&kvm->lock);
		kvm->arch.xen.upcall_vector = data->u.vector;
		mutex_unlock(&kvm->lock);

	case KVM_XEN_ATTR_TYPE_EVTCHN:
		r = kvm_xen_setattr_evtchn(kvm, data);

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		mutex_lock(&kvm->lock);
		kvm->arch.xen.xen_version = data->u.xen_version;
		mutex_unlock(&kvm->lock);

	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {

		mutex_lock(&kvm->lock);
		kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
		mutex_unlock(&kvm->lock);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_cache.active)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
			data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		data->u.xen_version = kvm->arch.xen.xen_version;

	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {

		data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag;

	mutex_unlock(&kvm->lock);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);

		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
				     data->u.gpa, sizeof(struct vcpu_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);

		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache,
				     data->u.gpa,
				     sizeof(struct pvclock_vcpu_time_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR: {
		size_t sz, sz1, sz2;

		if (!sched_info_on()) {

		if (data->u.gpa == KVM_XEN_INVALID_GPA) {

			kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);

		/*
		 * If the guest switches to 64-bit mode after setting the runstate
		 * address, that's actually OK. kvm_xen_update_runstate_guest()
		 * will cope.
		 */
		if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode)
			sz = sizeof(struct vcpu_runstate_info);
		else
			sz = sizeof(struct compat_vcpu_runstate_info);

		/* How much fits in the (first) page? */
		sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);
		r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache,
				     data->u.gpa, sz1);

		/* Either map the second page, or deactivate the second GPC */
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);

			BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK);
			r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache,
					     data->u.gpa + sz1, sz2);

		kvm_xen_update_runstate_guest(vcpu, false);
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {

		if (data->u.runstate.state > RUNSTATE_offline) {

		kvm_xen_update_runstate(vcpu, data->u.runstate.state);

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {

		if (data->u.runstate.state > RUNSTATE_offline) {

		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {

		if (get_kvmclock_ns(vcpu->kvm) <
		    data->u.runstate.state_entry_time) {

		vcpu->arch.xen.current_runstate = data->u.runstate.state;
		vcpu->arch.xen.runstate_entry_time =
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
			data->u.runstate.time_offline;
		vcpu->arch.xen.last_steal = current->sched_info.run_delay;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		if (!sched_info_on()) {

		if (data->u.runstate.state > RUNSTATE_offline &&
		    data->u.runstate.state != (u64)-1) {

		/* The adjustment must add up */
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {

		if (get_kvmclock_ns(vcpu->kvm) <
		    (vcpu->arch.xen.runstate_entry_time +
		     data->u.runstate.state_entry_time)) {

		vcpu->arch.xen.runstate_entry_time +=
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
			data->u.runstate.time_offline;

		if (data->u.runstate.state <= RUNSTATE_offline)
			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		else if (vcpu->arch.xen.runstate_cache.active)
			kvm_xen_update_runstate_guest(vcpu, false);
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		if (data->u.vcpu_id >= KVM_MAX_VCPUS)

		vcpu->arch.xen.vcpu_id = data->u.vcpu_id;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		if (data->u.timer.port &&
		    data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {

		if (!vcpu->arch.xen.timer.function)
			kvm_xen_init_timer(vcpu);

		/* Stop the timer (if it's running) before changing the vector */
		kvm_xen_stop_timer(vcpu);
		vcpu->arch.xen.timer_virq = data->u.timer.port;

		/* Start the timer if the new value has a valid vector+expiry. */
		if (data->u.timer.port && data->u.timer.expires_ns)
			kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
					    data->u.timer.expires_ns -
					    get_kvmclock_ns(vcpu->kvm));

	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)

		vcpu->arch.xen.upcall_vector = data->u.vector;

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->lock);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = KVM_XEN_INVALID_GPA;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = KVM_XEN_INVALID_GPA;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {

		if (vcpu->arch.xen.runstate_cache.active) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {

		data->u.runstate.state = vcpu->arch.xen.current_runstate;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {

		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		data->u.runstate.state_entry_time =
			vcpu->arch.xen.runstate_entry_time;
		data->u.runstate.time_running =
			vcpu->arch.xen.runstate_times[RUNSTATE_running];
		data->u.runstate.time_runnable =
			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
		data->u.runstate.time_blocked =
			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
		data->u.runstate.time_offline =
			vcpu->arch.xen.runstate_times[RUNSTATE_offline];

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		data->u.vcpu_id = vcpu->arch.xen.vcpu_id;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		data->u.timer.port = vcpu->arch.xen.timer_virq;
		data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
		data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;

	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = vcpu->arch.xen.upcall_vector;

	mutex_unlock(&vcpu->kvm->lock);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
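
/*
 * Validate and install the VM-wide Xen HVM configuration (hypercall MSR,
 * feature flags, hypercall blob addresses) supplied by userspace.
 */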
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	/* Only some feature flags need to be *enabled* by userspace */
	u32 permitted_flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
			      KVM_XEN_HVM_CONFIG_EVTCHN_SEND;

	if (xhc->flags & ~permitted_flags)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->lock);
static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;

	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

static inline int max_evtchn_port(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
		return EVTCHN_2L_NR_CHANNELS;
	else
		return COMPAT_EVTCHN_2L_NR_CHANNELS;
}
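
/*
 * For SCHEDOP_poll: return true if any of the event channel ports being
 * polled is already pending in the shared_info page.
 */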
static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
			       evtchn_port_t *ports)
{
	struct kvm *kvm = vcpu->kvm;
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	unsigned long *pending_bits;
	unsigned long flags;
	int idx, i;

	idx = srcu_read_lock(&kvm->srcu);
	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;

		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;

		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	}

	for (i = 0; i < nr_ports; i++) {
		if (test_bit(ports[i], pending_bits)) {
			ret = true;
			break;
		}
	}

out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);
static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
				 u64 param, u64 *r)
{
	struct sched_poll sched_poll;
	evtchn_port_t port, *ports;
	struct x86_exception e;

	if (!lapic_in_kernel(vcpu) ||
	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
		return false;

	if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
		struct compat_sched_poll sp32;

		/* Sanity check that the compat struct definition is correct */
		BUILD_BUG_ON(sizeof(sp32) != 16);

		if (kvm_read_guest_virt(vcpu, param, &sp32, sizeof(sp32), &e)) {

		/*
		 * This is a 32-bit pointer to an array of evtchn_port_t which
		 * are uint32_t, so once it's converted no further compat
		 * handling is needed.
		 */
		sched_poll.ports = (void *)(unsigned long)(sp32.ports);
		sched_poll.nr_ports = sp32.nr_ports;
		sched_poll.timeout = sp32.timeout;
	} else {
		if (kvm_read_guest_virt(vcpu, param, &sched_poll,
					sizeof(sched_poll), &e)) {

	if (unlikely(sched_poll.nr_ports > 1)) {
		/* Xen (unofficially) limits number of pollers to 128 */
		if (sched_poll.nr_ports > 128) {

		ports = kmalloc_array(sched_poll.nr_ports,
				      sizeof(*ports), GFP_KERNEL);

		if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
					sched_poll.nr_ports * sizeof(*ports), &e)) {

		for (i = 0; i < sched_poll.nr_ports; i++) {
			if (ports[i] >= max_evtchn_port(vcpu->kvm)) {

	if (sched_poll.nr_ports == 1)
		vcpu->arch.xen.poll_evtchn = port;
	else
		vcpu->arch.xen.poll_evtchn = -1;

	set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (!wait_pending_event(vcpu, sched_poll.nr_ports, ports)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;

		if (sched_poll.timeout)
			mod_timer(&vcpu->arch.xen.poll_timer,
				  jiffies + nsecs_to_jiffies(sched_poll.timeout));

		kvm_vcpu_halt(vcpu);

		if (sched_poll.timeout)
			del_timer(&vcpu->arch.xen.poll_timer);

		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	vcpu->arch.xen.poll_evtchn = 0;

	/* Really, this is only needed in case of timeout */
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (unlikely(sched_poll.nr_ports > 1))
		kfree(ports);
static void cancel_evtchn_poll(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.xen.poll_timer);

	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);
}

static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, bool longmode,
				   int cmd, u64 param, u64 *r)
{
		if (kvm_xen_schedop_poll(vcpu, longmode, param, r))

		kvm_vcpu_on_spin(vcpu, true);
struct compat_vcpu_set_singleshot_timer {
	uint64_t timeout_abs_ns;
	uint32_t flags;
} __attribute__((packed));
static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
				  int vcpu_id, u64 param, u64 *r)
{
	struct vcpu_set_singleshot_timer oneshot;
	struct x86_exception e;

	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	switch (cmd) {
	case VCPUOP_set_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {

		/*
		 * The only difference for 32-bit compat is the 4 bytes of
		 * padding after the interesting part of the structure. So
		 * for a faithful emulation of Xen we have to *try* to copy
		 * the padding and return -EFAULT if we can't. Otherwise we
		 * might as well just have copied the 12-byte 32-bit struct.
		 */
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     offsetof(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, flags) !=
			     offsetof(struct vcpu_set_singleshot_timer, flags));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, flags));

		if (kvm_read_guest_virt(vcpu, param, &oneshot, longmode ? sizeof(oneshot) :
					sizeof(struct compat_vcpu_set_singleshot_timer), &e)) {

		delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
		if ((oneshot.flags & VCPU_SSHOTTMR_future) && delta < 0) {

		kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta);

	case VCPUOP_stop_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {

		kvm_xen_stop_timer(vcpu);
static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
				       u64 *r)
{
	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	if (timeout) {
		uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
		int64_t delta = timeout - guest_now;

		/* Xen has a 'Linux workaround' in do_set_timer_op() which
		 * checks for negative absolute timeout values (caused by
		 * integer overflow), and for values about 13 days in the
		 * future (2^50ns) which would be caused by jiffies
		 * overflow. For those cases, it sets the timeout 100ms in
		 * the future (not *too* soon, since if a guest really did
		 * set a long timeout on purpose we don't want to keep
		 * churning CPU time by waking it up).
		 */
		if (unlikely((int64_t)timeout < 0 ||
			     (delta > 0 && (uint32_t) (delta >> 50) != 0))) {
			delta = 100 * NSEC_PER_MSEC;
			timeout = guest_now + delta;
		}

		kvm_xen_start_timer(vcpu, timeout, delta);
	} else {
		kvm_xen_stop_timer(vcpu);
	}

	*r = 0;
	return true;
}
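
/*
 * Main Xen hypercall entry point: read the hypercall number and arguments
 * from the guest registers, handle the hypercalls accelerated in-kernel,
 * and punt everything else to userspace via KVM_EXIT_XEN.
 */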
int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	u64 input, params[6], r = -ENOSYS;
	bool handled = false;

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	longmode = is_64_bit_hypercall(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	cpl = static_call(kvm_x86_get_cpl)(vcpu);
	trace_kvm_xen_hypercall(cpl, input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	/*
	 * Only allow hypercall acceleration for CPL0. The rare hypercalls that
	 * are permitted in guest userspace can be handled by the VMM.
	 */
	if (unlikely(cpl > 0))
		goto handle_in_userspace;
	switch (input) {
	case __HYPERVISOR_xen_version:
		if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
			r = vcpu->kvm->arch.xen.xen_version;
			handled = true;
		}
		break;
	case __HYPERVISOR_event_channel_op:
		if (params[0] == EVTCHNOP_send)
			handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
		break;
	case __HYPERVISOR_sched_op:
		handled = kvm_xen_hcall_sched_op(vcpu, longmode, params[0],
						 params[1], &r);
		break;
	case __HYPERVISOR_vcpu_op:
		handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0], params[1],
						params[2], &r);
		break;
	case __HYPERVISOR_set_timer_op: {
		u64 timeout = params[0];
		/* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */
		if (!longmode)
			timeout |= params[1] << 32;
		handled = kvm_xen_hcall_set_timer_op(vcpu, timeout, &r);
		break;
	}
	}

	if (handled)
		return kvm_xen_hypercall_set_result(vcpu, r);

handle_in_userspace:
	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = cpl;
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}
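
/*
 * If this vCPU is blocked in SCHEDOP_poll and the given port is one it is
 * polling (or it is polling more than one port), wake it.
 */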
static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
{
	int poll_evtchn = vcpu->arch.xen.poll_evtchn;

	if ((poll_evtchn == port || poll_evtchn == -1) &&
	    test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}
/*
 * The return value from this function is propagated to kvm_set_irq() API,
 * so it returns:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 *
 * It is also called directly from kvm_arch_set_irq_inatomic(), where the
 * only check on its return value is a comparison with -EWOULDBLOCK.
 */
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct kvm_vcpu *vcpu;
	unsigned long *pending_bits, *mask_bits;
	unsigned long flags;
	bool kick_vcpu = false;
	int vcpu_idx, idx, rc;

	vcpu_idx = READ_ONCE(xe->vcpu_idx);
	if (vcpu_idx >= 0)
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	else {
		vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);

		WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
	}

	if (!vcpu->arch.xen.vcpu_info_cache.active)
		return -EINVAL;

	if (xe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	idx = srcu_read_lock(&kvm->srcu);

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 64;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 32;
	}

	/*
	 * If this port wasn't already set, and if it isn't masked, then
	 * we try to set the corresponding bit in the in-kernel shadow of
	 * evtchn_pending_sel for the target vCPU. And if *that* wasn't
	 * already set, then we kick the vCPU in question to write to the
	 * *real* evtchn_pending_sel in its own guest vcpu_info struct.
	 */
	if (test_and_set_bit(xe->port, pending_bits)) {
		rc = 0; /* It was already raised */
	} else if (test_bit(xe->port, mask_bits)) {
		rc = -ENOTCONN; /* Masked */
		kvm_xen_check_poller(vcpu, xe->port);
	} else {
		rc = 1; /* Delivered to the bitmap in shared_info. */
		/* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
		read_unlock_irqrestore(&gpc->lock, flags);
		gpc = &vcpu->arch.xen.vcpu_info_cache;

		read_lock_irqsave(&gpc->lock, flags);
		if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
			/*
			 * Could not access the vcpu_info. Set the bit in-kernel
			 * and prod the vCPU to deliver it for itself.
			 */
			if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
				kick_vcpu = true;
			goto out_rcu;
		}

		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
			struct vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		} else {
			struct compat_vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit,
					      (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		}

		/* For the per-vCPU lapic vector, deliver it as MSI. */
		if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
			kvm_xen_inject_vcpu_vector(vcpu);
			kick_vcpu = false;
		}
	}

out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kick_vcpu) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	return rc;
}
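
/*
 * Slow path for event channel delivery: retry kvm_xen_set_evtchn_fast(),
 * refreshing the shared_info pfn cache when it is invalid and borrowing
 * kvm->mm if invoked from the irqfd workqueue.
 */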
static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	bool mm_borrowed = false;
	int rc;

	rc = kvm_xen_set_evtchn_fast(xe, kvm);
	if (rc != -EWOULDBLOCK)
		return rc;

	if (current->mm != kvm->mm) {
		/*
		 * If not on a thread which already belongs to this KVM,
		 * we'd better be in the irqfd workqueue.
		 */
		if (WARN_ON_ONCE(current->mm))
			return -EINVAL;

		kthread_use_mm(kvm->mm);
		mm_borrowed = true;
	}

	/*
	 * For the irqfd workqueue, using the main kvm->lock mutex is
	 * fine since this function is invoked from kvm_set_irq() with
	 * no other lock held, no srcu. In future if it will be called
	 * directly from a vCPU thread (e.g. on hypercall for an IPI)
	 * then it may need to switch to using a leaf-node mutex for
	 * serializing the shared_info mapping.
	 */
	mutex_lock(&kvm->lock);

	/*
	 * It is theoretically possible for the page to be unmapped
	 * and the MMU notifier to invalidate the shared_info before
	 * we even get to use it. In that case, this looks like an
	 * infinite loop. It was tempting to do it via the userspace
	 * HVA instead... but that just *hides* the fact that it's
	 * an infinite loop, because if a fault occurs and it waits
	 * for the page to come back, it can *still* immediately
	 * fault and have to wait again, repeatedly.
	 *
	 * Conversely, the page could also have been reinstated by
	 * another thread before we even obtain the mutex above, so
	 * check again *first* before remapping it.
	 */
	do {
		struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
		int idx;

		rc = kvm_xen_set_evtchn_fast(xe, kvm);
		if (rc != -EWOULDBLOCK)
			break;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while(!rc);

	mutex_unlock(&kvm->lock);

	if (mm_borrowed)
		kthread_unuse_mm(kvm->mm);

	return rc;
}
/* This is the version called from kvm_set_irq() as the .set function */
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
{
	if (!level)
		return -1;

	return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
}

/*
 * Set up an event channel interrupt from the KVM IRQ routing table.
 * Used for e.g. PIRQ from passed through physical devices.
 */
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue)
{
	struct kvm_vcpu *vcpu;

	if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	/*
	 * Xen gives us interesting mappings from vCPU index to APIC ID,
	 * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
	 * to find it. Do that once at setup time, instead of every time.
	 * But beware that on live update / live migration, the routing
	 * table might be reinstated before the vCPU threads have finished
	 * recreating their vCPUs.
	 */
	vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
	if (vcpu)
		e->xen_evtchn.vcpu_idx = vcpu->vcpu_idx;
	else
		e->xen_evtchn.vcpu_idx = -1;

	e->xen_evtchn.port = ue->u.xen_evtchn.port;
	e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
	e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
	e->set = evtchn_set_fn;

	return 0;
}
/*
 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
{
	struct kvm_xen_evtchn e;
	int ret;

	if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	e.vcpu_id = uxe->vcpu;
	e.priority = uxe->priority;

	ret = kvm_xen_set_evtchn(&e, kvm);

	/*
	 * None of that 'return 1 if it actually got delivered' nonsense.
	 * We don't care if it was masked (-ENOTCONN) either.
	 */
	if (ret > 0 || ret == -ENOTCONN)
		ret = 0;
/*
 * Support for *outbound* event channel events via the EVTCHNOP_send hypercall.
 */
		struct kvm_xen_evtchn port;
			u32 port; /* zero */
			struct eventfd_ctx *ctx;
/*
 * Update target vCPU or priority for a registered sending channel.
 */
static int kvm_xen_eventfd_update(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct evtchnfd *evtchnfd;

	/* Protect writes to evtchnfd as well as the idr lookup. */
	mutex_lock(&kvm->lock);
	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);

	/* For an UPDATE, nothing may change except the priority/vcpu */
	if (evtchnfd->type != data->u.evtchn.type)

	/*
	 * Port cannot change, and if it's zero that was an eventfd
	 * which can't be changed either.
	 */
	if (!evtchnfd->deliver.port.port ||
	    evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)

	/* We only support 2 level event channels for now */
	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)

	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
	}

	mutex_unlock(&kvm->lock);
/*
 * Configure the target (eventfd or local port delivery) for sending on
 * a given event channel.
 */
static int kvm_xen_eventfd_assign(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct eventfd_ctx *eventfd = NULL;
	struct evtchnfd *evtchnfd;

	evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);

	switch(data->u.evtchn.type) {
	case EVTCHNSTAT_ipi:
		/* IPI must map back to the same port# */
		if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
			goto out_noeventfd; /* -EINVAL */
		break;

	case EVTCHNSTAT_interdomain:
		if (data->u.evtchn.deliver.port.port) {
			if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
				goto out_noeventfd; /* -EINVAL */
		} else {
			eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
			if (IS_ERR(eventfd)) {
				ret = PTR_ERR(eventfd);
				goto out_noeventfd;
			}
		}
		break;

	case EVTCHNSTAT_virq:
	case EVTCHNSTAT_closed:
	case EVTCHNSTAT_unbound:
	case EVTCHNSTAT_pirq:
	default: /* Unknown event channel type */
		goto out; /* -EINVAL */
	}

	evtchnfd->send_port = data->u.evtchn.send_port;
	evtchnfd->type = data->u.evtchn.type;

	evtchnfd->deliver.eventfd.ctx = eventfd;

	/* We only support 2 level event channels for now */
	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		goto out; /* -EINVAL; */

	evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
	evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
	evtchnfd->deliver.port.vcpu_idx = -1;
	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;

	mutex_lock(&kvm->lock);
	ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
			GFP_KERNEL);
	mutex_unlock(&kvm->lock);

	eventfd_ctx_put(eventfd);
static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
{
	struct evtchnfd *evtchnfd;

	mutex_lock(&kvm->lock);
	evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->lock);

	synchronize_srcu(&kvm->srcu);
	if (!evtchnfd->deliver.port.port)
		eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
static int kvm_xen_eventfd_reset(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd, **all_evtchnfds;
	int n = 0;

	mutex_lock(&kvm->lock);

	/*
	 * Because synchronize_srcu() cannot be called inside the
	 * critical section, first collect all the evtchnfd objects
	 * in an array as they are removed from evtchn_ports.
	 */
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
		n++;

	all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
	if (!all_evtchnfds) {
		mutex_unlock(&kvm->lock);
		return -ENOMEM;
	}

	n = 0;
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		all_evtchnfds[n++] = evtchnfd;
		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
	}
	mutex_unlock(&kvm->lock);

	synchronize_srcu(&kvm->srcu);

	while (n--) {
		evtchnfd = all_evtchnfds[n];
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	kfree(all_evtchnfds);
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
		return kvm_xen_eventfd_reset(kvm);

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
		return kvm_xen_eventfd_deassign(kvm, port);
	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
		return kvm_xen_eventfd_update(kvm, data);
	if (data->u.evtchn.flags)
		return -EINVAL;

	return kvm_xen_eventfd_assign(kvm, data);
}
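
/*
 * Handle the EVTCHNOP_send hypercall: look up the sending port in the
 * evtchn_ports idr and either deliver the bound local port or signal the
 * associated eventfd.
 */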
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
{
	struct evtchnfd *evtchnfd;
	struct evtchn_send send;
	struct x86_exception e;

	/* Sanity check: this structure is the same for 32-bit and 64-bit */
	BUILD_BUG_ON(sizeof(send) != 4);
	if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) {

	/*
	 * evtchnfd is protected by kvm->srcu; the idr lookup instead
	 * is protected by RCU.
	 */
	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);

	if (evtchnfd->deliver.port.port) {
		int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);

		if (ret < 0 && ret != -ENOTCONN)
			return false;
	} else {
		eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
	}
void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
	vcpu->arch.xen.poll_evtchn = 0;

	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);

	kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
}
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_timer_enabled(vcpu))
		kvm_xen_stop_timer(vcpu);

	kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);

	del_timer_sync(&vcpu->arch.xen.poll_timer);
}
void kvm_xen_init_vm(struct kvm *kvm)
{
	idr_init(&kvm->arch.xen.evtchn_ports);
	kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
}
void kvm_xen_destroy_vm(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;

	kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);

	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	idr_destroy(&kvm->arch.xen.evtchn_ports);

	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}