// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#include "x86.h"
#include "xen.h"
#include "hyperv.h"
#include "lapic.h"

#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>

#include "trace.h"

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm);
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
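
/*
 * Map (or unmap, for GPA_INVALID) the shared_info page through the pfn
 * cache and rewrite the wallclock fields, mirroring kvm_write_wall_clock()
 * but writing directly through the kernel mapping.
 */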
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct pvclock_wall_clock *wc;
	gpa_t gpa = gfn_to_gpa(gfn);
	u32 *wc_sec_hi;
	u32 wc_version;
	u64 wall_nsec;
	int ret = 0;
	int idx = srcu_read_lock(&kvm->srcu);

	if (gfn == GPA_INVALID) {
		kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
		goto out;
	}

	do {
		ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
						gpa, PAGE_SIZE);
		if (ret)
			goto out;

		/*
		 * This code mirrors kvm_write_wall_clock() except that it writes
		 * directly through the pfn cache and doesn't mark the page dirty.
		 */
		wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);

		/* It could be invalid again already, so we need to check */
		read_lock_irq(&gpc->lock);

		if (gpc->valid)
			break;

		read_unlock_irq(&gpc->lock);
	} while (1);

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->wc_sec_hi;
		wc = &shinfo->wc;
	} else
#endif
	{
		struct compat_shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->arch.wc_sec_hi;
		wc = &shinfo->wc;
	}

	/* Increment and ensure an odd value */
	wc_version = wc->version = (wc->version + 1) | 1;
	smp_wmb();

	wc->nsec = do_div(wall_nsec, 1000000000);
	wc->sec = (u32)wall_nsec;
	*wc_sec_hi = wall_nsec >> 32;
	smp_wmb();

	wc->version = wc_version + 1;
	read_unlock_irq(&gpc->lock);

	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}
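
/*
 * Deliver a pending Xen timer interrupt as a 2-level event channel event
 * on the vCPU's timer VIRQ port, then clear the pending state.
 */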
void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
	if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) {
		struct kvm_xen_evtchn e;

		e.vcpu_id = vcpu->vcpu_id;
		e.vcpu_idx = vcpu->vcpu_idx;
		e.port = vcpu->arch.xen.timer_virq;
		e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

		kvm_xen_set_evtchn(&e, vcpu->kvm);

		vcpu->arch.xen.timer_expires = 0;
		atomic_set(&vcpu->arch.xen.timer_pending, 0);
	}
}

static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
					     arch.xen.timer);
	if (atomic_read(&vcpu->arch.xen.timer_pending))
		return HRTIMER_NORESTART;

	atomic_inc(&vcpu->arch.xen.timer_pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}

static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
{
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
	vcpu->arch.xen.timer_expires = guest_abs;

	if (delta_ns <= 0) {
		xen_timer_callback(&vcpu->arch.xen.timer);
	} else {
		ktime_t ktime_now = ktime_get();
		hrtimer_start(&vcpu->arch.xen.timer,
			      ktime_add_ns(ktime_now, delta_ns),
			      HRTIMER_MODE_ABS_HARD);
	}
}

static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.xen.timer);
	vcpu->arch.xen.timer_expires = 0;
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
}

static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	vcpu->arch.xen.timer.function = xen_timer_callback;
}
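
/*
 * Fold the time since the last transition into the old runstate's counter.
 * Time spent waiting for the host scheduler counts as RUNSTATE_runnable
 * ("stolen") only if the vCPU was actually running.
 */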
static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	u64 now = get_kvmclock_ns(v->kvm);
	u64 delta_ns = now - vx->runstate_entry_time;
	u64 run_delay = current->sched_info.run_delay;

	if (unlikely(!vx->runstate_entry_time))
		vx->current_runstate = RUNSTATE_offline;

	/*
	 * Time waiting for the scheduler isn't "stolen" if the
	 * vCPU wasn't running anyway.
	 */
	if (vx->current_runstate == RUNSTATE_running) {
		u64 steal_ns = run_delay - vx->last_steal;

		delta_ns -= steal_ns;

		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
	}
	vx->last_steal = run_delay;

	vx->runstate_times[vx->current_runstate] += delta_ns;
	vx->current_runstate = state;
	vx->runstate_entry_time = now;
}
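
/*
 * Propagate the accumulated runstate to the guest's runstate area.
 * XEN_RUNSTATE_UPDATE is set in state_entry_time while the times are
 * being rewritten so the guest can detect a torn snapshot, and cleared
 * afterwards; smp_wmb() orders the phases of the update.
 */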
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	struct gfn_to_pfn_cache *gpc = &vx->runstate_cache;
	uint64_t *user_times;
	unsigned long flags;
	size_t user_len;
	int *user_state;

	kvm_xen_update_runstate(v, state);

	if (!vx->runstate_cache.active)
		return;

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
		user_len = sizeof(struct vcpu_runstate_info);
	else
		user_len = sizeof(struct compat_vcpu_runstate_info);

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
					   user_len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* When invoked from kvm_sched_out() we cannot sleep */
		if (state == RUNSTATE_runnable)
			return;

		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, user_len))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/*
	 * The only difference between 32-bit and 64-bit versions of the
	 * runstate struct is the alignment of uint64_t in 32-bit, which
	 * means that the 64-bit version has an additional 4 bytes of
	 * padding after the first field 'state'.
	 *
	 * So we use 'int __user *user_state' to point to the state field,
	 * and 'uint64_t __user *user_times' for runstate_entry_time. So
	 * the actual array of time[] in each state starts at user_times[1].
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
#ifdef CONFIG_X86_64
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
#endif

	user_state = gpc->khva;

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
		user_times = gpc->khva + offsetof(struct vcpu_runstate_info,
						  state_entry_time);
	else
		user_times = gpc->khva + offsetof(struct compat_vcpu_runstate_info,
						  state_entry_time);

	/*
	 * First write the updated state_entry_time at the appropriate
	 * location determined by 'offset'.
	 */
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
		     sizeof(user_times[0]));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
		     sizeof(user_times[0]));

	user_times[0] = vx->runstate_entry_time | XEN_RUNSTATE_UPDATE;
	smp_wmb();

	/*
	 * Next, write the new runstate. This is in the *same* place
	 * for 32-bit and 64-bit guests, asserted here for paranoia.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));

	*user_state = vx->current_runstate;

	/*
	 * Write the actual runstate times immediately after the
	 * runstate_entry_time.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof_field(struct compat_vcpu_runstate_info, time));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));

	memcpy(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times));
	smp_wmb();

	/*
	 * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
	 * runstate_entry_time field.
	 */
	user_times[0] &= ~XEN_RUNSTATE_UPDATE;
	smp_wmb();

	read_unlock_irqrestore(&gpc->lock, flags);

	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}

/*
 * On event channel delivery, the vcpu_info may not have been accessible.
 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
 * need to be marked into the vcpu_info (and evtchn_upcall_pending set).
 * Do so now that we can sleep in the context of the vCPU to bring the
 * page in, and refresh the pfn cache for it.
 */
void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
{
	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;

	if (!evtchn_pending_sel)
		return;

	/*
	 * Yes, this is an open-coded loop. But that's just what put_user()
	 * does anyway. Page it in and retry the instruction. We're just a
	 * little more honest about it.
	 */
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
					   sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
						 sizeof(struct vcpu_info)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* Now gpc->khva is a valid kernel address for the vcpu_info */
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		struct vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orq %0, %1\n"
			     "notq %0\n"
			     LOCK_PREFIX "andq %0, %2\n"
			     : "=r" (evtchn_pending_sel),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	} else {
		u32 evtchn_pending_sel32 = evtchn_pending_sel;
		struct compat_vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orl %0, %1\n"
			     "notl %0\n"
			     LOCK_PREFIX "andl %0, %2\n"
			     : "=r" (evtchn_pending_sel32),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel32));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	}
	read_unlock_irqrestore(&gpc->lock, flags);

	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}
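
/*
 * Non-sleeping check for a pending event channel upcall. Returns the
 * guest's evtchn_upcall_pending flag, or 1 if the vcpu_info page cannot
 * be accessed without sleeping (to force a wakeup and a retry from a
 * context which can fault the page in).
 */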
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;
	u8 rc = 0;

	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */

	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
					   sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/*
		 * This function gets called from kvm_vcpu_block() after setting the
		 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
		 * from a HLT. So we really mustn't sleep. If the page ended up absent
		 * at that point, just return 1 in order to trigger an immediate wake,
		 * and we'll end up getting called again from a context where we *can*
		 * fault in the page and wait for it.
		 */
		if (in_atomic() || !task_is_running(current))
			return 1;

		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
						 sizeof(struct vcpu_info))) {
			/*
			 * If this failed, userspace has screwed up the
			 * vcpu_info mapping. No interrupts for you.
			 */
			return 0;
		}
		read_lock_irqsave(&gpc->lock, flags);
	}

	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
	read_unlock_irqrestore(&gpc->lock, flags);
	return rc;
}

int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			mutex_lock(&kvm->lock);
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		mutex_lock(&kvm->lock);
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		mutex_unlock(&kvm->lock);
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			mutex_lock(&kvm->lock);
			kvm->arch.xen.upcall_vector = data->u.vector;
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_EVTCHN:
		r = kvm_xen_setattr_evtchn(kvm, data);
		break;

	default:
		break;
	}

	return r;
}

int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_cache.active)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
			data->u.shared_info.gfn = GPA_INVALID;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == GPA_INVALID) {
			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
			r = 0;
			break;
		}

		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_info_cache,
					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
					      sizeof(struct vcpu_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == GPA_INVALID) {
			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
						     &vcpu->arch.xen.vcpu_time_info_cache);
			r = 0;
			break;
		}

		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_time_info_cache,
					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
					      sizeof(struct pvclock_vcpu_time_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.gpa == GPA_INVALID) {
			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
						     &vcpu->arch.xen.runstate_cache);
			r = 0;
			break;
		}

		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.runstate_cache,
					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
					      sizeof(struct vcpu_runstate_info));
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}

		kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}
		if (get_kvmclock_ns(vcpu->kvm) <
		    data->u.runstate.state_entry_time) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.current_runstate = data->u.runstate.state;
		vcpu->arch.xen.runstate_entry_time =
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
			data->u.runstate.time_offline;
		vcpu->arch.xen.last_steal = current->sched_info.run_delay;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline &&
		    data->u.runstate.state != (u64)-1) {
			r = -EINVAL;
			break;
		}
		/* The adjustment must add up */
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}

		if (get_kvmclock_ns(vcpu->kvm) <
		    (vcpu->arch.xen.runstate_entry_time +
		     data->u.runstate.state_entry_time)) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.runstate_entry_time +=
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
			data->u.runstate.time_offline;

		if (data->u.runstate.state <= RUNSTATE_offline)
			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		if (data->u.vcpu_id >= KVM_MAX_VCPUS)
			r = -EINVAL;
		else {
			vcpu->arch.xen.vcpu_id = data->u.vcpu_id;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		if (data->u.timer.port) {
			if (data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
				r = -EINVAL;
				break;
			}
			vcpu->arch.xen.timer_virq = data->u.timer.port;
			kvm_xen_init_timer(vcpu);

			/* Restart the timer if it's set */
			if (data->u.timer.expires_ns)
				kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
						    data->u.timer.expires_ns -
						    get_kvmclock_ns(vcpu->kvm));
		} else if (kvm_xen_timer_enabled(vcpu)) {
			kvm_xen_stop_timer(vcpu);
			vcpu->arch.xen.timer_virq = 0;
		}

		r = 0;
		break;

	default:
		break;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (vcpu->arch.xen.runstate_cache.active) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		data->u.runstate.state_entry_time =
			vcpu->arch.xen.runstate_entry_time;
		data->u.runstate.time_running =
			vcpu->arch.xen.runstate_times[RUNSTATE_running];
		data->u.runstate.time_runnable =
			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
		data->u.runstate.time_blocked =
			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
		data->u.runstate.time_offline =
			vcpu->arch.xen.runstate_times[RUNSTATE_offline];
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		r = -EINVAL;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		data->u.vcpu_id = vcpu->arch.xen.vcpu_id;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		data->u.timer.port = vcpu->arch.xen.timer_virq;
		data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
		data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
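
/*
 * Populate the guest's hypercall page. With in-kernel interception each
 * 32-byte stub is 'mov $<nr>, %eax; vmcall/vmmcall; ret' padded with int3;
 * otherwise the page is copied from the blob that userspace configured.
 */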
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;

		if (page_num)
			return 1;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
			kfree(page);
			return 1;
		}
		/* Free the temporary blob copy on the success path too */
		kfree(page);
	}
	return 0;
}

int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;

	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, int cmd, u64 param, u64 *r)
{
	switch (cmd) {
	case SCHEDOP_yield:
		kvm_vcpu_on_spin(vcpu, true);
		*r = 0;
		return true;

	default:
		break;
	}

	return false;
}
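
/*
 * 32-bit guest layout of vcpu_set_singleshot_timer: identical to the
 * 64-bit one except that it lacks the 4 bytes of trailing padding after
 * 'flags', hence the packed attribute and the explicit compat size used
 * when copying it in below.
 */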
struct compat_vcpu_set_singleshot_timer {
	uint64_t timeout_abs_ns;
	uint32_t flags;
} __attribute__((packed));

static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
				  int vcpu_id, u64 param, u64 *r)
{
	struct vcpu_set_singleshot_timer oneshot;
	s64 delta;
	gpa_t gpa;
	int idx;

	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	switch (cmd) {
	case VCPUOP_set_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		/*
		 * The only difference for 32-bit compat is the 4 bytes of
		 * padding after the interesting part of the structure. So
		 * for a faithful emulation of Xen we have to *try* to copy
		 * the padding and return -EFAULT if we can't. Otherwise we
		 * might as well just have copied the 12-byte 32-bit struct.
		 */
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     offsetof(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, flags) !=
			     offsetof(struct vcpu_set_singleshot_timer, flags));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, flags));

		if (!gpa ||
		    kvm_vcpu_read_guest(vcpu, gpa, &oneshot, longmode ? sizeof(oneshot) :
					sizeof(struct compat_vcpu_set_singleshot_timer))) {
			*r = -EFAULT;
			return true;
		}

		delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
		if ((oneshot.flags & VCPU_SSHOTTMR_future) && delta < 0) {
			*r = -ETIME;
			return true;
		}

		kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta);
		*r = 0;
		return true;

	case VCPUOP_stop_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}
		kvm_xen_stop_timer(vcpu);
		*r = 0;
		return true;
	}

	return false;
}

static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
				       u64 *r)
{
	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	if (timeout) {
		uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
		int64_t delta = timeout - guest_now;

		/* Xen has a 'Linux workaround' in do_set_timer_op() which
		 * checks for negative absolute timeout values (caused by
		 * integer overflow), and for values about 13 days in the
		 * future (2^50ns) which would be caused by jiffies
		 * overflow. For those cases, it sets the timeout 100ms in
		 * the future (not *too* soon, since if a guest really did
		 * set a long timeout on purpose we don't want to keep
		 * churning CPU time by waking it up).
		 */
		if (unlikely((int64_t)timeout < 0 ||
			     (delta > 0 && (uint32_t) (delta >> 50) != 0))) {
			delta = 100 * NSEC_PER_MSEC;
			timeout = guest_now + delta;
		}

		kvm_xen_start_timer(vcpu, timeout, delta);
	} else {
		kvm_xen_stop_timer(vcpu);
	}

	*r = 0;
	return true;
}
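
/*
 * Top-level Xen hypercall dispatch. The hypercall number arrives in %rax;
 * arguments are in %rdi/%rsi/%rdx/%r10/%r8/%r9 for 64-bit guests and
 * %ebx/%ecx/%edx/%esi/%edi/%ebp for 32-bit guests. Anything not handled
 * in-kernel is punted to userspace as a KVM_EXIT_XEN exit.
 */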
int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6], r = -ENOSYS;
	bool handled = false;

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	longmode = is_64_bit_hypercall(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	switch (input) {
	case __HYPERVISOR_event_channel_op:
		if (params[0] == EVTCHNOP_send)
			handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
		break;
	case __HYPERVISOR_sched_op:
		handled = kvm_xen_hcall_sched_op(vcpu, params[0], params[1], &r);
		break;
	case __HYPERVISOR_vcpu_op:
		handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0], params[1],
						params[2], &r);
		break;
	case __HYPERVISOR_set_timer_op: {
		u64 timeout = params[0];
		/* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */
		if (!longmode)
			timeout |= params[1] << 32;
		handled = kvm_xen_hcall_set_timer_op(vcpu, timeout, &r);
		break;
	}
	default:
		break;
	}

	if (handled)
		return kvm_xen_hypercall_set_result(vcpu, r);

	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = static_call(kvm_x86_get_cpl)(vcpu);
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}

static inline int max_evtchn_port(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
		return EVTCHN_2L_NR_CHANNELS;
	else
		return COMPAT_EVTCHN_2L_NR_CHANNELS;
}
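
/*
 * 2-level event channel delivery: the port sets one bit in the shared_info
 * evtchn_pending bitmap, one bit (port / 64 for 64-bit guests, port / 32
 * for 32-bit) in the vCPU's evtchn_pending_sel, and finally the
 * evtchn_upcall_pending flag in the vcpu_info.
 */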

/*
 * The return value from this function is propagated to kvm_set_irq() API,
 * so it returns:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 *
 * It is also called directly from kvm_arch_set_irq_inatomic(), where the
 * only check on its return value is a comparison with -EWOULDBLOCK.
 */
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct kvm_vcpu *vcpu;
	unsigned long *pending_bits, *mask_bits;
	unsigned long flags;
	int port_word_bit;
	bool kick_vcpu = false;
	int vcpu_idx, idx, rc;

	vcpu_idx = READ_ONCE(xe->vcpu_idx);
	if (vcpu_idx >= 0)
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	else {
		vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
		if (!vcpu)
			return -EINVAL;
		WRITE_ONCE(xe->vcpu_idx, kvm_vcpu_get_idx(vcpu));
	}

	if (!vcpu->arch.xen.vcpu_info_cache.active)
		return -EINVAL;

	if (xe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	rc = -EWOULDBLOCK;

	idx = srcu_read_lock(&kvm->srcu);

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 64;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 32;
	}

	/*
	 * If this port wasn't already set, and if it isn't masked, then
	 * we try to set the corresponding bit in the in-kernel shadow of
	 * evtchn_pending_sel for the target vCPU. And if *that* wasn't
	 * already set, then we kick the vCPU in question to write to the
	 * *real* evtchn_pending_sel in its own guest vcpu_info struct.
	 */
	if (test_and_set_bit(xe->port, pending_bits)) {
		rc = 0; /* It was already raised */
	} else if (test_bit(xe->port, mask_bits)) {
		rc = -ENOTCONN; /* Masked */
	} else {
		rc = 1; /* Delivered to the bitmap in shared_info. */
		/* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
		read_unlock_irqrestore(&gpc->lock, flags);
		gpc = &vcpu->arch.xen.vcpu_info_cache;

		read_lock_irqsave(&gpc->lock, flags);
		if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
			/*
			 * Could not access the vcpu_info. Set the bit in-kernel
			 * and prod the vCPU to deliver it for itself.
			 */
			if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
				kick_vcpu = true;
			goto out_rcu;
		}

		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
			struct vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		} else {
			struct compat_vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit,
					      (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		}
	}

 out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kick_vcpu) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	return rc;
}
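
/*
 * Sleepable wrapper around kvm_xen_set_evtchn_fast(): retries after
 * refreshing the shared_info mapping, borrowing kvm->mm when invoked
 * from the irqfd workqueue rather than from a vCPU thread.
 */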
static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	bool mm_borrowed = false;
	int rc;

	rc = kvm_xen_set_evtchn_fast(xe, kvm);
	if (rc != -EWOULDBLOCK)
		return rc;

	if (current->mm != kvm->mm) {
		/*
		 * If not on a thread which already belongs to this KVM,
		 * we'd better be in the irqfd workqueue.
		 */
		if (WARN_ON_ONCE(current->mm))
			return -EINVAL;

		kthread_use_mm(kvm->mm);
		mm_borrowed = true;
	}

	/*
	 * For the irqfd workqueue, using the main kvm->lock mutex is
	 * fine since this function is invoked from kvm_set_irq() with
	 * no other lock held, no srcu. In future if it will be called
	 * directly from a vCPU thread (e.g. on hypercall for an IPI)
	 * then it may need to switch to using a leaf-node mutex for
	 * serializing the shared_info mapping.
	 */
	mutex_lock(&kvm->lock);

	/*
	 * It is theoretically possible for the page to be unmapped
	 * and the MMU notifier to invalidate the shared_info before
	 * we even get to use it. In that case, this looks like an
	 * infinite loop. It was tempting to do it via the userspace
	 * HVA instead... but that just *hides* the fact that it's
	 * an infinite loop, because if a fault occurs and it waits
	 * for the page to come back, it can *still* immediately
	 * fault and have to wait again, repeatedly.
	 *
	 * Conversely, the page could also have been reinstated by
	 * another thread before we even obtain the mutex above, so
	 * check again *first* before remapping it.
	 */
	do {
		struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
		int idx;

		rc = kvm_xen_set_evtchn_fast(xe, kvm);
		if (rc != -EWOULDBLOCK)
			break;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while (!rc);

	mutex_unlock(&kvm->lock);

	if (mm_borrowed)
		kthread_unuse_mm(kvm->mm);

	return rc;
}

/* This is the version called from kvm_set_irq() as the .set function */
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
{
	if (!level)
		return -1;

	return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
}

/*
 * Set up an event channel interrupt from the KVM IRQ routing table.
 * Used for e.g. PIRQ from passed through physical devices.
 */
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue)
{
	struct kvm_vcpu *vcpu;

	if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	/*
	 * Xen gives us interesting mappings from vCPU index to APIC ID,
	 * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
	 * to find it. Do that once at setup time, instead of every time.
	 * But beware that on live update / live migration, the routing
	 * table might be reinstated before the vCPU threads have finished
	 * recreating their vCPUs.
	 */
	vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
	if (vcpu)
		e->xen_evtchn.vcpu_idx = kvm_vcpu_get_idx(vcpu);
	else
		e->xen_evtchn.vcpu_idx = -1;

	e->xen_evtchn.port = ue->u.xen_evtchn.port;
	e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
	e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
	e->set = evtchn_set_fn;

	return 0;
}

/*
 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
{
	struct kvm_xen_evtchn e;
	int ret;

	if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	e.port = uxe->port;
	e.vcpu_id = uxe->vcpu;
	e.vcpu_idx = -1;
	e.priority = uxe->priority;

	ret = kvm_xen_set_evtchn(&e, kvm);

	/*
	 * None of that 'return 1 if it actually got delivered' nonsense.
	 * We don't care if it was masked (-ENOTCONN) either.
	 */
	if (ret > 0 || ret == -ENOTCONN)
		ret = 0;

	return ret;
}

/*
 * Support for *outbound* event channel events via the EVTCHNOP_send hypercall.
 */
struct evtchnfd {
	u32 send_port;
	u32 type;
	union {
		struct kvm_xen_evtchn port;
		struct {
			u32 port; /* zero */
			struct eventfd_ctx *ctx;
		} eventfd;
	} deliver;
};
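
/*
 * Entries are stored in kvm->arch.xen.evtchn_ports, keyed by send_port.
 * deliver.port.port == 0 means the event is handed to userspace via the
 * eventfd; otherwise it is delivered in-kernel to the given local port.
 */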

/*
 * Update target vCPU or priority for a registered sending channel.
 */
static int kvm_xen_eventfd_update(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct evtchnfd *evtchnfd;

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->lock);

	if (!evtchnfd)
		return -ENOENT;

	/* For an UPDATE, nothing may change except the priority/vcpu */
	if (evtchnfd->type != data->u.evtchn.type)
		return -EINVAL;

	/*
	 * Port cannot change, and if it's zero that was an eventfd
	 * which can't be changed either.
	 */
	if (!evtchnfd->deliver.port.port ||
	    evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

/*
 * Configure the target (eventfd or local port delivery) for sending on
 * a given event channel.
 */
static int kvm_xen_eventfd_assign(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct eventfd_ctx *eventfd = NULL;
	struct evtchnfd *evtchnfd = NULL;
	int ret = -EINVAL;

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
	if (!evtchnfd)
		return -ENOMEM;

	switch (data->u.evtchn.type) {
	case EVTCHNSTAT_ipi:
		/* IPI must map back to the same port# */
		if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
			goto out; /* -EINVAL */
		break;

	case EVTCHNSTAT_interdomain:
		if (data->u.evtchn.deliver.port.port) {
			if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
				goto out; /* -EINVAL */
		} else {
			eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
			if (IS_ERR(eventfd)) {
				ret = PTR_ERR(eventfd);
				goto out;
			}
		}
		break;

	case EVTCHNSTAT_virq:
	case EVTCHNSTAT_closed:
	case EVTCHNSTAT_unbound:
	case EVTCHNSTAT_pirq:
	default: /* Unknown event channel type */
		goto out; /* -EINVAL */
	}

	evtchnfd->send_port = data->u.evtchn.send_port;
	evtchnfd->type = data->u.evtchn.type;
	if (eventfd) {
		evtchnfd->deliver.eventfd.ctx = eventfd;
	} else {
		/* We only support 2 level event channels for now */
		if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
			goto out; /* -EINVAL */

		evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
		evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	}

	mutex_lock(&kvm->lock);
	ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
			GFP_KERNEL);
	mutex_unlock(&kvm->lock);
	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
out:
	if (eventfd)
		eventfd_ctx_put(eventfd);
	kfree(evtchnfd);
	return ret;
}

static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
{
	struct evtchnfd *evtchnfd;

	mutex_lock(&kvm->lock);
	evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->lock);

	if (!evtchnfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	if (!evtchnfd->deliver.port.port)
		eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
	kfree(evtchnfd);
	return 0;
}

static int kvm_xen_eventfd_reset(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;

	mutex_lock(&kvm->lock);
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
		synchronize_srcu(&kvm->srcu);
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	mutex_unlock(&kvm->lock);

	return 0;
}

static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
		return kvm_xen_eventfd_reset(kvm);

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
		return kvm_xen_eventfd_deassign(kvm, port);
	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
		return kvm_xen_eventfd_update(kvm, data);
	if (data->u.evtchn.flags)
		return -EINVAL;

	return kvm_xen_eventfd_assign(kvm, data);
}
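
/*
 * EVTCHNOP_send from the guest: look up the sending port in the idr and
 * either deliver in-kernel (port-type entries) or signal the eventfd so
 * userspace can handle it.
 */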
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
{
	struct evtchnfd *evtchnfd;
	struct evtchn_send send;
	gpa_t gpa;
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
		*r = -EFAULT;
		return true;
	}

	/* The evtchn_ports idr is protected by vcpu->kvm->srcu */
	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
	if (!evtchnfd)
		return false;

	if (evtchnfd->deliver.port.port) {
		int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
		if (ret < 0 && ret != -ENOTCONN)
			return false;
	} else {
		eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
	}

	*r = 0;
	return true;
}
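
/*
 * The Xen vCPU ID defaults to the KVM vCPU index; userspace can override
 * it with KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID.
 */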
void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
}

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_timer_enabled(vcpu))
		kvm_xen_stop_timer(vcpu);

	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
				     &vcpu->arch.xen.runstate_cache);
	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_info_cache);
	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_time_info_cache);
}

void kvm_xen_init_vm(struct kvm *kvm)
{
	idr_init(&kvm->arch.xen.evtchn_ports);
}

void kvm_xen_destroy_vm(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;

	kvm_gfn_to_pfn_cache_destroy(kvm, &kvm->arch.xen.shinfo_cache);

	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	idr_destroy(&kvm->arch.xen.evtchn_ports);

	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}