// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */
9 #ifndef __ARCH_X86_KVM_XEN_H__
10 #define __ARCH_X86_KVM_XEN_H__
13 #include <linux/jump_label_ratelimit.h>
15 extern struct static_key_false_deferred kvm_xen_enabled;
17 int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
18 int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
19 int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
20 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
21 int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
22 int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
23 int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
24 void kvm_xen_init_vm(struct kvm *kvm);
25 void kvm_xen_destroy_vm(struct kvm *kvm);
27 int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e,
29 int kvm_xen_setup_evtchn(struct kvm *kvm,
30 struct kvm_kernel_irq_routing_entry *e,
31 const struct kvm_irq_routing_entry *ue);
33 static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
35 return static_branch_unlikely(&kvm_xen_enabled.key) &&
36 kvm->arch.xen_hvm_config.msr;
39 static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
41 return static_branch_unlikely(&kvm_xen_enabled.key) &&
42 (kvm->arch.xen_hvm_config.flags &
43 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
46 static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
48 if (static_branch_unlikely(&kvm_xen_enabled.key) &&
49 vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector)
50 return __kvm_xen_has_interrupt(vcpu);
55 static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
60 static inline void kvm_xen_init_vm(struct kvm *kvm)
64 static inline void kvm_xen_destroy_vm(struct kvm *kvm)
68 static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
73 static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
78 static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
84 int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
86 #include <asm/pvclock-abi.h>
87 #include <asm/xen/interface.h>
88 #include <xen/interface/vcpu.h>
90 void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);
92 static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
94 kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
97 static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
100 * If the vCPU wasn't preempted but took a normal exit for
101 * some reason (hypercalls, I/O, etc.), that is accounted as
102 * still RUNSTATE_running, as the VMM is still operating on
103 * behalf of the vCPU. Only if the VMM does actually block
104 * does it need to enter RUNSTATE_blocked.
107 kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
110 /* 32-bit compatibility definitions, also used natively in 32-bit build */
111 struct compat_arch_vcpu_info {
116 struct compat_vcpu_info {
117 uint8_t evtchn_upcall_pending;
118 uint8_t evtchn_upcall_mask;
120 uint32_t evtchn_pending_sel;
121 struct compat_arch_vcpu_info arch;
122 struct pvclock_vcpu_time_info time;
123 }; /* 64 bytes (x86) */
/* 32-bit layout of the per-VM arch_shared_info in the Xen shared page. */
struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;	/* NOTE(review): reconstructed from Xen ABI — confirm */
};
135 struct compat_shared_info {
136 struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
137 uint32_t evtchn_pending[32];
138 uint32_t evtchn_mask[32];
139 struct pvclock_wall_clock wc;
140 struct compat_arch_shared_info arch;
/* One event channel per bit of the shared-page pending bitmap (2-level ABI). */
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 *				\
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
/*
 * 32-bit layout of the per-vCPU runstate area; packed because the 32-bit
 * ABI does not 8-byte-align the uint64_t fields.
 */
struct compat_vcpu_runstate_info {
	int state;	/* NOTE(review): reconstructed from Xen vcpu ABI — confirm */
	uint64_t state_entry_time;
	uint64_t time[4];	/* nanoseconds spent in each RUNSTATE_* */
} __attribute__((packed));
152 #endif /* __ARCH_X86_KVM_XEN_H__ */