// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#include "x86.h"
#include "xen.h"
#include "hyperv.h"

#include <linux/kvm_host.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>

#include "trace.h"
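/*
 * kvm_xen_enabled gates the Xen hypercall MSR fast path. It is raised in
 * kvm_xen_hvm_config() when a VM first sets a hypercall MSR, and dropped
 * (deferred by HZ, hence the DEFERRED key) when the MSR is cleared or the
 * VM is destroyed.
 */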
DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	gpa_t gpa = gfn_to_gpa(gfn);
	int wc_ofs, sec_hi_ofs;
	int ret;
	int idx = srcu_read_lock(&kvm->srcu);

	ret = kvm_gfn_to_hva_cache_init(kvm, &kvm->arch.xen.shinfo_cache,
					gpa, PAGE_SIZE);
	if (ret)
		goto out;

	kvm->arch.xen.shinfo_set = true;

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

	/* 32-bit location by default */
	wc_ofs = offsetof(struct compat_shared_info, wc);
	sec_hi_ofs = offsetof(struct compat_shared_info, arch.wc_sec_hi);

#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (kvm->arch.xen.long_mode) {
		wc_ofs = offsetof(struct shared_info, wc);
		sec_hi_ofs = offsetof(struct shared_info, wc_sec_hi);
	}
#endif

	kvm_write_wall_clock(kvm, gpa + wc_ofs, sec_hi_ofs - wc_ofs);
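	/*
	 * kvm_write_wall_clock() brackets the update with increments of the
	 * wc_version field, leaving it odd while the write is in flight, so
	 * a guest reading the sec/nsec pair can detect and retry a torn
	 * update.
	 */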
	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	u8 rc = 0;

	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */
	struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache;
	struct kvm_memslots *slots = kvm_memslots(v->kvm);
	unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending);

	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending));

	/*
	 * For efficiency, this mirrors the checks for using the valid
	 * cache in kvm_read_guest_offset_cached(), but just uses
	 * __get_user() instead, falling back to the slow path on a
	 * cache miss.
	 */
	if (likely(slots->generation == ghc->generation &&
		   !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
		/* Fast path */
		__get_user(rc, (u8 __user *)ghc->hva + offset);
	} else {
		/* Slow path */
		kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
					     sizeof(rc));
	}

	return rc;
}
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			r = 0;
		}
		break;
	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (data->u.shared_info.gfn == GPA_INVALID) {
			kvm->arch.xen.shinfo_set = false;
			r = 0;
			break;
		}
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		break;
	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			kvm->arch.xen.upcall_vector = data->u.vector;
			r = 0;
		}
		break;
	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
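/*
 * A minimal usage sketch from the VMM side (fds and the shinfo_gpa value
 * are hypothetical):
 *
 *	struct kvm_xen_hvm_attr ha = {
 *		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
 *		.u.shared_info.gfn = shinfo_gpa >> PAGE_SHIFT,
 *	};
 *	ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha);
 *
 * Passing GPA_INVALID as the gfn tears the mapping down again.
 */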
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;
	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_set)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
			data->u.shared_info.gfn = GPA_INVALID;
		r = 0;
		break;
	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;
	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
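	/*
	 * The SRCU read lock is held across the kvm_gfn_to_hva_cache_init()
	 * calls below because they look up the current memslots.
	 */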
	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == GPA_INVALID) {
			vcpu->arch.xen.vcpu_info_set = false;
			r = 0;
			break;
		}

		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_info_cache,
					      data->u.gpa,
					      sizeof(struct vcpu_info));
		if (!r) {
			vcpu->arch.xen.vcpu_info_set = true;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == GPA_INVALID) {
			vcpu->arch.xen.vcpu_time_info_set = false;
			r = 0;
			break;
		}

		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_time_info_cache,
					      data->u.gpa,
					      sizeof(struct pvclock_vcpu_time_info));
		if (!r) {
			vcpu->arch.xen.vcpu_time_info_set = true;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		}
		break;

	default:
		break;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_set)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_set)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;
	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;

		if (page_num)
			return 1;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		kvm_x86_ops.patch_hypercall(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);
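		/*
		 * Each 32-byte stanza written below thus decodes as:
		 *
		 *	b8 NN NN NN NN	mov	$N, %eax  (hypercall number)
		 *	0f 01 c1/d9	vmcall or vmmcall
		 *	c3		ret
		 *	cc cc cc ...	int3 padding to 32 bytes
		 */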
		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;
		int ret;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		/* Free the copy whether or not the write succeeds */
		ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
		kfree(page);
		if (ret)
			return 1;
	}
	return 0;
}
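/*
 * Background: the guest discovers the hypercall MSR (conventionally
 * 0x40000000, as advertised in the Xen CPUID leaves, though the number is
 * whatever the VMM registered via kvm_xen_hvm_config()) and writes the
 * desired hypercall page GPA to it; that wrmsr is what lands in
 * kvm_xen_write_hypercall_page() above.
 */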
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->lock);
	return 0;
}
void kvm_xen_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}
static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}
static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;

	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}
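/*
 * kvm_xen_hypercall() below exits to userspace with KVM_EXIT_XEN; once the
 * VMM has filled in run->xen.u.hcall.result and re-entered the vCPU, the
 * completion callback above stores that result in RAX and skips the
 * hypercall instruction, unless the guest has since moved RIP.
 */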
int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6];

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	longmode = is_64_bit_mode(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = kvm_x86_ops.get_cpl(vcpu);
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}
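/*
 * A minimal sketch of the userspace side (handle_xen_hcall() is a
 * hypothetical VMM helper): on return from KVM_RUN the VMM services the
 * hypercall, stores the Xen return value, and re-enters the guest, which
 * triggers the completion callback above:
 *
 *	if (run->exit_reason == KVM_EXIT_XEN &&
 *	    run->xen.type == KVM_EXIT_XEN_HCALL) {
 *		run->xen.u.hcall.result = handle_xen_hcall(&run->xen.u.hcall);
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *	}
 */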