// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */
#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "svm.h"
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}
static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}
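/*
 * Switch the vcpu to the nested MMU: page-table walks use L1's nested_cr3,
 * and faults that belong to L1 are reflected via nested_svm_inject_npf_exit().
 */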
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_mmu(vcpu);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu->shadow_root_level = kvm_x86_ops.get_tdp_level(vcpu);
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
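/*
 * Recompute the intercept bitmaps of the active VMCB while L2 is running:
 * start from L1's intercepts saved in hsave, then OR in the intercepts
 * cached from the nested VMCB.
 */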
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr;
	c->intercept_dr = h->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions;
	c->intercept = h->intercept;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

	c->intercept_cr |= g->intercept_cr;
	c->intercept_dr |= g->intercept_dr;
	c->intercept_exceptions |= g->intercept_exceptions;
	c->intercept |= g->intercept;
}
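/* Copy all control fields (but not the save area) from one VMCB to another. */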
static void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr = from->intercept_cr;
	dst->intercept_dr = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept = from->intercept;
	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	dst->asid = from->asid;
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the MSR permission bitmaps of KVM and the
	 * nested VMCB. It is optimized in that it only merges the parts where
	 * the KVM MSR permission bitmap may contain zero bits.
	 */
	int i;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];
		offset = svm->nested.vmcb_msrpm + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}
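/*
 * Basic consistency checks on the nested VMCB; when they fail, the caller
 * reports SVM_EXIT_ERR to L1 instead of entering guest mode.
 */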
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
	    (vmcb->save.cr0 & X86_CR0_NW))
		return false;

	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}
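/*
 * Load L2 state from the nested VMCB into the active VMCB and switch the
 * vcpu into guest mode.  The caller is expected to have saved L1 state
 * into hsave beforehand.
 */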
void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			  struct vmcb *nested_vmcb, struct kvm_host_map *map)
{
	bool evaluate_pending_interrupts =
		is_intercept(svm, INTERCEPT_VINTR) ||
		is_intercept(svm, INTERCEPT_IRET);

	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
		svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
		nested_svm_init_mmu_context(&svm->vcpu);
	}

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
	} else
		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;

	svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
	svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;

	/* cache intercepts */
	svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
	svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
	svm->nested.intercept = nested_vmcb->control.intercept;

	svm_flush_tlb(&svm->vcpu);
	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;

	svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	svm->vmcb->control.pause_filter_count =
		nested_vmcb->control.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh =
		nested_vmcb->control.pause_filter_thresh;

	kvm_vcpu_unmap(&svm->vcpu, map, true);

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	svm->nested.vmcb = vmcb_gpa;

	/*
	 * If L1 had a pending IRQ/NMI before executing VMRUN,
	 * which wasn't delivered because it was disallowed (e.g.
	 * interrupts disabled), L0 needs to evaluate if this pending
	 * event should cause an exit from L2 to L1 or be delivered
	 * directly to L2.
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request.  However, VMRUN can unblock interrupts
	 * by implicitly setting GIF, so force L0 to perform pending event
	 * evaluation by requesting a KVM_REQ_EVENT.
	 */
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	mark_all_dirty(svm->vmcb);
}
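/*
 * Emulate the VMRUN instruction: validate the nested VMCB, save L1 state
 * into hsave, then enter L2 via enter_svm_guest_mode().
 */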
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = map.hva;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1 = 0;
		nested_vmcb->control.exit_info_2 = 0;

		kvm_vcpu_unmap(&svm->vcpu, &map, true);

		return ret;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es = vmcb->save.es;
	hsave->save.cs = vmcb->save.cs;
	hsave->save.ss = vmcb->save.ss;
	hsave->save.ds = vmcb->save.ds;
	hsave->save.gdtr = vmcb->save.gdtr;
	hsave->save.idtr = vmcb->save.idtr;
	hsave->save.efer = svm->vcpu.arch.efer;
	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp = vmcb->save.rsp;
	hsave->save.rax = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(hsave, vmcb);

	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);

	if (!nested_svm_vmrun_msrpm(svm)) {
		svm->vmcb->control.exit_code = SVM_EXIT_ERR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = 0;
		svm->vmcb->control.exit_info_2 = 0;

		nested_svm_vmexit(svm);
	}

	return ret;
}
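/*
 * Copy the register state that the VMLOAD/VMSAVE instructions move between
 * a VMCB and the processor (segment state and syscall/sysenter MSRs).
 */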
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}
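/*
 * Emulate #VMEXIT: copy the current L2 state back into the nested VMCB,
 * restore L1's saved state from hsave and leave guest mode.
 */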
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es = vmcb->save.es;
	nested_vmcb->save.cs = vmcb->save.cs;
	nested_vmcb->save.ss = vmcb->save.ss;
	nested_vmcb->save.ds = vmcb->save.ds;
	nested_vmcb->save.gdtr = vmcb->save.gdtr;
	nested_vmcb->save.idtr = vmcb->save.idtr;
	nested_vmcb->save.efer = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2 = vmcb->save.cr2;
	nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip = vmcb->save.rip;
	nested_vmcb->save.rsp = vmcb->save.rsp;
	nested_vmcb->save.rax = vmcb->save.rax;
	nested_vmcb->save.dr7 = vmcb->save.dr7;
	nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
	nested_vmcb->save.cpl = vmcb->save.cpl;

	nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector = vmcb->control.int_vector;
	nested_vmcb->control.int_state = vmcb->control.int_state;
	nested_vmcb->control.exit_code = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

	if (svm->nrips_enabled)
		nested_vmcb->control.next_rip = vmcb->control.next_rip;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * Exit_int_info and event_inj can't be both valid because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl = 0;
	nested_vmcb->control.event_inj = 0;
	nested_vmcb->control.event_inj_err = 0;

	nested_vmcb->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	nested_vmcb->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}
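/*
 * Check L1's MSR permission bitmap to decide whether an MSR access
 * intercepted while running L2 must be forwarded to L1.
 */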
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
/* DB exceptions for our internal use must not cause vmexit */
static int nested_svm_intercept_db(struct vcpu_svm *svm)
{
	unsigned long dr6 = svm->vmcb->save.dr6;

	/* Always catch it and pass it to userspace if debugging.  */
	if (svm->vcpu.guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
		return NESTED_EXIT_HOST;

	/* if we're not singlestepping, it's not ours */
	if (!svm->nmi_singlestep)
		goto reflected_db;

	/* if it's not a singlestep exception, it's not ours */
	if (!(dr6 & DR6_BS))
		goto reflected_db;

	/* if the guest is singlestepping, it should get the vmexit */
	if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
		disable_nmi_singlestep(svm);
		goto reflected_db;
	}

	/* it's ours, the nested hypervisor must not see this one */
	return NESTED_EXIT_HOST;

reflected_db:
	/*
	 * Synchronize guest DR6 here just like in kvm_deliver_exception_payload;
	 * it will be moved into the nested VMCB by nested_svm_vmexit.  Once
	 * exceptions are moved to svm_check_nested_events, all of this will
	 * just go away and we can return NESTED_EXIT_HOST unconditionally.
	 * db_interception will queue the exception, which will be processed
	 * by svm_check_nested_events if a nested vmexit is required, and we
	 * will just use kvm_deliver_exception_payload to copy the payload to
	 * DR6 before vmexit.
	 */
	WARN_ON(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT);
	svm->vcpu.arch.dr6 &= ~(DR_TRAP_BITS | DR6_RTM);
	svm->vcpu.arch.dr6 |= dr6 & ~DR6_FIXED_1;
	return NESTED_EXIT_DONE;
}
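/*
 * Check L1's I/O permission bitmap to decide whether an IN/OUT intercepted
 * while running L2 must be forwarded to L1.
 */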
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.vmcb_iopm + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
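/*
 * Decide whether the current #VMEXIT is handled by L0 (NESTED_EXIT_HOST)
 * or has to be reflected to L1 as a nested #VMEXIT (NESTED_EXIT_DONE).
 */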
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
		if (svm->nested.intercept_exceptions & excp_bits) {
			if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
				vmexit = nested_svm_intercept_db(svm);
			else if (exit_code == SVM_EXIT_EXCP_BASE + BP_VECTOR &&
				 svm->vcpu.guest_debug & KVM_GUESTDBG_USE_SW_BP)
				vmexit = NESTED_EXIT_HOST;
			else
				vmexit = NESTED_EXIT_DONE;
		}
		/* async page fault always causes a vmexit */
		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
			 svm->vcpu.arch.exception.nested_apf != 0)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}
int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}
int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}
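/*
 * Turn an exception that L1 intercepts into a pending nested #VMEXIT.
 * Returns NESTED_EXIT_DONE when the #VMEXIT has been set up, 0 otherwise.
 */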
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code)
{
	int vmexit;

	if (!is_guest_mode(&svm->vcpu))
		return 0;

	vmexit = nested_svm_intercept(svm);
	if (vmexit != NESTED_EXIT_DONE)
		return 0;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (svm->vcpu.arch.exception.nested_apf)
		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
	else if (svm->vcpu.arch.exception.has_payload)
		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
	else
		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

	svm->nested.exit_required = true;

	return vmexit;
}
static void nested_svm_intr(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	/* nested_svm_vmexit() gets called afterwards from handle_exit */
	svm->nested.exit_required = true;
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
}
static bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return (svm->nested.intercept & 1ULL);
}
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required;

	if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
		if (block_nested_events)
			return -EBUSY;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}
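/*
 * Exits that L0 must always handle itself, regardless of L1's intercepts
 * (e.g. machine checks, and page faults that L0 can resolve on its own).
 */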
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_EXCP_BASE + MC_VECTOR:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_NPF:
		/* For now we are always handling NPFs when using them */
		if (npt_enabled)
			return NESTED_EXIT_HOST;
		break;
	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
		/* When we're shadowing, trap PFs, but not async PF */
		if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
			return NESTED_EXIT_HOST;
		break;
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}
struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
};