// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !svm->nested.nested_run_pending) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = fault->error_code;
		svm->vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

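/*
 * While L2 runs, svm->vmcb (vmcb02) must intercept everything that L1 wants
 * intercepted (so the exit can be reflected to L1) plus everything L0 itself
 * needs.  Recompute that union: start from L1's own intercepts (vmcb01),
 * adjust for virtual interrupt masking, then OR in the intercept bits L1
 * requested for L2 (svm->nested.ctl).
 */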
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb.  */
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the MSR permission bitmaps of KVM and the
	 * nested vmcb.  It is optimized in that it only merges the parts
	 * where the KVM MSR permission bitmap may contain zero bits.
	 */
	int i;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
	return true;
}

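/*
 * Called in response to KVM_REQ_GET_NESTED_STATE_PAGES, i.e. once guest
 * memory can be accessed again after nested state has been restored from
 * userspace.  For SVM the only deferred work is rebuilding the merged MSR
 * permission bitmap, which requires reading L1's MSRPM from guest memory.
 */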
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!nested_svm_vmrun_msrpm(svm)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}
	return true;
}

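/*
 * Consistency checks on the vmcb12 control area.  On hardware a malformed
 * VMCB makes VMRUN exit with SVM_EXIT_ERR, which is exactly what
 * nested_svm_vmrun() synthesizes when these checks fail.
 */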
static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
	if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
		return false;
	if (control->asid == 0)
		return false;
	if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	bool vmcb12_lma;

	if ((vmcb12->save.efer & EFER_SVME) == 0)
		return false;
	if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
		return false;
	if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
		return false;

	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
	if (vmcb12_lma) {
		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
		    kvm_vcpu_is_illegal_gpa(vcpu, vmcb12->save.cr3))
			return false;
	}
	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
		return false;

	return nested_vmcb_check_controls(&vmcb12->control);
}

static void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
					    struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_svm_check_controls will check it.  */
	svm->nested.ctl.asid = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;

	svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
		return -EINVAL;

	if (!nested_npt && is_pae_paging(vcpu) &&
	    (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
		if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
			return -EINVAL;
	}

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync here and in
	 * kvm_init_shadow_npt_mmu().
	 */
	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3, false, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);
	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	svm->vmcb->save.es = vmcb12->save.es;
	svm->vmcb->save.cs = vmcb12->save.cs;
	svm->vmcb->save.ss = vmcb12->save.ss;
	svm->vmcb->save.ds = vmcb12->save.ds;
	svm->vmcb->save.gdtr = vmcb12->save.gdtr;
	svm->vmcb->save.idtr = vmcb12->save.idtr;
	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
	svm_set_efer(&svm->vcpu, vmcb12->save.efer);
	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;

	svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
	svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
	svm->vmcb->save.cpl = vmcb12->save.cpl;
}

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	/*
	 * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
	 * avic_physical_id.
	 */
	WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
	svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa;
	svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3.  */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(&svm->vcpu);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl =
		(svm->nested.ctl.int_ctl & ~mask) |
		(svm->vmcb01.ptr->control.int_ctl & mask);

	svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here.
	 */
	recalc_intercepts(svm);

	vmcb_mark_all_dirty(svm->vmcb);
}

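/*
 * Emulate VMRUN's entry into L2: cache vmcb12's control area, switch the
 * vCPU to vmcb02, load the L2 save state and CR3, and finally set GIF.
 * The caller is responsible for stashing the relevant L1 state beforehand.
 */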
int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_vmloadsave(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
	nested_load_control_from_vmcb12(svm, &vmcb12->control);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm));
	if (ret)
		return ret;

	if (!npt_enabled)
		svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

	svm_set_gif(svm, true);

	return 0;
}

int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	++svm->vcpu.stat.nested_run;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	if (!nested_vmcb_checks(svm, vmcb12)) {
		vmcb12->control.exit_code = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1 = 0;
		vmcb12->control.exit_info_2 = 0;
		goto out;
	}

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	svm->vmcb01.ptr->save.efer = svm->vcpu.arch.efer;
	svm->vmcb01.ptr->save.cr0 = kvm_read_cr0(&svm->vcpu);
	svm->vmcb01.ptr->save.cr4 = svm->vcpu.arch.cr4;
	svm->vmcb01.ptr->save.rflags = kvm_get_rflags(&svm->vcpu);
	svm->vmcb01.ptr->save.rip = kvm_rip_read(&svm->vcpu);

	if (!npt_enabled)
		svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(&svm->vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

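/*
 * Emulate #VMEXIT from L2 to L1: copy the exit information and the current
 * L2 state into vmcb12, switch back to vmcb01 and restore the L1 state that
 * was stashed there by nested_svm_vmrun().
 */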
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *vmcb12;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es = vmcb->save.es;
	vmcb12->save.cs = vmcb->save.cs;
	vmcb12->save.ss = vmcb->save.ss;
	vmcb12->save.ds = vmcb->save.ds;
	vmcb12->save.gdtr = vmcb->save.gdtr;
	vmcb12->save.idtr = vmcb->save.idtr;
	vmcb12->save.efer = svm->vcpu.arch.efer;
	vmcb12->save.cr0 = kvm_read_cr0(&svm->vcpu);
	vmcb12->save.cr3 = kvm_read_cr3(&svm->vcpu);
	vmcb12->save.cr2 = vmcb->save.cr2;
	vmcb12->save.cr4 = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
	vmcb12->save.rip = kvm_rip_read(&svm->vcpu);
	vmcb12->save.rsp = kvm_rsp_read(&svm->vcpu);
	vmcb12->save.rax = kvm_rax_read(&svm->vcpu);
	vmcb12->save.dr7 = vmcb->save.dr7;
	vmcb12->save.dr6 = svm->vcpu.arch.dr6;
	vmcb12->save.cpl = vmcb->save.cpl;

	vmcb12->control.int_state = vmcb->control.int_state;
	vmcb12->control.exit_code = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1 = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2 = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_save_pending_event_to_vmcb12(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip = vmcb->control.next_rip;

	vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

	vmcb12->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	vmcb12->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	nested_svm_vmloadsave(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

	svm_switch_vmcb(svm, &svm->vmcb01);

	/*
	 * On vmexit the GIF is set to false and
	 * no event can be injected in L1.
	 */
	svm_set_gif(svm, false);
	svm->vmcb->control.exit_int_info = 0;

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	svm->nested.ctl.nested_cr3 = 0;

	/*
	 * Restore processor state that had been saved in vmcb01
	 */
	kvm_set_rflags(&svm->vcpu, svm->vmcb->save.rflags | X86_EFLAGS_FIXED);
	svm_set_efer(&svm->vcpu, svm->vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, svm->vmcb->save.cr4);
	kvm_rax_write(&svm->vcpu, svm->vmcb->save.rax);
	kvm_rsp_write(&svm->vcpu, svm->vmcb->save.rsp);
	kvm_rip_write(&svm->vcpu, svm->vmcb->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);

	vmcb_mark_all_dirty(svm->vmcb);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);

	rc = nested_svm_load_cr3(&svm->vcpu, svm->vmcb->save.cr3, false);
	if (rc)
		return 1;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *vmcb02_page;

	if (svm->nested.initialized)
		return 0;

	vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vmcb02_page)
		return -ENOMEM;
	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_vmcb02;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_vmcb02:
	__free_page(vmcb02_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
	svm->nested.vmcb02.ptr = NULL;

	svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu)) {
		svm->nested.nested_run_pending = 0;
		leave_guest_mode(&svm->vcpu);

		svm_switch_vmcb(svm, &svm->vmcb01);

		nested_svm_uninit_mmu_context(&svm->vcpu);
		vmcb_mark_all_dirty(svm->vmcb);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
}

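/*
 * The MSR permission bitmap uses two bits per MSR (read intercept, write
 * intercept); svm_msrpm_offset() maps the MSR index to a 32-bit word in the
 * bitmap.  An MSR exit is forwarded to L1 only if the corresponding bit is
 * set in L1's own bitmap at svm->nested.ctl.msrpm_base_pa.
 */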
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but need in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

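/*
 * The I/O permission bitmap has one bit per port.  A multi-byte access
 * checks one bit per byte, so the lookup may straddle a byte boundary; in
 * that case two bytes of L1's IOPM are read (iopm_len == 2).
 */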
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

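/*
 * Decide whether a pending event should cause a synthetic #VMEXIT to L1
 * instead of being injected into L2.  Events are considered in priority
 * order (INIT, exceptions, SMI, NMI, external interrupts); -EBUSY tells the
 * caller that the event cannot be delivered right now.
 */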
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

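/*
 * First-pass filter run before nested_svm_exit_handled(): exits that L0
 * must always handle itself (INTR, NMI, NPF, and exceptions intercepted by
 * vmcb01 for L0's own purposes) return NESTED_EXIT_HOST; everything else
 * returns NESTED_EXIT_CONTINUE so that L1's intercepts are consulted.
 */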
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
		    excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

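/*
 * KVM_GET_NESTED_STATE: the userspace blob is a struct kvm_nested_state
 * header followed by KVM_STATE_NESTED_SVM_VMCB_SIZE bytes in data.svm[0]
 * holding the cached vmcb12 control area and the L1 save state (vmcb01).
 */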
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out.  */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the fields.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
			 sizeof(user_vmcb->control)))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;
out:
	return kvm_state.size;
}

static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	unsigned long cr0;
	int ret;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0.  */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(svm);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ret = -ENOMEM;
	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	save = kzalloc(sizeof(*save), GFP_KERNEL);
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;

	ret = -EINVAL;
	if (!nested_vmcb_check_controls(ctl))
		goto out_free;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_checks).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 * TODO: validate reserved bits for all saved state.
	 */
	if (!(save->cr0 & X86_CR0_PG))
		goto out_free;

	/*
	 * All checks done, we can enter guest mode.  Userspace provides
	 * vmcb12.control, which will be combined with L1 and stored into
	 * vmcb02, and the L1 save state which we store in vmcb01.
	 * L2 registers if needed are moved from the current VMCB to VMCB02.
	 */

	svm->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
	if (svm->current_vmcb == &svm->vmcb01)
		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
	svm->vmcb01.ptr->save = *save;
	nested_load_control_from_vmcb12(svm, ctl);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);

	nested_vmcb02_prepare_control(svm);

	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};