// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"
#include "hyperv.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
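
/*
 * CC() wraps the nested-state consistency checks below: it evaluates to its
 * condition and, when the check fails, the underlying macro records which
 * check tripped (e.g. via a tracepoint), so rejected VMRUNs and rejected
 * KVM_SET_NESTED_STATE calls can be diagnosed.
 */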

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb12_is_intercept(&svm->nested.ctl,
				INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !svm->nested.nested_run_pending) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = fault->error_code;
		svm->vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;

	/*
	 * The NPT format depends on L1's CR4 and EFER, which is in vmcb01.  Note,
	 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
	 * vCPU state.  CR0.WP is explicitly ignored, while CR0.PG is required.
	 */
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
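
/*
 * recalc_intercepts() recomputes vmcb02's intercept vectors while L2 is
 * active: it starts from L0's intercepts (vmcb01), drops the ones that only
 * make sense when L1 runs directly, and then ORs in L1's intercepts from the
 * cached vmcb12 controls, so an event is intercepted if either hypervisor
 * wants it.
 */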

void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct vmcb_ctrl_area_cached *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	/* If SMI is not intercepted, ignore guest SMI intercept as well */
	if (!intercept_smi)
		vmcb_clr_intercept(c, INTERCEPT_SMI);

	vmcb_set_intercept(c, INTERCEPT_VMLOAD);
	vmcb_set_intercept(c, INTERCEPT_VMSAVE);
}

/*
 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
 * is optimized in that it only merges the parts where KVM MSR permission bitmap
 * may contain zero bits.
 */
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	struct hv_enlightenments *hve =
		(struct hv_enlightenments *)svm->nested.ctl.reserved_sw;
	int i;

	/*
	 * MSR bitmap update can be skipped when:
	 * - MSR bitmap for L1 hasn't changed.
	 * - Nested hypervisor (L1) is attempting to launch the same L2 as
	 *   before.
	 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
	 *   tells KVM (L0) there were no changes in MSR bitmap for L2.
	 */
	if (!svm->nested.force_msr_bitmap_recalc &&
	    kvm_hv_hypercall_enabled(&svm->vcpu) &&
	    hve->hv_enlightenments_control.msr_bitmap &&
	    (svm->nested.ctl.clean & BIT(VMCB_HV_NESTED_ENLIGHTENMENTS)))
		goto set_msrpm_base_pa;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}
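
	/*
	 * The merged bitmap is what the CPU consults while L2 runs: an MSR
	 * access exits if either KVM (L0) or L1 set the corresponding bit.
	 * Whether such an exit is then reflected to L1 is decided later, in
	 * nested_svm_exit_handled_msr(), against L1's own bitmap.
	 */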

	svm->nested.force_msr_bitmap_recalc = false;

set_msrpm_base_pa:
	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
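/*
 * The I/O and MSR permission maps span several pages (IOPM_SIZE/MSRPM_SIZE),
 * so both the first and the last byte of the bitmap must be legal
 * guest-physical addresses.
 */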
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = PAGE_ALIGN(pa);

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	    kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
{
	/* Nested FLUSHBYASID is not supported yet.  */
	switch (tlb_ctl) {
	case TLB_CONTROL_DO_NOTHING:
	case TLB_CONTROL_FLUSH_ALL_ASID:
		return true;
	default:
		return false;
	}
}

static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *control)
{
	if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;
	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
		return false;

	return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
				     struct vmcb_save_area_cached *save)
{
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
			return false;
	}

	if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_save_area_cached *save = &svm->nested.save;

	return __nested_vmcb_check_save(vcpu, save);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;

	return __nested_vmcb_check_controls(vcpu, ctl);
}

void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *to,
					 struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		to->intercepts[i] = from->intercepts[i];

	to->iopm_base_pa        = from->iopm_base_pa;
	to->msrpm_base_pa       = from->msrpm_base_pa;
	to->tsc_offset          = from->tsc_offset;
	to->tlb_ctl             = from->tlb_ctl;
	to->int_ctl             = from->int_ctl;
	to->int_vector          = from->int_vector;
	to->int_state           = from->int_state;
	to->exit_code           = from->exit_code;
	to->exit_code_hi        = from->exit_code_hi;
	to->exit_info_1         = from->exit_info_1;
	to->exit_info_2         = from->exit_info_2;
	to->exit_int_info       = from->exit_int_info;
	to->exit_int_info_err   = from->exit_int_info_err;
	to->nested_ctl          = from->nested_ctl;
	to->event_inj           = from->event_inj;
	to->event_inj_err       = from->event_inj_err;
	to->nested_cr3          = from->nested_cr3;
	to->virt_ext            = from->virt_ext;
	to->pause_filter_count  = from->pause_filter_count;
	to->pause_filter_thresh = from->pause_filter_thresh;

	/* Copy asid here because nested_vmcb_check_controls will check it.  */
	to->asid           = from->asid;
	to->msrpm_base_pa &= ~0x0fffULL;
	to->iopm_base_pa  &= ~0x0fffULL;

	/* Hyper-V extensions (Enlightened VMCB) */
	if (kvm_hv_hypercall_enabled(vcpu)) {
		to->clean = from->clean;
		memcpy(to->reserved_sw, from->reserved_sw,
		       sizeof(struct hv_enlightenments));
	}
}

void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control)
{
	__nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
}

static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
					     struct vmcb_save_area *from)
{
	/*
	 * Copy only fields that are validated, as we need them
	 * to avoid TOC/TOU races.
	 */
	to->efer = from->efer;
	to->cr0 = from->cr0;
	to->cr3 = from->cr3;
	to->cr4 = from->cr4;

	to->dr6 = from->dr6;
	to->dr7 = from->dr7;
}

void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save)
{
	__nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;

	svm->nested.ctl.event_inj     = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
	 * things to fix before this can be conditional:
	 *
	 *  - Flush TLBs for both L1 and L2 remote TLB flush
	 *  - Honor L1's request to flush an ASID on nested VMRUN
	 *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
	 *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
	 *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
	 *
	 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
	 *     NPT guest-physical mappings on VMRUN.
	 */
	kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
		return -EINVAL;

	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, cr3)))
		return -EINVAL;

	vcpu->arch.cr3 = cr3;

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3);

	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
		svm->nested.force_msr_bitmap_recalc = true;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		svm->vmcb->save.es = vmcb12->save.es;
		svm->vmcb->save.cs = vmcb12->save.cs;
		svm->vmcb->save.ss = vmcb12->save.ss;
		svm->vmcb->save.ds = vmcb12->save.ds;
		svm->vmcb->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		svm->vmcb->save.gdtr = vmcb12->save.gdtr;
		svm->vmcb->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(svm->vmcb, VMCB_DT);
	}

	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	svm_set_efer(&svm->vcpu, svm->nested.save.efer);

	svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
	svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		svm->vmcb->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
	}
}

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
	const u32 int_ctl_vmcb01_bits =
		V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;

	const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

	struct kvm_vcpu *vcpu = &svm->vcpu;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	/*
	 * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
	 * avic_physical_id.
	 */
	WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
	svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa;
	svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3. */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(vcpu);

	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
			vcpu->arch.l1_tsc_offset,
			svm->nested.ctl.tsc_offset,
			svm->tsc_ratio_msr);

	svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;

	if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
		WARN_ON(!svm->tsc_scaling_enabled);
		nested_svm_update_tsc_ratio_msr(vcpu);
	}

	svm->vmcb->control.int_ctl =
		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
		(svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);

	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;
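
	/*
	 * event_inj is taken verbatim from the cached vmcb12, so an event that
	 * L1 queued for its guest is injected into L2 by hardware on this
	 * VMRUN; it is synced back (or turned into EXITINTINFO) on #VMEXIT.
	 */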

	nested_svm_transition_tlb_flush(vcpu);

	/* Enter Guest-Mode */
	enter_guest_mode(vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12, bool from_vmrun)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
				  nested_npt_enabled(svm), from_vmrun);
	if (ret)
		return ret;

	if (!npt_enabled)
		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

	if (!from_vmrun)
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	svm_set_gif(svm, true);

	return 0;
}
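
/*
 * Emulated VMRUN, roughly: map vmcb12, snapshot its control and save areas
 * into the nested cache, validate them, stash L1 state in vmcb01, then switch
 * to vmcb02 and enter guest mode.  On a consistency-check failure the exit
 * code is forced to SVM_EXIT_ERR so L1 observes a failed VMRUN.
 */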
int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	if (!svm->nested.hsave_msr) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (is_smm(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(vcpu);
	}

	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);

	if (!nested_vmcb_check_save(vcpu) ||
	    !nested_vmcb_check_controls(vcpu)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1  = 0;
		vmcb12->control.exit_info_2  = 0;
		goto out;
	}

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	svm->vmcb01.ptr->save.efer   = vcpu->arch.efer;
	svm->vmcb01.ptr->save.cr0    = kvm_read_cr0(vcpu);
	svm->vmcb01.ptr->save.cr4    = vcpu->arch.cr4;
	svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
	svm->vmcb01.ptr->save.rip    = kvm_rip_read(vcpu);

	if (!npt_enabled)
		svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

/* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save)
{
	to_save->es = from_save->es;
	to_save->cs = from_save->cs;
	to_save->ss = from_save->ss;
	to_save->ds = from_save->ds;
	to_save->gdtr = from_save->gdtr;
	to_save->idtr = from_save->idtr;
	to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
	to_save->efer = from_save->efer;
	to_save->cr0 = from_save->cr0;
	to_save->cr3 = from_save->cr3;
	to_save->cr4 = from_save->cr4;
	to_save->rax = from_save->rax;
	to_save->rsp = from_save->rsp;
	to_save->rip = from_save->rip;
	to_save->cpl = 0;
}

void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}
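
/*
 * Emulated #VMEXIT to L1, roughly: copy L2 state and the cached exit
 * information back into vmcb12, switch from vmcb02 back to vmcb01, clear GIF,
 * restore L1's register state, and drop any event that was picked up for L2
 * so it is not spuriously delivered to L1.
 */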
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb12;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	int rc;

	/* Triple faults in L2 should never escape. */
	WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));

	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es     = vmcb->save.es;
	vmcb12->save.cs     = vmcb->save.cs;
	vmcb12->save.ss     = vmcb->save.ss;
	vmcb12->save.ds     = vmcb->save.ds;
	vmcb12->save.gdtr   = vmcb->save.gdtr;
	vmcb12->save.idtr   = vmcb->save.idtr;
	vmcb12->save.efer   = svm->vcpu.arch.efer;
	vmcb12->save.cr0    = kvm_read_cr0(vcpu);
	vmcb12->save.cr3    = kvm_read_cr3(vcpu);
	vmcb12->save.cr2    = vmcb->save.cr2;
	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(vcpu);
	vmcb12->save.rip    = kvm_rip_read(vcpu);
	vmcb12->save.rsp    = kvm_rsp_read(vcpu);
	vmcb12->save.rax    = kvm_rax_read(vcpu);
	vmcb12->save.dr7    = vmcb->save.dr7;
	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
	vmcb12->save.cpl    = vmcb->save.cpl;

	vmcb12->control.int_state    = vmcb->control.int_state;
	vmcb12->control.exit_code    = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1  = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2  = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_save_pending_event_to_vmcb12(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip = vmcb->control.next_rip;

	vmcb12->control.int_ctl       = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl       = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj     = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

	nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

	svm_switch_vmcb(svm, &svm->vmcb01);

	/*
	 * On vmexit the GIF is set to false and
	 * no event can be injected in L1.
	 */
	svm_set_gif(svm, false);
	svm->vmcb->control.exit_int_info = 0;

	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
	if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
		svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	}

	if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
		WARN_ON(!svm->tsc_scaling_enabled);
		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
		svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
	}

	svm->nested.ctl.nested_cr3 = 0;

	/*
	 * Restore processor state that had been saved in vmcb01
	 */
	kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
	svm_set_efer(vcpu, svm->vmcb->save.efer);
	svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
	svm_set_cr4(vcpu, svm->vmcb->save.cr4);
	kvm_rax_write(vcpu, svm->vmcb->save.rax);
	kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
	kvm_rip_write(vcpu, svm->vmcb->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(vcpu, &map, true);

	nested_svm_transition_tlb_flush(vcpu);

	nested_svm_uninit_mmu_context(vcpu);

	rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true);
	if (rc)
		return 1;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * If we are here following the completion of a VMRUN that
	 * is being single-stepped, queue the pending #DB intercept
	 * right now so that it can be accounted for before we execute
	 * L1's next instruction.
	 */
	if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF))
		kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

	return 0;
}

static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
	nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}

int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *vmcb02_page;

	if (svm->nested.initialized)
		return 0;

	vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vmcb02_page)
		return -ENOMEM;
	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_vmcb02;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_vmcb02:
	__free_page(vmcb02_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
	svm->nested.vmcb02.ptr = NULL;

	/*
	 * When last_vmcb12_gpa matches the current vmcb12 gpa,
	 * some vmcb12 fields are not loaded if they are marked clean
	 * in the vmcb12, since in this case they are up to date already.
	 *
	 * When the vmcb02 is freed, this optimization becomes invalid.
	 */
	svm->nested.last_vmcb12_gpa = INVALID_GPA;

	svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu)) {
		svm->nested.nested_run_pending = 0;
		svm->nested.vmcb12_gpa = INVALID_GPA;

		leave_guest_mode(vcpu);

		svm_switch_vmcb(svm, &svm->vmcb01);

		nested_svm_uninit_mmu_context(vcpu);
		vmcb_mark_all_dirty(svm->vmcb);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);
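
	/*
	 * Each MSR takes two adjacent bits in the permission bitmap (read bit,
	 * then write bit), so the 32-bit chunk read below covers 16 MSRs.
	 * E.g. a WRMSR to an MSR whose low nibble is 0 tests bit 1 of that
	 * chunk, while a RDMSR of the same MSR tests bit 0.
	 */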

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but need in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
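
	/*
	 * The IOPM has one bit per port.  For example, a 2-byte access to
	 * port 0x71 reads the byte at iopm_base_pa + 14 with start_bit = 1,
	 * so mask = 0b0110 and a single byte (iopm_len = 1) is enough; an
	 * access that straddles a byte boundary reads two bytes instead.
	 */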

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (to_svm(vcpu)->vmcb->save.cpl) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);

		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}
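
/*
 * Decide whether a pending event should yield a synthesized #VMEXIT to L1
 * rather than being injected into L2.  Events are considered roughly in
 * priority order (INIT, exceptions, SMI, NMI, then external interrupts), and
 * -EBUSY is returned while event injection must stay blocked.
 */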
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		/*
		 * Only a pending nested run can block a pending exception.
		 * Otherwise an injected NMI/interrupt should either be
		 * lost or delivered to the nested hypervisor in the EXITINTINFO
		 * vmcb field, while delivering the pending exception.
		 */
		if (svm->nested.nested_run_pending)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
		return 0;
	}

	return 0;
}

int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
		    excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->arch.tsc_scaling_ratio =
		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
					       svm->tsc_ratio_msr);
	svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
}

/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
					      struct vmcb_ctrl_area_cached *from)
{
	unsigned int i;

	memset(dst, 0, sizeof(struct vmcb_control_area));

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	dst->asid                 = from->asid;
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->virt_ext             = from->virt_ext;
	dst->pause_filter_count   = from->pause_filter_count;
	dst->pause_filter_thresh  = from->pause_filter_thresh;
	/* 'clean' and 'reserved_sw' are not changed by KVM */
}

static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct vmcb_control_area *ctl;
	unsigned long r;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out.  */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;

	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
	r = copy_to_user(&user_vmcb->control, ctl,
			 sizeof(user_vmcb->control));
	kfree(ctl);
	if (r)
		return -EFAULT;

	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;
out:
	return kvm_state.size;
}

static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	struct vmcb_save_area_cached save_cached;
	struct vmcb_ctrl_area_cached ctl_cached;
	unsigned long cr0;
	int ret;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0.  */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(vcpu);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ret  = -ENOMEM;
	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL_ACCOUNT);
	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;

	ret = -EINVAL;
	__nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
	if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
		goto out_free;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_check_save).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 */
	__nested_copy_vmcb_save_to_cache(&save_cached, save);
	if (!(save->cr0 & X86_CR0_PG) ||
	    !(save->cr0 & X86_CR0_PE) ||
	    (save->rflags & X86_EFLAGS_VM) ||
	    !__nested_vmcb_check_save(vcpu, &save_cached))
		goto out_free;

	/*
	 * All checks done, we can enter guest mode.  Userspace provides
	 * vmcb12.control, which will be combined with L1 and stored into
	 * vmcb02, and the L1 save state which we store in vmcb01.
	 * L2 registers if needed are moved from the current VMCB to VMCB02.
	 */

	if (is_guest_mode(vcpu))
		svm_leave_nested(vcpu);
	else
		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;

	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));

	svm->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;

	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
	nested_copy_vmcb_control_to_cache(svm, ctl);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);

	/*
	 * While the nested guest CR3 is already checked and set by
	 * KVM_SET_SREGS, it was set when the nested state was not yet
	 * loaded, thus the MMU might not be initialized correctly.
	 * Set it again to fix this.
	 */

	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
				  nested_npt_enabled(svm), false);
	if (WARN_ON_ONCE(ret))
		goto out_free;

	svm->nested.force_msr_bitmap_recalc = true;

	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!vcpu->arch.pdptrs_from_userspace &&
	    !nested_npt_enabled(svm) && is_pae_paging(vcpu))
		/*
		 * Reload the guest's PDPTRs since after a migration
		 * the guest CR3 might be restored prior to setting the nested
		 * state which can lead to a load of wrong PDPTRs.
		 */
		if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
			return false;

	if (!nested_svm_vmrun_msrpm(svm)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}

	return true;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.leave_nested = svm_leave_nested,
	.check_events = svm_check_nested_events,
	.triple_fault = nested_svm_triple_fault,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};