// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"
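
/*
 * Used as the ->inject_page_fault callback while L2 runs with nested
 * paging: reflect a fault that L0 detected back to L1 as an #NPF vmexit.
 */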
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}
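
/*
 * Read one PDPTE of L1's nested page table from guest memory; used by the
 * shadow-NPT MMU when the nested CR3 points to a PAE page table.
 */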
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}
static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}
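
/*
 * Switch vcpu->arch.mmu to the shadow-NPT MMU that translates L2 GPAs
 * through L1's nested page table, and route guest-pgd/PDPTE reads and
 * page-fault injection through the nested callbacks above.
 */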
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
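
/*
 * Recompute the effective intercepts for the current vmcb: start from the
 * intercepts KVM itself needs (saved in hsave) and OR in everything L1
 * requested for L2, minus the exceptions handled below.
 */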
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];
}
static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa        = from->iopm_base_pa;
	dst->msrpm_base_pa       = from->msrpm_base_pa;
	dst->tsc_offset          = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb.  */
	dst->tlb_ctl             = from->tlb_ctl;
	dst->int_ctl             = from->int_ctl;
	dst->int_vector          = from->int_vector;
	dst->int_state           = from->int_state;
	dst->exit_code           = from->exit_code;
	dst->exit_code_hi        = from->exit_code_hi;
	dst->exit_info_1         = from->exit_info_1;
	dst->exit_info_2         = from->exit_info_2;
	dst->exit_int_info       = from->exit_int_info;
	dst->exit_int_info_err   = from->exit_int_info_err;
	dst->nested_ctl          = from->nested_ctl;
	dst->event_inj           = from->event_inj;
	dst->event_inj_err       = from->event_inj_err;
	dst->nested_cr3          = from->nested_cr3;
	dst->virt_ext            = from->virt_ext;
	dst->pause_filter_count  = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the MSR permission bitmaps of KVM and the
	 * nested vmcb.  It is optimized in that it only merges the parts
	 * where the KVM MSR permission bitmap may contain zero bits.
	 */
	int i;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}
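
/*
 * Consistency checks on the control area of vmcb12; a failure here makes
 * the emulated VMRUN fail with SVM_EXIT_ERR.
 */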
static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
	if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
		return false;

	if (control->asid == 0)
		return false;

	if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}
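
/*
 * Checks on the guest state in vmcb12 that would make the real VMRUN
 * fail, mirroring the VMRUN consistency checks described in the APM.
 */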
static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool vmcb12_lma;

	if ((vmcb12->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
		return false;

	if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
		return false;

	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);

	if (!vmcb12_lma) {
		if (vmcb12->save.cr4 & X86_CR4_PAE) {
			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
				return false;
		} else {
			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
				return false;
		}
	} else {
		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
		    (vmcb12->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
			return false;
	}
	if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
		return false;

	return nested_vmcb_check_controls(&vmcb12->control);
}
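
/*
 * Cache vmcb12's control area in svm->nested.ctl so that later checks and
 * vmexit synchronization work on a stable copy rather than guest memory.
 */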
static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_vmcb_check_controls will check it.  */
	svm->nested.ctl.asid           = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}
/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the nested_vmcb.
 */
void sync_nested_vmcb_control(struct vcpu_svm *svm)
{
	u32 mask;

	svm->nested.ctl.event_inj     = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}

	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}
/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
					   struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}
static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}
/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	if (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))
		return -EINVAL;

	if (!nested_npt && is_pae_paging(vcpu) &&
	    (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
		if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
			return -EINVAL;
	}

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync here and in
	 * kvm_init_shadow_npt_mmu().
	 */
	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3, false, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}
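
/*
 * Copy the L2 guest state from vmcb12 into the current vmcb and the vcpu,
 * going through the usual setters so their side effects (e.g. CR0/CR4
 * handling) are applied.
 */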
static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	/* Load the nested guest state */
	svm->vmcb->save.es = vmcb12->save.es;
	svm->vmcb->save.cs = vmcb12->save.cs;
	svm->vmcb->save.ss = vmcb12->save.ss;
	svm->vmcb->save.ds = vmcb12->save.ds;
	svm->vmcb->save.gdtr = vmcb12->save.gdtr;
	svm->vmcb->save.idtr = vmcb12->save.idtr;
	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags);
	svm_set_efer(&svm->vcpu, vmcb12->save.efer);
	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;
	svm->vmcb->save.dr7 = vmcb12->save.dr7;
	svm->vcpu.arch.dr6  = vmcb12->save.dr6;
	svm->vmcb->save.cpl = vmcb12->save.cpl;
}
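
/*
 * Build the control area of the current vmcb from the cached vmcb12
 * controls.  The V_INTR_MASKING and GIF bits of int_ctl stay under L1's
 * control and are taken from hsave instead.
 */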
static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
{
	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;

	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(&svm->vcpu);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl =
		(svm->nested.ctl.int_ctl & ~mask) |
		(svm->nested.hsave->control.int_ctl & mask);

	svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err       = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	vmcb_mark_all_dirty(svm->vmcb);
}
int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	int ret;

	svm->nested.vmcb12_gpa = vmcb12_gpa;
	load_nested_vmcb_control(svm, &vmcb12->control);
	nested_prepare_vmcb_save(svm, vmcb12);
	nested_prepare_vmcb_control(svm);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm));
	if (ret)
		return ret;

	svm_set_gif(svm, true);

	return 0;
}
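
/*
 * Emulate VMRUN for L1: map vmcb12, validate it, save the L1 state that
 * VMRUN keeps in the host save area into hsave, then enter guest mode.
 * On failure the guest sees an SVM_EXIT_ERR vmexit.
 */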
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *vmcb12;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	vmcb12 = map.hva;

	if (!nested_vmcb_checks(svm, vmcb12)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1  = 0;
		vmcb12->control.exit_info_2  = 0;
		goto out;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es     = vmcb->save.es;
	hsave->save.cs     = vmcb->save.cs;
	hsave->save.ss     = vmcb->save.ss;
	hsave->save.ds     = vmcb->save.ds;
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.efer;
	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp    = vmcb->save.rsp;
	hsave->save.rax    = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(&hsave->control, &vmcb->control);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}
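
/*
 * Copy the register state touched by VMLOAD/VMSAVE between two vmcbs;
 * used to emulate both instructions on behalf of L1.
 */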
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}
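
/*
 * Emulate #VMEXIT from L2 to L1: copy the current (L2) state and exit
 * information into vmcb12, then restore L1's state and controls from
 * hsave.
 */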
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *vmcb12;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es     = vmcb->save.es;
	vmcb12->save.cs     = vmcb->save.cs;
	vmcb12->save.ss     = vmcb->save.ss;
	vmcb12->save.ds     = vmcb->save.ds;
	vmcb12->save.gdtr   = vmcb->save.gdtr;
	vmcb12->save.idtr   = vmcb->save.idtr;
	vmcb12->save.efer   = svm->vcpu.arch.efer;
	vmcb12->save.cr0    = kvm_read_cr0(&svm->vcpu);
	vmcb12->save.cr3    = kvm_read_cr3(&svm->vcpu);
	vmcb12->save.cr2    = vmcb->save.cr2;
	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
	vmcb12->save.rip    = kvm_rip_read(&svm->vcpu);
	vmcb12->save.rsp    = kvm_rsp_read(&svm->vcpu);
	vmcb12->save.rax    = kvm_rax_read(&svm->vcpu);
	vmcb12->save.dr7    = vmcb->save.dr7;
	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
	vmcb12->save.cpl    = vmcb->save.cpl;

	vmcb12->control.int_state    = vmcb->control.int_state;
	vmcb12->control.exit_code    = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1  = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2  = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_vmcb_save_pending_event(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip = vmcb->control.next_rip;

	vmcb12->control.int_ctl       = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl       = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj     = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

	vmcb12->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	vmcb12->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* Restore the original control entries */
	copy_vmcb_control_area(&vmcb->control, &hsave->control);

	/* On vmexit the GIF is set to false */
	svm_set_gif(svm, false);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	svm->nested.ctl.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	vmcb_mark_all_dirty(svm->vmcb);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);

	rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
	if (rc)
		return 1;

	if (npt_enabled)
		svm->vmcb->save.cr3 = hsave->save.cr3;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}
/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu)) {
		struct vmcb *hsave = svm->nested.hsave;
		struct vmcb *vmcb = svm->vmcb;

		svm->nested.nested_run_pending = 0;
		leave_guest_mode(&svm->vcpu);
		copy_vmcb_control_area(&vmcb->control, &hsave->control);
		nested_svm_uninit_mmu_context(&svm->vcpu);
	}
}
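
/*
 * Decide whether an intercepted RDMSR/WRMSR should be forwarded to L1 by
 * consulting the MSR permission bitmap that L1 provided.
 */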
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
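
/*
 * Likewise for IN/OUT: test the relevant bit(s) in L1's IO permission
 * bitmap; accesses that straddle a byte boundary need a two-byte read.
 */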
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}
int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}
int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}
static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}
static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);

		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}
static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}
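
/*
 * Events are considered in priority order: INIT, exception, SMI, NMI and
 * finally interrupts.  An event that L1 does not intercept is left pending
 * for the normal injection path; -EBUSY defers the decision while an event
 * still needs re-injection or a nested VMRUN is pending.
 */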
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}
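
/*
 * Exits that L0 must handle (or at least look at) first, regardless of
 * what L1 intercepts.
 */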
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] &
		    excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}
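
/*
 * KVM_GET_NESTED_STATE: report guest mode, pending VMRUN and GIF to
 * userspace, followed by the cached vmcb12 control area and the saved L1
 * state when the vcpu is in guest mode.
 */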
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out.  */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
			 sizeof(user_vmcb->control)))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;

out:
	return kvm_state.size;
}
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	int ret;
	u32 cr0;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0.  */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(svm);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ret  = -ENOMEM;
	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL);
	save = kzalloc(sizeof(*save), GFP_KERNEL);
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;

	ret = -EINVAL;
	if (!nested_vmcb_check_controls(ctl))
		goto out_free;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_checks).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 * TODO: validate reserved bits for all saved state.
	 */
	if (!(save->cr0 & X86_CR0_PG))
		goto out_free;

	/*
	 * All checks done, we can enter guest mode.  L1 control fields
	 * come from the nested save state.  Guest state is already
	 * in the registers, the save area of the nested state instead
	 * contains saved L1 state.
	 */
	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
	hsave->save = *save;

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
	load_nested_vmcb_control(svm, ctl);
	nested_prepare_vmcb_control(svm);

	if (!nested_svm_vmrun_msrpm(svm))
		goto out_free;

	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}
struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};