// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

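/*
 * A note on naming, inferred from how this file uses these fields: L1 is the
 * guest hypervisor, L2 is the nested guest it launches with VMRUN, and L0 is
 * this KVM.  While L2 runs, svm->vmcb is the VMCB actually consumed by
 * hardware, svm->nested.hsave holds the L1 state to restore on #VMEXIT, and
 * svm->nested.ctl caches the control area of L1's (the "nested") VMCB.
 */
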
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_mmu(vcpu);
	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

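/*
 * Build the effective intercept set for the hardware VMCB.  In guest mode
 * this starts from L1's ("host") intercepts and ORs in L2's ("guest")
 * intercepts, so an exit is taken whenever either hypervisor asked for it;
 * the few adjustments below are cases where L0 deliberately deviates.
 */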
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested.ctl;

	svm->nested.host_intercept_exceptions = h->intercept_exceptions;

	c->intercept_cr = h->intercept_cr;
	c->intercept_dr = h->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions;
	c->intercept = h->intercept;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

	c->intercept_cr |= g->intercept_cr;
	c->intercept_dr |= g->intercept_dr;
	c->intercept_exceptions |= g->intercept_exceptions;
	c->intercept |= g->intercept;
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	dst->intercept_cr = from->intercept_cr;
	dst->intercept_dr = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept = from->intercept;
	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb. */
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}

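/*
 * MSR permission bitmap merge: both L0's and L1's bitmaps mark an MSR as
 * intercepted by setting its bits, and the two maps are OR'ed together
 * below, so running L2 exits on an MSR access whenever either hypervisor
 * wants it to.
 */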
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits.
	 */
	int i;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
	    (vmcb->save.cr0 & X86_CR0_NW))
		return false;

	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

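/*
 * Cache L1's control area and sanitize it.  The IOPM and MSRPM base
 * addresses are masked down to 4 KiB alignment here; the assumption, based
 * on the masking below, is that the low 12 bits are not meaningful as
 * address bits, and dropping them keeps later gpa arithmetic simple.
 */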
static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the nested_vmcb.
 */
void sync_nested_vmcb_control(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor. */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    is_intercept(svm, SVM_EXIT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

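/*
 * Note that save-area fields with side effects (rflags, efer, cr0, cr4,
 * cr3) are loaded through the kvm_ and svm_ setter helpers below so that
 * dependent KVM state (mmu mode, cpl, flag updates) is recomputed, while
 * plain fields are copied straight into the hardware VMCB.
 */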
static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
{
	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
}

static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
{
	if (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
		nested_svm_init_mmu_context(&svm->vcpu);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm_flush_tlb(&svm->vcpu);
	if (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl = svm->nested.ctl.int_ctl | V_INTR_MASKING_MASK;
	svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	mark_all_dirty(svm->vmcb);
}

void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			  struct vmcb *nested_vmcb)
{
	svm->nested.vmcb = vmcb_gpa;
	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	load_nested_vmcb_control(svm, &nested_vmcb->control);
	nested_prepare_vmcb_save(svm, nested_vmcb);
	nested_prepare_vmcb_control(svm);

	svm_set_gif(svm, true);
}

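/*
 * VMRUN emulation, in outline: map L1's VMCB (its gpa is in RAX), run the
 * consistency checks on it, stash L1's current state into hsave so that
 * everything can be restored on #VMEXIT, switch the vcpu into guest mode,
 * and finally merge the MSR permission bitmaps.
 */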
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = map.hva;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1 = 0;
		nested_vmcb->control.exit_info_2 = 0;
		goto out;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es = vmcb->save.es;
	hsave->save.cs = vmcb->save.cs;
	hsave->save.ss = vmcb->save.ss;
	hsave->save.ds = vmcb->save.ds;
	hsave->save.gdtr = vmcb->save.gdtr;
	hsave->save.idtr = vmcb->save.idtr;
	hsave->save.efer = svm->vcpu.arch.efer;
	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp = vmcb->save.rsp;
	hsave->save.rax = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(&hsave->control, &vmcb->control);

	svm->nested.nested_run_pending = 1;
	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);

	if (!nested_svm_vmrun_msrpm(svm)) {
		svm->vmcb->control.exit_code = SVM_EXIT_ERR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = 0;
		svm->vmcb->control.exit_info_2 = 0;

		nested_svm_vmexit(svm);
	}

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

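/*
 * #VMEXIT emulation, in outline: copy the current (L2) register and exit
 * state back into L1's mapped VMCB, restore the control and save areas that
 * were stashed in hsave at VMRUN time, and drop any event that was picked up
 * for L2 so it is not spuriously delivered to L1.
 */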
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */
	svm_set_gif(svm, false);

	nested_vmcb->save.es = vmcb->save.es;
	nested_vmcb->save.cs = vmcb->save.cs;
	nested_vmcb->save.ss = vmcb->save.ss;
	nested_vmcb->save.ds = vmcb->save.ds;
	nested_vmcb->save.gdtr = vmcb->save.gdtr;
	nested_vmcb->save.idtr = vmcb->save.idtr;
	nested_vmcb->save.efer = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2 = vmcb->save.cr2;
	nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip = kvm_rip_read(&svm->vcpu);
	nested_vmcb->save.rsp = kvm_rsp_read(&svm->vcpu);
	nested_vmcb->save.rax = kvm_rax_read(&svm->vcpu);
	nested_vmcb->save.dr7 = vmcb->save.dr7;
	nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
	nested_vmcb->save.cpl = vmcb->save.cpl;

	nested_vmcb->control.int_state = vmcb->control.int_state;
	nested_vmcb->control.exit_code = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

	if (svm->nrips_enabled)
		nested_vmcb->control.next_rip = vmcb->control.next_rip;

	nested_vmcb->control.int_ctl = svm->nested.ctl.int_ctl;
	nested_vmcb->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
	nested_vmcb->control.event_inj = svm->nested.ctl.event_inj;
	nested_vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;

	nested_vmcb->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	nested_vmcb->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* Restore the original control entries */
	copy_vmcb_control_area(&vmcb->control, &hsave->control);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.ctl.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

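/*
 * MSR permission bitmap lookup: each MSR occupies two adjacent bits in the
 * map (the low bit of the pair for reads, the high bit for writes), so a
 * 32-bit chunk covers 16 MSRs.  svm_msrpm_offset() returns the chunk index,
 * msr & 0xf picks the MSR within it, and bit 0 of exit_info_1 distinguishes
 * a write from a read.
 */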
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

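/*
 * IO permission bitmap lookup: one bit per port, so the byte at
 * iopm_base_pa + port / 8 holds the bits for port & ~7.  A multi-byte access
 * can straddle a byte boundary, in which case two bytes must be read.  As a
 * worked example (illustrative numbers): a 2-byte access to port 0x3ff gives
 * start_bit = 7, hence iopm_len = 2 and mask = 0x180, covering ports 0x3ff
 * and 0x400.
 */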
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.ctl.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.ctl.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.ctl.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercept_exceptions & (1 << nr));
}

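/*
 * Synthesize an exception #VMEXIT for L1.  When L1 intercepts the exception,
 * any payload has to be transferred through the exit information fields
 * rather than the architectural registers; the #DB handling below
 * (delivering the payload and clearing DR7.GD) is meant to mirror what
 * hardware would have done before the intercept, as the
 * inject_pending_event reference suggests.
 */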
static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INIT));
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

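/*
 * Event injection checks, highest priority first: INIT, then exceptions,
 * SMI, NMI, and finally external interrupts.  Returning -EBUSY tells the
 * generic code that a pending event cannot be delivered right now (for
 * instance while a nested VMRUN has not yet completed) and should be
 * retried; this reading follows from how block_nested_events is computed
 * below.
 */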
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_reason)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
};