// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "svm.h"

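/*
 * Reflect a nested page fault that KVM could not resolve back into L1:
 * force the exit code to SVM_EXIT_NPF (synthesizing it if the original
 * exit was something else) and emulate a #VMEXIT to the L1 hypervisor.
 */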
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}

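/*
 * Load a PDPTE from the guest page tables rooted at L1's nested CR3.
 * A failed read from guest memory is reported as a non-present PDPTE.
 */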
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}

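/*
 * Switch the vcpu to the nested MMU for NPT-on-NPT: guest_mmu shadows
 * L1's nested page tables, while walks of L2's address space go through
 * nested_mmu.
 */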
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_mmu(vcpu);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu->shadow_root_level = kvm_x86_ops.get_tdp_level(vcpu);
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

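/*
 * Recompute the effective intercept masks while L2 is active: start from
 * the host (KVM) intercepts saved in hsave, drop the bits L0 does not
 * need while L2 runs, then OR in the intercepts requested by L1.
 */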
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr;
	c->intercept_dr = h->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions;
	c->intercept = h->intercept;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

	c->intercept_cr |= g->intercept_cr;
	c->intercept_dr |= g->intercept_dr;
	c->intercept_exceptions |= g->intercept_exceptions;
	c->intercept |= g->intercept;
}

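/* Copy the full VMCB control area; the save area is left untouched. */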
static void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst  = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr         = from->intercept_cr;
	dst->intercept_dr         = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept            = from->intercept;
	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	dst->asid                 = from->asid;
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->virt_ext             = from->virt_ext;
	dst->pause_filter_count   = from->pause_filter_count;
	dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits.
	 */
	int i;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.vmcb_msrpm + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

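/*
 * Consistency checks that real hardware performs on the nested VMCB at
 * VMRUN; the caller turns a failure into an SVM_EXIT_ERR exit for L1.
 */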
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
	    (vmcb->save.cr0 & X86_CR0_NW))
		return false;

	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

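/*
 * Load L2's state from the nested VMCB and enter guest mode.  The caller
 * is expected to have saved L1's state into hsave beforehand; the map of
 * the nested VMCB is released before guest mode is entered.
 */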
void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			  struct vmcb *nested_vmcb, struct kvm_host_map *map)
{
	bool evaluate_pending_interrupts =
		is_intercept(svm, INTERCEPT_VINTR) ||
		is_intercept(svm, INTERCEPT_IRET);

	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
		svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
		nested_svm_init_mmu_context(&svm->vcpu);
	}

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
	} else
		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;

	svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
	svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;

	/* cache intercepts */
	svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
	svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
	svm->nested.intercept            = nested_vmcb->control.intercept;

	svm_flush_tlb(&svm->vcpu);
	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;

	svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	svm->vmcb->control.pause_filter_count =
		nested_vmcb->control.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh =
		nested_vmcb->control.pause_filter_thresh;

	kvm_vcpu_unmap(&svm->vcpu, map, true);

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	svm->nested.vmcb = vmcb_gpa;

	/*
	 * If L1 had a pending IRQ/NMI before executing VMRUN,
	 * which wasn't delivered because it was disallowed (e.g.
	 * interrupts disabled), L0 needs to evaluate if this pending
	 * event should cause an exit from L2 to L1 or be delivered
	 * directly to L2.
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request. However, VMRUN can unblock interrupts
	 * by implicitly setting GIF, so force L0 to perform pending event
	 * evaluation by requesting a KVM_REQ_EVENT.
	 */
	enable_gif(svm);
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	mark_all_dirty(svm->vmcb);
}

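/*
 * Handle a VMRUN intercept from L1: map and validate the nested VMCB,
 * save the current L1 state into hsave and switch to running L2.
 */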
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb_gpa;

	vmcb_gpa = svm->vmcb->save.rax;

	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = map.hva;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1  = 0;
		nested_vmcb->control.exit_info_2  = 0;

		kvm_vcpu_unmap(&svm->vcpu, &map, true);

		return ret;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es     = vmcb->save.es;
	hsave->save.cs     = vmcb->save.cs;
	hsave->save.ss     = vmcb->save.ss;
	hsave->save.ds     = vmcb->save.ds;
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.efer;
	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp    = vmcb->save.rsp;
	hsave->save.rax    = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(hsave, vmcb);

	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);

	if (!nested_svm_vmrun_msrpm(svm)) {
		svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1  = 0;
		svm->vmcb->control.exit_info_2  = 0;

		nested_svm_vmexit(svm);
	}

	return ret;
}

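/*
 * Copy the guest state touched by VMLOAD/VMSAVE (segment registers and
 * the syscall/sysenter MSR state) from one VMCB to the other.
 */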
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

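/*
 * Emulate a #VMEXIT from L2 to L1: publish the current L2 state and exit
 * information in the nested VMCB, then restore the L1 state that was
 * saved in hsave at VMRUN time.
 */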
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es     = vmcb->save.es;
	nested_vmcb->save.cs     = vmcb->save.cs;
	nested_vmcb->save.ss     = vmcb->save.ss;
	nested_vmcb->save.ds     = vmcb->save.ds;
	nested_vmcb->save.gdtr   = vmcb->save.gdtr;
	nested_vmcb->save.idtr   = vmcb->save.idtr;
	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2    = vmcb->save.cr2;
	nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip    = vmcb->save.rip;
	nested_vmcb->save.rsp    = vmcb->save.rsp;
	nested_vmcb->save.rax    = vmcb->save.rax;
	nested_vmcb->save.dr7    = vmcb->save.dr7;
	nested_vmcb->save.dr6    = vmcb->save.dr6;
	nested_vmcb->save.cpl    = vmcb->save.cpl;

	nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector        = vmcb->control.int_vector;
	nested_vmcb->control.int_state         = vmcb->control.int_state;
	nested_vmcb->control.exit_code         = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

	if (svm->nrips_enabled)
		nested_vmcb->control.next_rip  = vmcb->control.next_rip;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * Exit_int_info and event_inj can't be both valid because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info     = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl       = 0;
	nested_vmcb->control.event_inj     = 0;
	nested_vmcb->control.event_inj_err = 0;

	nested_vmcb->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	nested_vmcb->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);

	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

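/*
 * Consult L1's MSR permission bitmap to decide whether an intercepted
 * RDMSR/WRMSR executed by L2 must be reflected to L1.
 */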
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

/* DB exceptions for our internal use must not cause vmexit */
static int nested_svm_intercept_db(struct vcpu_svm *svm)
{
	unsigned long dr6;

	/* if we're not singlestepping, it's not ours */
	if (!svm->nmi_singlestep)
		return NESTED_EXIT_DONE;

	/* if it's not a singlestep exception, it's not ours */
	if (kvm_get_dr(&svm->vcpu, 6, &dr6))
		return NESTED_EXIT_DONE;
	if (!(dr6 & DR6_BS))
		return NESTED_EXIT_DONE;

	/* if the guest is singlestepping, it should get the vmexit */
	if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
		disable_nmi_singlestep(svm);
		return NESTED_EXIT_DONE;
	}

	/* it's ours, the nested hypervisor must not see this one */
	return NESTED_EXIT_HOST;
}

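/*
 * Consult L1's I/O permission bitmap to decide whether an intercepted
 * IN/OUT executed by L2 must be reflected to L1.  The permission bits
 * for a port may straddle a byte boundary, hence the 1- or 2-byte read.
 */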
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.vmcb_iopm + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

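/*
 * Decide whether a vmexit taken while L2 is running is handled by L0
 * (NESTED_EXIT_HOST) or belongs to L1 (NESTED_EXIT_DONE), based on the
 * intercepts that L1 requested in its VMCB.
 */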
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
		if (svm->nested.intercept_exceptions & excp_bits) {
			if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
				vmexit = nested_svm_intercept_db(svm);
			else
				vmexit = NESTED_EXIT_DONE;
		}
		/* async page fault always causes a vmexit */
		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
			 svm->vcpu.arch.exception.nested_apf != 0)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

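/* Reflect the exit to L1 if and only if L1 asked to intercept it. */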
int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

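/*
 * SVM instructions are only legal with EFER.SVME set, paging enabled and
 * CPL 0; raise #UD or #GP(0) accordingly.
 */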
int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

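/*
 * Check whether an exception raised in L2 is intercepted by L1 and, if
 * so, queue the corresponding #VMEXIT instead of delivering it to L2.
 */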
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code)
{
	int vmexit;

	if (!is_guest_mode(&svm->vcpu))
		return 0;

	vmexit = nested_svm_intercept(svm);
	if (vmexit != NESTED_EXIT_DONE)
		return 0;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (svm->vcpu.arch.exception.nested_apf)
		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
	else if (svm->vcpu.arch.exception.has_payload)
		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
	else
		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

	svm->nested.exit_required = true;
	return vmexit;
}

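/* Queue an SVM_EXIT_INTR #VMEXIT to hand a physical interrupt to L1. */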
static void nested_svm_intr(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	/* nested_svm_vmexit() gets called afterwards from handle_exit() */
	svm->nested.exit_required = true;
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
}

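/* INTERCEPT_INTR is bit 0 of the intercept vector. */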
static bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return (svm->nested.intercept & 1ULL);
}

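/*
 * Called from the event injection path while L2 is active; queues a
 * pending interrupt as a #VMEXIT to L1 when L1 intercepts interrupts.
 */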
int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required;

	if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
		if (block_nested_events)
			return -EBUSY;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

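/*
 * Exits that L0 always handles itself, regardless of L1's intercepts:
 * physical INTR/NMI, machine checks and, when NPT is in use, NPFs.
 */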
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_EXCP_BASE + MC_VECTOR:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_NPF:
		/* For now we are always handling NPFs when using them */
		if (npt_enabled)
			return NESTED_EXIT_HOST;
		break;
	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
		/* When we're shadowing, trap PFs, but not async PF */
		if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
			return NESTED_EXIT_HOST;
		break;
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}