// SPDX-License-Identifier: GPL-2.0

#include <linux/frame.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "evmcs.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "trace.h"
#include "vmx.h"
#include "x86.h"
static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);
/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
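
/*
 * Each shadow_vmcs_field maps a VMCS field encoding to the offset of the
 * corresponding member in struct vmcs12, so shadowed fields can be copied
 * between the shadow VMCS and the cached vmcs12.
 */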
struct shadow_vmcs_field {
	u16	encoding;
	u16	offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);
static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);
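
/*
 * Build the VMREAD/VMWRITE bitmaps from the shadow field tables: start with
 * all fields intercepted (all bits set) and clear the bits for fields that
 * may be shadowed in hardware.  Fields the CPU cannot shadow on this host
 * (PML index, preemption timer value, guest interrupt status) are pruned
 * from the tables.  A 64-bit field must be followed by its high-dword
 * counterpart (encoding + 1); a missing counterpart is reported.
 */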
static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_only_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_only_fields[j++] = entry;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_write_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
			  field <= GUEST_TR_AR_BYTES,
			  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
	}
	max_shadow_read_write_fields = j;
}
/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
		& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
		return nested_vmx_failInvalid(vcpu);

	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;

	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed.
	 */
	return kvm_skip_emulated_instruction(vcpu);
}
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: should not simply reset the guest here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}
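
/*
 * Stop using the shadow VMCS for vmcs12: clear the execution control bit and
 * invalidate the VMCS link pointer.  A pending vmcs12-to-shadow sync is moot
 * once shadowing is off.
 */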
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
	vmx->nested.need_vmcs12_to_shadow_sync = false;
}
static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.hv_evmcs)
		return;

	kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
	vmx->nested.hv_evmcs_vmptr = -1ull;
	vmx->nested.hv_evmcs = NULL;
}
/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	vmx->nested.cached_vmcs12 = NULL;
	kfree(vmx->nested.cached_shadow_vmcs12);
	vmx->nested.cached_shadow_vmcs12 = NULL;
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_dirty(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}
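
/*
 * When switching between loaded VMCSs, propagate the host state that KVM
 * tracks per-VMCS (segment selectors and FS/GS bases) from the previous
 * VMCS so the new one matches what is currently loaded on the CPU.
 */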
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
				     struct loaded_vmcs *prev)
{
	struct vmcs_host_state *dest, *src;

	if (unlikely(!vmx->guest_state_loaded))
		return;

	src = &prev->host_state;
	dest = &vmx->loaded_vmcs->host_state;

	vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
	dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
	dest->ds_sel = src->ds_sel;
	dest->es_sel = src->es_sel;
#endif
}
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *prev;
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	prev = vmx->loaded_vmcs;
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load_vmcs(vcpu, cpu);
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

	vmx_segment_cache_clear(vmx);
}
/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
	free_nested(vcpu);
	vcpu_put(vcpu);
}
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else if (fault->error_code & PFERR_RSVD_MASK)
		exit_reason = EXIT_REASON_EPT_MISCONFIG;
	else
		exit_reason = EXIT_REASON_EPT_VIOLATION;

	nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_ept_mmu(vcpu,
				to_vmx(vcpu)->nested.msrs.ept_caps &
				VMX_EPT_EXECUTE_ONLY_BIT,
				nested_ept_ad_enabled(vcpu),
				nested_ept_get_cr3(vcpu));
	vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
	vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
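
/*
 * A #PF is reflected to L1 when the PF bit in the exception bitmap and the
 * PFEC_MASK/PFEC_MATCH test agree: with the bit set, exit on
 * (error_code & mask) == match; with the bit clear, exit on a mismatch.
 */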
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}
/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether in a nested guest, we need to inject them to L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~(DR6_FIXED_1 | DR6_BT);
				payload ^= DR6_RTM;
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}
static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
	    !to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}
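
/*
 * A GPA provided by L1 must be page-aligned and must not exceed the
 * guest's reported maxphyaddr.
 */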
static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}
static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
	    !page_address_valid(vcpu, vmcs12->io_bitmap_b))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
		return -EINVAL;

	return 0;
}
/*
 * Check if an MSR write is intercepted by the L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}
/*
 * If an MSR is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_nested,
						 u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
		    !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
			/* read-low */
			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);

		if (type & MSR_TYPE_W &&
		    !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
			/* write-low */
			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;

		if (type & MSR_TYPE_R &&
		    !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
			/* read-high */
			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);

		if (type & MSR_TYPE_W &&
		    !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
			/* write-high */
			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
	}
}
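
/*
 * Set every read and write intercept bit for the 256 MSRs in the x2APIC
 * range (0x800 - 0x8ff); callers then selectively clear the bits L1 is
 * allowed to pass through.
 */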
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}
/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map->hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively disable them below.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);

	/*
	 * Checking the L0->L1 bitmap is trying to verify two things:
	 *
	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessary merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_SPEC_CTRL,
					MSR_TYPE_R | MSR_TYPE_W);

	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_PRED_CMD,
					MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);

	return true;
}
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct kvm_host_map map;
	struct vmcs12 *shadow;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
		return;

	memcpy(shadow, map.hva, VMCS12_SIZE);
	kvm_vcpu_unmap(vcpu, &map, false);
}
static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}
/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT.
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}
static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}
static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    !page_address_valid(vcpu, vmcs12->apic_access_addr))
		return -EINVAL;
	else
		return 0;
}
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (nested_cpu_has_vid(vmcs12) &&
	    !nested_exit_on_intr(vcpu))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	    (!nested_cpu_has_vid(vmcs12) ||
	     !nested_exit_intr_ack_set(vcpu) ||
	     (vmcs12->posted_intr_nv & 0xff00) ||
	     (vmcs12->posted_intr_desc_addr & 0x3f) ||
	     (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	int maxphyaddr;

	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
					vmcs12->vm_exit_msr_load_addr) ||
	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
					vmcs12->vm_exit_msr_store_addr))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						      struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
					vmcs12->vm_entry_msr_load_addr))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (!nested_cpu_has_ept(vmcs12) ||
	    !page_address_valid(vcpu, vmcs12->pml_address))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
	    !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
		return -EINVAL;

	return 0;
}
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
		return -EINVAL;
	if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
	    e->index == MSR_IA32_UCODE_REV)
		return -EINVAL;
	if (e->reserved != 0)
		return -EINVAL;

	return 0;
}
static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (e->index == MSR_FS_BASE ||
	    e->index == MSR_GS_BASE ||
	    e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;

	return 0;
}
static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;

	return 0;
}
/*
 * Load guest's/host's MSRs at nested entry/exit.
 * Returns 0 on success, or the 1-based index of the failing entry on failure.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	struct msr_data msr;

	msr.host_initiated = false;
	for (i = 0; i < count; i++) {
		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		msr.index = e.index;
		msr.data = e.value;
		if (kvm_set_msr(vcpu, &msr)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	return i + 1;
}
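
/*
 * Store the current value of each MSR listed in the VM-exit MSR-store area
 * back into the guest-supplied buffer at @gpa.  Only the index field of each
 * entry is read from the guest; the value slot is overwritten.
 */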
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;

	for (i = 0; i < count; i++) {
		struct msr_data msr_info;

		if (kvm_vcpu_read_guest(vcpu,
					gpa + i * sizeof(e),
					&e, 2 * sizeof(u32))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			return -EINVAL;
		}
		if (nested_vmx_store_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			return -EINVAL;
		}
		msr_info.host_initiated = false;
		msr_info.index = e.index;
		if (kvm_get_msr(vcpu, &msr_info)) {
			pr_debug_ratelimited(
				"%s cannot read MSR (%u, 0x%x)\n",
				__func__, i, e.index);
			return -EINVAL;
		}
		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
					     offsetof(struct vmx_msr_entry, value),
					 &msr_info.data, sizeof(msr_info.data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, msr_info.data);
			return -EINVAL;
		}
	}
	return 0;
}
static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long invalid_mask;

	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	return (val & invalid_mask) == 0;
}
/*
 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
 * emulating VM entry into a guest with EPT enabled.  Returns 0 on success, 1
 * on failure; on failure the invalid-state exit qualification code is
 * assigned to *entry_failure_code.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
			       u32 *entry_failure_code)
{
	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
		if (!nested_cr3_valid(vcpu, cr3)) {
			*entry_failure_code = ENTRY_FAIL_DEFAULT;
			return 1;
		}

		/*
		 * If PAE paging and EPT are both on, CR3 is not used by the
		 * CPU and must not be dereferenced.
		 */
		if (is_pae_paging(vcpu) && !nested_ept) {
			if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
				*entry_failure_code = ENTRY_FAIL_PDPTE;
				return 1;
			}
		}
	}

	if (!nested_ept)
		kvm_mmu_new_cr3(vcpu, cr3, false);

	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);

	kvm_init_mmu(vcpu, false);

	return 0;
}
/*
 * Returns true if KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated
 * by L1.
 *
 * If L1 uses EPT, then TLB entries are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return nested_cpu_has_ept(vmcs12) ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}
static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}
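
/*
 * Returns true if, within @mask, every bit set in @subset is also set in
 * @superset.
 */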
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}
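
/*
 * Userspace may only tighten a "true" control MSR: bits that KVM reports as
 * must-be-1 (low dword) must stay 1, and bits KVM reports as must-be-0
 * (unset in the high dword) must stay 0.
 */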
static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}
static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	return 0;
}
static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}
static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicate bits that "must be 1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}
/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	case MSR_IA32_VMX_VMFUNC:
		if (data & ~vmx->nested.msrs.vmfunc_controls)
			return -EINVAL;
		vmx->nested.msrs.vmfunc_controls = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}
/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}
/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
 * been modified by the L1 guest.  Note, "writable" in this context means
 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
 * VM-exit information fields (which are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS").
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i;

	if (WARN_ON(!shadow_vmcs))
		return;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (i = 0; i < max_shadow_read_write_fields; i++) {
		field = shadow_read_write_fields[i];
		val = __vmcs_readl(field.encoding);
		vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}
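
/*
 * Mirror the cached vmcs12 into the shadow VMCS so that an L1 VMREAD (and,
 * for RW fields, VMWRITE) can be satisfied by hardware without a VM-exit.
 */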
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
	const struct shadow_vmcs_field *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i, q;

	if (WARN_ON(!shadow_vmcs))
		return;

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			val = vmcs12_read_any(vmcs12, field.encoding,
					      field.offset);
			__vmcs_writel(field.encoding, val);
		}
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);
}
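
/*
 * Pull the fields L1 may have modified from the enlightened VMCS into the
 * cached vmcs12, honoring the Hyper-V clean-field bits so that unchanged
 * field groups are skipped.
 */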
static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
	vmcs12->tpr_threshold = evmcs->tpr_threshold;
	vmcs12->guest_rip = evmcs->guest_rip;

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
		vmcs12->guest_rsp = evmcs->guest_rsp;
		vmcs12->guest_rflags = evmcs->guest_rflags;
		vmcs12->guest_interruptibility_info =
			evmcs->guest_interruptibility_info;
	}
	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->cpu_based_vm_exec_control =
			evmcs->cpu_based_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
		vmcs12->exception_bitmap = evmcs->exception_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
		vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
		vmcs12->vm_entry_intr_info_field =
			evmcs->vm_entry_intr_info_field;
		vmcs12->vm_entry_exception_error_code =
			evmcs->vm_entry_exception_error_code;
		vmcs12->vm_entry_instruction_len =
			evmcs->vm_entry_instruction_len;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
		vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
		vmcs12->host_cr0 = evmcs->host_cr0;
		vmcs12->host_cr3 = evmcs->host_cr3;
		vmcs12->host_cr4 = evmcs->host_cr4;
		vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
		vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
		vmcs12->host_rip = evmcs->host_rip;
		vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
		vmcs12->host_es_selector = evmcs->host_es_selector;
		vmcs12->host_cs_selector = evmcs->host_cs_selector;
		vmcs12->host_ss_selector = evmcs->host_ss_selector;
		vmcs12->host_ds_selector = evmcs->host_ds_selector;
		vmcs12->host_fs_selector = evmcs->host_fs_selector;
		vmcs12->host_gs_selector = evmcs->host_gs_selector;
		vmcs12->host_tr_selector = evmcs->host_tr_selector;
	}
	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
		vmcs12->secondary_vm_exec_control =
			evmcs->secondary_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
		vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
		vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
		vmcs12->msr_bitmap = evmcs->msr_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
		vmcs12->guest_es_base = evmcs->guest_es_base;
		vmcs12->guest_cs_base = evmcs->guest_cs_base;
		vmcs12->guest_ss_base = evmcs->guest_ss_base;
		vmcs12->guest_ds_base = evmcs->guest_ds_base;
		vmcs12->guest_fs_base = evmcs->guest_fs_base;
		vmcs12->guest_gs_base = evmcs->guest_gs_base;
		vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
		vmcs12->guest_tr_base = evmcs->guest_tr_base;
		vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
		vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
		vmcs12->guest_es_limit = evmcs->guest_es_limit;
		vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
		vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
		vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
		vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
		vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
		vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
		vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
		vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
		vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
		vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
		vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
		vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
		vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
		vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
		vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
		vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
		vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
		vmcs12->guest_es_selector = evmcs->guest_es_selector;
		vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
		vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
		vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
		vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
		vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
		vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
		vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
	}
	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
		vmcs12->tsc_offset = evmcs->tsc_offset;
		vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
		vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
		vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
		vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
		vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
		vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
		vmcs12->guest_cr0 = evmcs->guest_cr0;
		vmcs12->guest_cr3 = evmcs->guest_cr3;
		vmcs12->guest_cr4 = evmcs->guest_cr4;
		vmcs12->guest_dr7 = evmcs->guest_dr7;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
		vmcs12->host_fs_base = evmcs->host_fs_base;
		vmcs12->host_gs_base = evmcs->host_gs_base;
		vmcs12->host_tr_base = evmcs->host_tr_base;
		vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
		vmcs12->host_idtr_base = evmcs->host_idtr_base;
		vmcs12->host_rsp = evmcs->host_rsp;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
		vmcs12->ept_pointer = evmcs->ept_pointer;
		vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
		vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
		vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
		vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
		vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
		vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
		vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
		vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
		vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
		vmcs12->guest_pending_dbg_exceptions =
			evmcs->guest_pending_dbg_exceptions;
		vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
		vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
		vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
		vmcs12->guest_activity_state = evmcs->guest_activity_state;
		vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
	}
	/*
	 * Not used in KVM:
	 *
	 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
	 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
	 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
	 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
	 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
	 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
	 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
	 * vmcs12->page_fault_error_code_mask =
	 *		evmcs->page_fault_error_code_mask;
	 * vmcs12->page_fault_error_code_match =
	 *		evmcs->page_fault_error_code_match;
	 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
	 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
	 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
	 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
	 *
	 * Read only fields:
	 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
	 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
	 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
	 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
	 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
	 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
	 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
	 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
	 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
	 * vmcs12->exit_qualification = evmcs->exit_qualification;
	 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
	 *
	 * Not present in struct vmcs12:
	 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
	 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
	 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
	 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
	 */

	return 0;
}
static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/*
	 * Should not be changed by KVM:
	 *
	 * evmcs->host_es_selector = vmcs12->host_es_selector;
	 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
	 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
	 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
	 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
	 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
	 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
	 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
	 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
	 * evmcs->host_cr0 = vmcs12->host_cr0;
	 * evmcs->host_cr3 = vmcs12->host_cr3;
	 * evmcs->host_cr4 = vmcs12->host_cr4;
	 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
	 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
	 * evmcs->host_rip = vmcs12->host_rip;
	 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
	 * evmcs->host_fs_base = vmcs12->host_fs_base;
	 * evmcs->host_gs_base = vmcs12->host_gs_base;
	 * evmcs->host_tr_base = vmcs12->host_tr_base;
	 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
	 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
	 * evmcs->host_rsp = vmcs12->host_rsp;
	 *
	 * sync_vmcs02_to_vmcs12() doesn't read these:
	 *
	 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
	 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
	 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
	 * evmcs->ept_pointer = vmcs12->ept_pointer;
	 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
	 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
	 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
	 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
	 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
	 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
	 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
	 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
	 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
	 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
	 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
	 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
	 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
	 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
	 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
	 * evmcs->page_fault_error_code_mask =
	 *		vmcs12->page_fault_error_code_mask;
	 * evmcs->page_fault_error_code_match =
	 *		vmcs12->page_fault_error_code_match;
	 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
	 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
	 * evmcs->tsc_offset = vmcs12->tsc_offset;
	 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
	 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
	 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
	 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
	 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
	 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
	 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
	 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
	 *
	 * Not present in struct vmcs12:
	 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
	 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
	 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
	 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
	 */
	evmcs->guest_es_selector = vmcs12->guest_es_selector;
	evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
	evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
	evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
	evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
	evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
	evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
	evmcs->guest_tr_selector = vmcs12->guest_tr_selector;

	evmcs->guest_es_limit = vmcs12->guest_es_limit;
	evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
	evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
	evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
	evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
	evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
	evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
	evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
	evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
	evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;

	evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
	evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
	evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
	evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
	evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
	evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
	evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
	evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;

	evmcs->guest_es_base = vmcs12->guest_es_base;
	evmcs->guest_cs_base = vmcs12->guest_cs_base;
	evmcs->guest_ss_base = vmcs12->guest_ss_base;
	evmcs->guest_ds_base = vmcs12->guest_ds_base;
	evmcs->guest_fs_base = vmcs12->guest_fs_base;
	evmcs->guest_gs_base = vmcs12->guest_gs_base;
	evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
	evmcs->guest_tr_base = vmcs12->guest_tr_base;
	evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
	evmcs->guest_idtr_base = vmcs12->guest_idtr_base;

	evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
	evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
	evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
	evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
	evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
	evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;

	evmcs->guest_pending_dbg_exceptions =
		vmcs12->guest_pending_dbg_exceptions;
	evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
	evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;

	evmcs->guest_activity_state = vmcs12->guest_activity_state;
	evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;

	evmcs->guest_cr0 = vmcs12->guest_cr0;
	evmcs->guest_cr3 = vmcs12->guest_cr3;
	evmcs->guest_cr4 = vmcs12->guest_cr4;
	evmcs->guest_dr7 = vmcs12->guest_dr7;

	evmcs->guest_physical_address = vmcs12->guest_physical_address;

	evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
	evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
	evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
	evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
	evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
	evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
	evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
	evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;

	evmcs->exit_qualification = vmcs12->exit_qualification;

	evmcs->guest_linear_address = vmcs12->guest_linear_address;
	evmcs->guest_rsp = vmcs12->guest_rsp;
	evmcs->guest_rflags = vmcs12->guest_rflags;

	evmcs->guest_interruptibility_info =
		vmcs12->guest_interruptibility_info;
	evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
	evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
	evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
	evmcs->vm_entry_exception_error_code =
		vmcs12->vm_entry_exception_error_code;
	evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;

	evmcs->guest_rip = vmcs12->guest_rip;

	evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;

	return 0;
}
/*
 * This is an equivalent of the nested hypervisor executing the vmptrld
 * instruction.
 */
static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
						 bool from_launch)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool evmcs_gpa_changed = false;
	u64 evmcs_gpa;

	if (likely(!vmx->nested.enlightened_vmcs_enabled))
		return 1;

	if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
		return 1;

	if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
		if (!vmx->nested.hv_evmcs)
			vmx->nested.current_vmptr = -1ull;

		nested_release_evmcs(vcpu);

		if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
				 &vmx->nested.hv_evmcs_map))
			return 0;

		vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;

		/*
		 * Currently, KVM only supports eVMCS version 1
		 * (== KVM_EVMCS_VERSION) and thus we expect guest to set this
		 * value to the first u32 field of the eVMCS, which should
		 * specify the eVMCS version number.
		 *
		 * Guest should be aware of supported eVMCS versions by host by
		 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is
		 * expected to set this CPUID leaf according to the value
		 * returned in vmcs_version from nested_enable_evmcs().
		 *
		 * However, it turns out that Microsoft Hyper-V fails to comply
		 * with its own invented interface: when Hyper-V uses eVMCS, it
		 * just sets the first u32 field of the eVMCS to the revision_id
		 * specified in MSR_IA32_VMX_BASIC instead of an eVMCS version
		 * number, which is one of the supported versions specified in
		 * CPUID.0x4000000A.EAX[0:15].
		 *
		 * To overcome the Hyper-V bug, we accept here either a
		 * supported eVMCS version or the VMCS12 revision_id as valid
		 * values for the first u32 field of the eVMCS.
		 */
		if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
		    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
			nested_release_evmcs(vcpu);
			return 0;
		}

		vmx->nested.dirty_vmcs12 = true;
		vmx->nested.hv_evmcs_vmptr = evmcs_gpa;

		evmcs_gpa_changed = true;
		/*
		 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
		 * reloaded from guest's memory (read only fields, fields not
		 * present in struct hv_enlightened_vmcs, ...). Make sure there
		 * are no leftovers.
		 */
		if (from_launch) {
			struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

			memset(vmcs12, 0, sizeof(*vmcs12));
			vmcs12->hdr.revision_id = VMCS12_REVISION;
		}
	}

	/*
	 * Clean fields data can't be used on VMLAUNCH and when we switch
	 * between different L2 guests as KVM keeps a single VMCS12 per L1.
	 */
	if (from_launch || evmcs_gpa_changed)
		vmx->nested.hv_evmcs->hv_clean_fields &=
			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;

	return 1;
}
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * hv_evmcs may end up being not mapped after migration (when
	 * L2 was running), map it here to make sure vmcs12 changes are
	 * properly reflected.
	 */
	if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
		nested_vmx_handle_enlightened_vmptrld(vcpu, false);

	if (vmx->nested.hv_evmcs) {
		copy_vmcs12_to_enlightened(vmx);
		/* All fields are clean */
		vmx->nested.hv_evmcs->hv_clean_fields |=
			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
	} else {
		copy_vmcs12_to_shadow(vmx);
	}

	vmx->nested.need_vmcs12_to_shadow_sync = false;
}
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
	struct vcpu_vmx *vmx =
		container_of(timer, struct vcpu_vmx, nested.preemption_timer);

	vmx->nested.preemption_timer_expired = true;
	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
	kvm_vcpu_kick(&vmx->vcpu);

	return HRTIMER_NORESTART;
}
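
/*
 * Convert the emulated preemption timer value to nanoseconds: the emulated
 * timer ticks every 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE (i.e. 32) TSC
 * cycles, so ns = (value << 5) * 10^6 / virtual_tsc_khz.
 */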
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
{
	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * A timer value of zero is architecturally guaranteed to cause
	 * a VMExit prior to executing any instructions in the guest.
	 */
	if (preemption_timeout == 0) {
		vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
		return;
	}

	if (vcpu->arch.virtual_tsc_khz == 0)
		return;

	preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
	preemption_timeout *= 1000000;
	do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
	hrtimer_start(&vmx->nested.preemption_timer,
		      ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
}
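
/*
 * Compute the EFER value L2 should run with: taken verbatim from vmcs12 when
 * VM_ENTRY_LOAD_IA32_EFER is used on this entry, otherwise derived from L1's
 * EFER with LMA/LME forced to match the "IA-32e mode guest" entry control.
 */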
static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
		return vmcs12->guest_ia32_efer;
	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
		return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
	else
		return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
}
1946 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
1949 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
1950 * according to L0's settings (vmcs12 is irrelevant here). Host
1951 * fields that come from L0 and are not constant, e.g. HOST_CR3,
1952 * will be set as needed prior to VMLAUNCH/VMRESUME.
1954 if (vmx->nested.vmcs02_initialized)
1956 vmx->nested.vmcs02_initialized = true;
1959 * We don't care what the EPTP value is; we just need to guarantee
1960 * it's valid so we don't get a false positive when doing early
1961 * consistency checks.
1963 if (enable_ept && nested_early_check)
1964 vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
1966 /* All VMFUNCs are currently emulated through L0 vmexits. */
1967 if (cpu_has_vmx_vmfunc())
1968 vmcs_write64(VM_FUNCTION_CONTROL, 0);
1970 if (cpu_has_vmx_posted_intr())
1971 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
1973 if (cpu_has_vmx_msr_bitmap())
1974 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
1977 * The PML address never changes, so it is constant in vmcs02.
1978 * Conceptually we want to copy the PML index from vmcs01 here,
1979 * and then back to vmcs01 on nested vmexit. But since we flush
1980 * the log and reset GUEST_PML_INDEX on each vmexit, the PML
1981 * index is also effectively constant in vmcs02.
1984 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
1985 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
1988 if (cpu_has_vmx_encls_vmexit())
1989 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
1992 * Set the MSR load/store lists to match L0's settings. Only the
1993 * addresses are constant (for vmcs02); the counts can change based
1994 * on L2's behavior, e.g. switching to/from long mode.
1996 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
1997 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
1998 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
2000 vmx_set_constant_host_state(vmx);
2003 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
2004 struct vmcs12 *vmcs12)
2006 prepare_vmcs02_constant_state(vmx);
2008 vmcs_write64(VMCS_LINK_POINTER, -1ull);
2011 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2012 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2014 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2018 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2020 u32 exec_control, vmcs12_exec_ctrl;
2021 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2023 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
2024 prepare_vmcs02_early_rare(vmx, vmcs12);
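/*
 * Pin-based controls: start from L0's required settings and fold in
 * L1's requests, except the VMX preemption timer, which KVM emulates
 * with an hrtimer rather than running it in hardware for L2.
 */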
2029 exec_control = vmx_pin_based_exec_ctrl(vmx);
2030 exec_control |= (vmcs12->pin_based_vm_exec_control &
2031 ~PIN_BASED_VMX_PREEMPTION_TIMER);
2033 /* The posted-interrupt settings are taken only from vmcs12. */
2034 if (nested_cpu_has_posted_intr(vmcs12)) {
2035 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2036 vmx->nested.pi_pending = false;
2038 exec_control &= ~PIN_BASED_POSTED_INTR;
2040 pin_controls_set(vmx, exec_control);
2045 exec_control = vmx_exec_control(vmx); /* L0's desires */
2046 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2047 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
2048 exec_control &= ~CPU_BASED_TPR_SHADOW;
2049 exec_control |= vmcs12->cpu_based_vm_exec_control;
2051 if (exec_control & CPU_BASED_TPR_SHADOW)
2052 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2053 #ifdef CONFIG_X86_64
2055 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2056 CPU_BASED_CR8_STORE_EXITING;
2060 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2061 * for I/O port accesses.
2063 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2064 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2067 * This bit will be computed in nested_get_vmcs12_pages, because
2068 * we do not have access to L1's MSR bitmap yet. For now, keep
2069 * the same bit as before, hoping to avoid multiple VMWRITEs that
2070 * only set/clear this bit.
2072 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
2073 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2075 exec_controls_set(vmx, exec_control);
2078 * SECONDARY EXEC CONTROLS
2080 if (cpu_has_secondary_exec_ctrls()) {
2081 exec_control = vmx->secondary_exec_control;
2083 /* Take the following fields only from vmcs12 */
2084 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2085 SECONDARY_EXEC_ENABLE_INVPCID |
2086 SECONDARY_EXEC_RDTSCP |
2087 SECONDARY_EXEC_XSAVES |
2088 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2089 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2090 SECONDARY_EXEC_ENABLE_VMFUNC);
2091 if (nested_cpu_has(vmcs12,
2092 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
2093 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
2094 ~SECONDARY_EXEC_ENABLE_PML;
2095 exec_control |= vmcs12_exec_ctrl;
2098 /* VMCS shadowing for L2 is emulated for now */
2099 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2102 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
2103 * will not have to rewrite the controls just for this bit.
2105 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
2106 (vmcs12->guest_cr4 & X86_CR4_UMIP))
2107 exec_control |= SECONDARY_EXEC_DESC;
2109 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2110 vmcs_write16(GUEST_INTR_STATUS,
2111 vmcs12->guest_intr_status);
2113 secondary_exec_controls_set(vmx, exec_control);
2119 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2120 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2121 * on the related bits (if supported by the CPU) in the hope that
2122 * we can avoid VMWrites during vmx_set_efer().
2124 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
2125 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
2126 if (cpu_has_load_ia32_efer()) {
2127 if (guest_efer & EFER_LMA)
2128 exec_control |= VM_ENTRY_IA32E_MODE;
2129 if (guest_efer != host_efer)
2130 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2132 vm_entry_controls_set(vmx, exec_control);
2137 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2138 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2139 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2141 exec_control = vmx_vmexit_ctrl();
2142 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2143 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2144 vm_exit_controls_set(vmx, exec_control);
2147 * Interrupt/Exception Fields
2149 if (vmx->nested.nested_run_pending) {
2150 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2151 vmcs12->vm_entry_intr_info_field);
2152 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2153 vmcs12->vm_entry_exception_error_code);
2154 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2155 vmcs12->vm_entry_instruction_len);
2156 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2157 vmcs12->guest_interruptibility_info);
2158 vmx->loaded_vmcs->nmi_known_unmasked =
2159 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2161 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2165 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2167 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2169 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2170 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2171 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2172 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2173 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2174 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2175 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2176 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2177 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2178 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2179 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2180 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2181 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2182 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2183 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2184 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2185 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2186 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2187 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2188 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2189 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2190 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2191 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2192 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2193 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2194 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2195 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2196 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2197 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2198 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2199 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2200 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2201 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2202 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2203 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2204 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2205 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2206 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2209 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2210 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2211 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2212 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2213 vmcs12->guest_pending_dbg_exceptions);
2214 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2215 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2218 * L1 may access the L2's PDPTRs, so save them to construct vmcs12.
2222 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2223 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2224 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2225 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2228 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2229 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2230 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2233 if (nested_cpu_has_xsaves(vmcs12))
2234 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2237 * Whether page-faults are trapped is determined by a combination of
2238 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
2239 * If enable_ept, L0 doesn't care about page faults and we should
2240 * set all of these to L1's desires. However, if !enable_ept, L0 does
2241 * care about (at least some) page faults, and because it is not easy
2242 * (if at all possible?) to merge L0 and L1's desires, we simply ask
2243 * to exit on each and every L2 page fault. This is done by setting
2244 * MASK=MATCH=0 and (see below) EB.PF=1.
2245 * Note that below we don't need special code to set EB.PF beyond the
2246 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2247 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2248 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2250 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
2251 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
2252 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
2253 enable_ept ? vmcs12->page_fault_error_code_match : 0);
2255 if (cpu_has_vmx_apicv()) {
2256 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2257 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2258 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2259 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2262 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2263 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2265 set_cr4_guest_host_mask(vmx);
2269 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2270 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2271 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2272 * guest in a way that will both be appropriate to L1's requests, and our
2273 * needs. In addition to modifying the active vmcs (which is vmcs02), this
2274 * function also has additional necessary side-effects, like setting various
2275 * vcpu->arch fields.
2276 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
2277 * is assigned to entry_failure_code on failure.
2279 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2280 u32 *entry_failure_code)
2282 struct vcpu_vmx *vmx = to_vmx(vcpu);
2283 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2284 bool load_guest_pdptrs_vmcs12 = false;
2286 if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
2287 prepare_vmcs02_rare(vmx, vmcs12);
2288 vmx->nested.dirty_vmcs12 = false;
2290 load_guest_pdptrs_vmcs12 = !hv_evmcs ||
2291 !(hv_evmcs->hv_clean_fields &
2292 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
2295 if (vmx->nested.nested_run_pending &&
2296 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2297 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2298 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2300 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2301 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2303 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2304 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2305 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
2306 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2308 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2309 * bitwise-or of what L1 wants to trap for L2, and what we want to
2310 * trap. Note that CR0.TS also needs updating - we do this later.
2312 update_exception_bitmap(vcpu);
2313 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2314 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2316 if (vmx->nested.nested_run_pending &&
2317 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2318 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2319 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2320 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2321 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2324 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2326 if (kvm_has_tsc_control)
2327 decache_tsc_multiplier(vmx);
2331 * There is no direct mapping between vpid02 and vpid12: vpid02 is
2332 * per-vCPU for L0 and reused, while the value of vpid12 is changed
2333 * with one invvpid during nested vmentry.
2334 * The vpid12 is allocated by L1 for L2, so it will not influence
2335 * the global bitmap (for vpid01 and vpid02 allocation) even if L1
2336 * spawns a lot of nested vCPUs.
2338 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
2339 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
2340 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
2341 __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
2345 * If L1 uses EPT, then L0 needs to execute INVEPT on
2346 * EPTP02 instead of EPTP01. Therefore, delay TLB
2347 * flush until vmcs02->eptp is fully updated by
2348 * KVM_REQ_LOAD_CR3. Note that this assumes
2349 * KVM_REQ_TLB_FLUSH is evaluated after
2350 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
2352 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2356 if (nested_cpu_has_ept(vmcs12))
2357 nested_ept_init_mmu_context(vcpu);
2358 else if (nested_cpu_has2(vmcs12,
2359 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2360 vmx_flush_tlb(vcpu, true);
2363 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2364 * bits that we consider must always be enabled.
2365 * The CR0_READ_SHADOW is what L2 should have expected to read given
2366 * the specifications by L1; it's not enough to take
2367 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
2368 * have more bits set than L1 expected.
2370 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2371 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2373 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2374 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2376 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2377 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2378 vmx_set_efer(vcpu, vcpu->arch.efer);
2381 * Guest state is invalid and unrestricted guest is disabled,
2382 * which means L1 attempted VMEntry to L2 with invalid state.
2385 if (vmx->emulation_required) {
2386 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2390 /* Load guest CR3, shadowed by either EPT or shadow page tables. */
2391 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2392 entry_failure_code))
2395 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
2396 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2397 is_pae_paging(vcpu)) {
2398 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2399 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2400 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2401 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2405 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2407 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2408 kvm_rip_write(vcpu, vmcs12->guest_rip);
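/*
 * Per the SDM's control-field checks: if "NMI exiting" is 0 then
 * "virtual NMIs" must be 0, and if "virtual NMIs" is 0 then
 * "NMI-window exiting" must be 0.
 */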
2412 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2414 if (!nested_cpu_has_nmi_exiting(vmcs12) &&
2415 nested_cpu_has_virtual_nmis(vmcs12))
2418 if (!nested_cpu_has_virtual_nmis(vmcs12) &&
2419 nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
2425 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
2427 struct vcpu_vmx *vmx = to_vmx(vcpu);
2428 int maxphyaddr = cpuid_maxphyaddr(vcpu);
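/*
 * EPTP layout: bits 2:0 are the memory type, bits 5:3 the page-walk
 * length minus 1, bit 6 enables accessed/dirty flags; bits 11:7 and
 * bits at or above MAXPHYADDR are reserved.
 */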
2430 /* Check for memory type validity */
2431 switch (address & VMX_EPTP_MT_MASK) {
2432 case VMX_EPTP_MT_UC:
2433 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
2436 case VMX_EPTP_MT_WB:
2437 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
2444 /* Only a 4-level page-walk length is valid. */
2445 if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
2448 /* Reserved bits should not be set */
2449 if (address >> maxphyaddr || ((address >> 7) & 0x1f))
2452 /* AD, if set, should be supported */
2453 if (address & VMX_EPTP_AD_ENABLE_BIT) {
2454 if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
2462 * Checks related to VM-Execution Control Fields
2464 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2465 struct vmcs12 *vmcs12)
2467 struct vcpu_vmx *vmx = to_vmx(vcpu);
2469 if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2470 vmx->nested.msrs.pinbased_ctls_low,
2471 vmx->nested.msrs.pinbased_ctls_high) ||
2472 !vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2473 vmx->nested.msrs.procbased_ctls_low,
2474 vmx->nested.msrs.procbased_ctls_high))
2477 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2478 !vmx_control_verify(vmcs12->secondary_vm_exec_control,
2479 vmx->nested.msrs.secondary_ctls_low,
2480 vmx->nested.msrs.secondary_ctls_high))
2483 if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) ||
2484 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2485 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2486 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2487 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2488 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2489 nested_vmx_check_nmi_controls(vmcs12) ||
2490 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2491 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2492 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2493 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2494 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2497 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2498 nested_cpu_has_save_preemption_timer(vmcs12))
2501 if (nested_cpu_has_ept(vmcs12) &&
2502 !valid_ept_address(vcpu, vmcs12->ept_pointer))
2505 if (nested_cpu_has_vmfunc(vmcs12)) {
2506 if (vmcs12->vm_function_control &
2507 ~vmx->nested.msrs.vmfunc_controls)
2510 if (nested_cpu_has_eptp_switching(vmcs12)) {
2511 if (!nested_cpu_has_ept(vmcs12) ||
2512 !page_address_valid(vcpu, vmcs12->eptp_list_address))
2521 * Checks related to VM-Exit Control Fields
2523 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2524 struct vmcs12 *vmcs12)
2526 struct vcpu_vmx *vmx = to_vmx(vcpu);
2528 if (!vmx_control_verify(vmcs12->vm_exit_controls,
2529 vmx->nested.msrs.exit_ctls_low,
2530 vmx->nested.msrs.exit_ctls_high) ||
2531 nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
2538 * Checks related to VM-Entry Control Fields
2540 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2541 struct vmcs12 *vmcs12)
2543 struct vcpu_vmx *vmx = to_vmx(vcpu);
2545 if (!vmx_control_verify(vmcs12->vm_entry_controls,
2546 vmx->nested.msrs.entry_ctls_low,
2547 vmx->nested.msrs.entry_ctls_high))
2551 * From the Intel SDM, volume 3:
2552 * Fields relevant to VM-entry event injection must be set properly.
2553 * These fields are the VM-entry interruption-information field, the
2554 * VM-entry exception error code, and the VM-entry instruction length.
2556 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2557 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2558 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2559 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2560 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2561 bool should_have_error_code;
2562 bool urg = nested_cpu_has2(vmcs12,
2563 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2564 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
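/*
 * Error code rules apply only to hardware exceptions delivered in
 * protected mode; with unrestricted guest, CR0.PE may be 0, in which
 * case no error code is expected.
 */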
2566 /* VM-entry interruption-info field: interruption type */
2567 if (intr_type == INTR_TYPE_RESERVED ||
2568 (intr_type == INTR_TYPE_OTHER_EVENT &&
2569 !nested_cpu_supports_monitor_trap_flag(vcpu)))
2572 /* VM-entry interruption-info field: vector */
2573 if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2574 (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2575 (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2578 /* VM-entry interruption-info field: deliver error code */
2579 should_have_error_code =
2580 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2581 x86_exception_has_error_code(vector);
2582 if (has_error_code != should_have_error_code)
2585 /* VM-entry exception error code */
2586 if (has_error_code &&
2587 vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
2590 /* VM-entry interruption-info field: reserved bits */
2591 if (intr_info & INTR_INFO_RESVD_BITS_MASK)
2594 /* VM-entry instruction length */
2595 switch (intr_type) {
2596 case INTR_TYPE_SOFT_EXCEPTION:
2597 case INTR_TYPE_SOFT_INTR:
2598 case INTR_TYPE_PRIV_SW_EXCEPTION:
2599 if ((vmcs12->vm_entry_instruction_len > 15) ||
2600 (vmcs12->vm_entry_instruction_len == 0 &&
2601 !nested_cpu_has_zero_length_injection(vcpu)))
2606 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2612 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
2613 struct vmcs12 *vmcs12)
2615 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2616 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2617 nested_check_vm_entry_controls(vcpu, vmcs12))
2623 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
2624 struct vmcs12 *vmcs12)
2628 if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
2629 !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
2630 !nested_cr3_valid(vcpu, vmcs12->host_cr3))
2633 if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) ||
2634 is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
2637 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2638 !kvm_pat_valid(vmcs12->host_ia32_pat))
2641 ia32e = (vmcs12->vm_exit_controls &
2642 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
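/*
 * Host selectors must have RPL = 0 and TI = 0; CS and TR must not be
 * NULL, and SS may be NULL only if the host runs in 64-bit mode after
 * the exit.
 */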
2644 if (vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
2645 vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
2646 vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
2647 vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
2648 vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
2649 vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
2650 vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
2651 vmcs12->host_cs_selector == 0 ||
2652 vmcs12->host_tr_selector == 0 ||
2653 (vmcs12->host_ss_selector == 0 && !ia32e))
2656 #ifdef CONFIG_X86_64
2657 if (is_noncanonical_address(vmcs12->host_fs_base, vcpu) ||
2658 is_noncanonical_address(vmcs12->host_gs_base, vcpu) ||
2659 is_noncanonical_address(vmcs12->host_gdtr_base, vcpu) ||
2660 is_noncanonical_address(vmcs12->host_idtr_base, vcpu) ||
2661 is_noncanonical_address(vmcs12->host_tr_base, vcpu))
2666 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2667 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2668 * the values of the LMA and LME bits in the field must each be that of
2669 * the host address-space size VM-exit control.
2671 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2672 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
2673 ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
2674 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
2681 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2682 struct vmcs12 *vmcs12)
2685 struct vmcs12 *shadow;
2686 struct kvm_host_map map;
2688 if (vmcs12->vmcs_link_pointer == -1ull)
2691 if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
2694 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
2699 if (shadow->hdr.revision_id != VMCS12_REVISION ||
2700 shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
2703 kvm_vcpu_unmap(vcpu, &map, false);
2708 * Checks related to Guest Non-register State
2710 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2712 if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2713 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
2719 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
2720 struct vmcs12 *vmcs12,
2725 *exit_qual = ENTRY_FAIL_DEFAULT;
2727 if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
2728 !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
2731 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
2732 !kvm_pat_valid(vmcs12->guest_ia32_pat))
2735 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
2736 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
2741 * If the load IA32_EFER VM-entry control is 1, the following checks
2742 * are performed on the field for the IA32_EFER MSR:
2743 * - Bits reserved in the IA32_EFER MSR must be 0.
2744 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
2745 *   the "IA-32e mode guest" VM-entry control. It must also be identical
2746 *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to CR0.PG) is 1.
2749 if (to_vmx(vcpu)->nested.nested_run_pending &&
2750 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
2751 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
2752 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
2753 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
2754 ((vmcs12->guest_cr0 & X86_CR0_PG) &&
2755 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
2759 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
2760 (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
2761 (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
2764 if (nested_check_guest_non_reg_state(vmcs12))
2770 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
2772 struct vcpu_vmx *vmx = to_vmx(vcpu);
2773 unsigned long cr3, cr4;
2776 if (!nested_early_check)
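/*
 * The MSR autoload lists are irrelevant for the early consistency
 * check VMEnter; zero the counts here, they are restored below after
 * the check completes.
 */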
2779 if (vmx->msr_autoload.host.nr)
2780 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2781 if (vmx->msr_autoload.guest.nr)
2782 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
2786 vmx_prepare_switch_to_guest(vcpu);
2789 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
2790 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
2791 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
2792 * there is no need to preserve other bits or save/restore the field.
2794 vmcs_writel(GUEST_RFLAGS, 0);
2796 cr3 = __get_current_cr3_fast();
2797 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
2798 vmcs_writel(HOST_CR3, cr3);
2799 vmx->loaded_vmcs->host_state.cr3 = cr3;
2802 cr4 = cr4_read_shadow();
2803 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
2804 vmcs_writel(HOST_CR4, cr4);
2805 vmx->loaded_vmcs->host_state.cr4 = cr4;
2809 "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
2810 "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
2812 __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
2813 "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
2815 "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
2817 /* Check if vmlaunch or vmresume is needed */
2818 "cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
2821 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
2822 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
2823 * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
2824 * result of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
2826 "call vmx_vmenter\n\t"
2829 : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
2830 : [HOST_RSP]"r"((unsigned long)HOST_RSP),
2831 [loaded_vmcs]"r"(vmx->loaded_vmcs),
2832 [launched]"i"(offsetof(struct loaded_vmcs, launched)),
2833 [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
2834 [wordsize]"i"(sizeof(ulong))
2838 if (vmx->msr_autoload.host.nr)
2839 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2840 if (vmx->msr_autoload.guest.nr)
2841 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2845 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
2846 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
2851 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
2854 if (hw_breakpoint_active())
2855 set_debugreg(__this_cpu_read(cpu_dr7), 7);
2859 * A non-failing VMEntry means we somehow entered guest mode with
2860 * an illegal RIP, and that's just the tip of the iceberg. There
2861 * is no telling what memory has been modified or what state has
2862 * been exposed to unknown code. Hitting this all but guarantees
2863 * a (very critical) hardware issue.
2865 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
2866 VMX_EXIT_REASONS_FAILED_VMENTRY));
2871 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
2872 struct vmcs12 *vmcs12);
2874 static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
2876 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2877 struct vcpu_vmx *vmx = to_vmx(vcpu);
2878 struct kvm_host_map *map;
2882 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
2884 * Translate L1 physical address to host physical
2885 * address for vmcs02. Keep the page pinned, so this
2886 * physical address remains valid. We keep a reference
2887 * to it so we can release it later.
2889 if (vmx->nested.apic_access_page) { /* shouldn't happen */
2890 kvm_release_page_dirty(vmx->nested.apic_access_page);
2891 vmx->nested.apic_access_page = NULL;
2893 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
2895 * If translation failed, no matter: This feature asks
2896 * to exit when accessing the given address, and if it
2897 * can never be accessed, this feature won't do anything anyway.
2900 if (!is_error_page(page)) {
2901 vmx->nested.apic_access_page = page;
2902 hpa = page_to_phys(vmx->nested.apic_access_page);
2903 vmcs_write64(APIC_ACCESS_ADDR, hpa);
2905 secondary_exec_controls_clearbit(vmx,
2906 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
2910 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
2911 map = &vmx->nested.virtual_apic_map;
2913 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
2914 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
2915 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
2916 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
2917 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
2919 * The processor will never use the TPR shadow; simply
2920 * clear the bit from the execution control. Such a
2921 * configuration is useless, but it happens in tests.
2922 * For any other configuration, failing the vm entry is
2923 * _not_ what the processor does but it's basically the
2924 * only possibility we have.
2926 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
2929 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
2930 * force VM-Entry to fail.
2932 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
2936 if (nested_cpu_has_posted_intr(vmcs12)) {
2937 map = &vmx->nested.pi_desc_map;
2939 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
2940 vmx->nested.pi_desc =
2941 (struct pi_desc *)(((void *)map->hva) +
2942 offset_in_page(vmcs12->posted_intr_desc_addr));
2943 vmcs_write64(POSTED_INTR_DESC_ADDR,
2944 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
2947 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
2948 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
2950 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
2954 * Intel's VMX Instruction Reference specifies a common set of prerequisites
2955 * for running VMX instructions (except VMXON, whose prerequisites are
2956 * slightly different). It also specifies what exception to inject otherwise.
2957 * Note that many of these exceptions have priority over VM exits, so they
2958 * don't have to be checked again here.
2960 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
2962 if (!to_vmx(vcpu)->nested.vmxon) {
2963 kvm_queue_exception(vcpu, UD_VECTOR);
2967 if (vmx_get_cpl(vcpu)) {
2968 kvm_inject_gp(vcpu, 0);
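/*
 * Compare RVI and VPPR by priority class (the upper 4 bits of the
 * vector): a pending virtual interrupt is deliverable only if its
 * class is above the processor priority class.
 */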
2975 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
2977 u8 rvi = vmx_get_rvi();
2978 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
2980 return ((rvi & 0xf0) > (vppr & 0xf0));
2983 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
2984 struct vmcs12 *vmcs12);
2987 * If from_vmentry is false, this is being called from state restore (either RSM
2988 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
2991 * 0 - success, i.e. proceed with actual VMEnter
2992 * 1 - consistency check VMExit
2993 * -1 - consistency check VMFail
2995 int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
2997 struct vcpu_vmx *vmx = to_vmx(vcpu);
2998 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2999 bool evaluate_pending_interrupts;
3000 u32 exit_reason = EXIT_REASON_INVALID_STATE;
3003 evaluate_pending_interrupts = exec_controls_get(vmx) &
3004 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
3005 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
3006 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
3008 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3009 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3010 if (kvm_mpx_supported() &&
3011 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
3012 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3015 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3016 * nested early checks are disabled. In the event of a "late" VM-Fail,
3017 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
3018 * software model to the pre-VMEntry host state. When EPT is disabled,
3019 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
3020 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3021 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3022 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3023 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3024 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
3025 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3026 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3027 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
3028 * path would need to manually save/restore vmcs01.GUEST_CR3.
3030 if (!enable_ept && !nested_early_check)
3031 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3033 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3035 prepare_vmcs02_early(vmx, vmcs12);
3038 nested_get_vmcs12_pages(vcpu);
3040 if (nested_vmx_check_vmentry_hw(vcpu)) {
3041 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3045 if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
3046 goto vmentry_fail_vmexit;
3049 enter_guest_mode(vcpu);
3050 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3051 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
3053 if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
3054 goto vmentry_fail_vmexit_guest_mode;
3057 exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
3058 exit_qual = nested_vmx_load_msr(vcpu,
3059 vmcs12->vm_entry_msr_load_addr,
3060 vmcs12->vm_entry_msr_load_count);
3062 goto vmentry_fail_vmexit_guest_mode;
3065 * The MMU is not initialized to point at the right entities yet and
3066 * "get pages" would need to read data from the guest (i.e. we will
3067 * need to perform gpa to hpa translation). Request a call
3068 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3069 * have already been set at vmentry time and should not be reset.
3071 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
3075 * If L1 had a pending IRQ/NMI until it executed
3076 * VMLAUNCH/VMRESUME which wasn't delivered because it was
3077 * disallowed (e.g. interrupts disabled), L0 needs to
3078 * evaluate if this pending event should cause an exit from L2
3079 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
3080 * intercept EXTERNAL_INTERRUPT).
3082 * Usually this would be handled by the processor noticing an
3083 * IRQ/NMI window request, or checking RVI during evaluation of
3084 * pending virtual interrupts. However, this setting was done
3085 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3086 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3088 if (unlikely(evaluate_pending_interrupts))
3089 kvm_make_request(KVM_REQ_EVENT, vcpu);
3092 * Do not start the preemption timer hrtimer until after we know
3093 * we are successful, so that only nested_vmx_vmexit needs to cancel the timer.
3096 vmx->nested.preemption_timer_expired = false;
3097 if (nested_cpu_has_preemption_timer(vmcs12))
3098 vmx_start_preemption_timer(vcpu);
3101 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3102 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3103 * returned as far as L1 is concerned. It will only return (and set
3104 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3109 * A failed consistency check that leads to a VMExit during L1's
3110 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3111 * 26.7 "VM-entry failures during or after loading guest state".
3113 vmentry_fail_vmexit_guest_mode:
3114 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3115 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3116 leave_guest_mode(vcpu);
3118 vmentry_fail_vmexit:
3119 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3124 load_vmcs12_host_state(vcpu, vmcs12);
3125 vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
3126 vmcs12->exit_qualification = exit_qual;
3127 if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
3128 vmx->nested.need_vmcs12_to_shadow_sync = true;
3133 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3134 * for running an L2 nested guest.
3136 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3138 struct vmcs12 *vmcs12;
3139 struct vcpu_vmx *vmx = to_vmx(vcpu);
3140 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
3143 if (!nested_vmx_check_permission(vcpu))
3146 if (!nested_vmx_handle_enlightened_vmptrld(vcpu, launch))
3149 if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
3150 return nested_vmx_failInvalid(vcpu);
3152 vmcs12 = get_vmcs12(vcpu);
3155 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3156 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3157 * rather than RFLAGS.ZF, and no error number is stored to the
3158 * VM-instruction error field.
3160 if (vmcs12->hdr.shadow_vmcs)
3161 return nested_vmx_failInvalid(vcpu);
3163 if (vmx->nested.hv_evmcs) {
3164 copy_enlightened_to_vmcs12(vmx);
3165 /* Enlightened VMCS doesn't have launch state */
3166 vmcs12->launch_state = !launch;
3167 } else if (enable_shadow_vmcs) {
3168 copy_shadow_to_vmcs12(vmx);
3172 * The nested entry process starts with enforcing various prerequisites
3173 * on vmcs12 as required by the Intel SDM, and acting appropriately when
3174 * they fail: As the SDM explains, some conditions should cause the
3175 * instruction to fail, while others will cause the instruction to seem
3176 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3177 * To speed up the normal (success) code path, we should avoid checking
3178 * for misconfigurations which will anyway be caught by the processor
3179 * when using the merged vmcs02.
3181 if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
3182 return nested_vmx_failValid(vcpu,
3183 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3185 if (vmcs12->launch_state == launch)
3186 return nested_vmx_failValid(vcpu,
3187 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3188 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3190 if (nested_vmx_check_controls(vcpu, vmcs12))
3191 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3193 if (nested_vmx_check_host_state(vcpu, vmcs12))
3194 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3197 * We're finally done with prerequisite checking, and can start with the nested entry.
3200 vmx->nested.nested_run_pending = 1;
3201 ret = nested_vmx_enter_non_root_mode(vcpu, true);
3202 vmx->nested.nested_run_pending = !ret;
3206 return nested_vmx_failValid(vcpu,
3207 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3209 /* Hide L1D cache contents from the nested guest. */
3210 vmx->vcpu.arch.l1tf_flush_l1d = true;
3213 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3214 * also be used as part of restoring nVMX state for
3215 * snapshot restore (migration).
3217 * In this flow, it is assumed that vmcs12 cache was
3218 * transferred as part of captured nVMX state and should
3219 * therefore not be read from guest memory (which may not
3220 * exist on destination host yet).
3222 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3225 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3226 * awakened by event injection or by an NMI-window VM-exit or
3227 * by an interrupt-window VM-exit, halt the vcpu.
3229 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
3230 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3231 !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
3232 !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
3233 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3234 vmx->nested.nested_run_pending = 0;
3235 return kvm_vcpu_halt(vcpu);
3241 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3242 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3243 * This function returns the new value we should put in vmcs12.guest_cr0.
3244 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3245 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3246 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3247 * didn't trap the bit, because if L1 did, so would L0).
3248 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3249 * been modified by L2, and L1 knows it. So just leave the old value of
3250 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3251 * isn't relevant, because if L0 traps this bit it can set it to anything.
3252 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3253 * changed these bits, and therefore they need to be updated, but L0
3254 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3255 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3257 static inline unsigned long
3258 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3261 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3262 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3263 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3264 vcpu->arch.cr0_guest_owned_bits));
3267 static inline unsigned long
3268 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3271 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3272 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3273 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3274 vcpu->arch.cr4_guest_owned_bits));
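/*
 * Record an event that was queued for injection into L2 (exception,
 * NMI or interrupt) in vmcs12's IDT-vectoring info fields, so that L1
 * re-injects it when it resumes L2.
 */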
3277 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3278 struct vmcs12 *vmcs12)
3283 if (vcpu->arch.exception.injected) {
3284 nr = vcpu->arch.exception.nr;
3285 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3287 if (kvm_exception_is_soft(nr)) {
3288 vmcs12->vm_exit_instruction_len =
3289 vcpu->arch.event_exit_inst_len;
3290 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3292 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3294 if (vcpu->arch.exception.has_error_code) {
3295 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3296 vmcs12->idt_vectoring_error_code =
3297 vcpu->arch.exception.error_code;
3300 vmcs12->idt_vectoring_info_field = idt_vectoring;
3301 } else if (vcpu->arch.nmi_injected) {
3302 vmcs12->idt_vectoring_info_field =
3303 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3304 } else if (vcpu->arch.interrupt.injected) {
3305 nr = vcpu->arch.interrupt.nr;
3306 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3308 if (vcpu->arch.interrupt.soft) {
3309 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3310 vmcs12->vm_entry_instruction_len =
3311 vcpu->arch.event_exit_inst_len;
3313 idt_vectoring |= INTR_TYPE_EXT_INTR;
3315 vmcs12->idt_vectoring_info_field = idt_vectoring;
3320 static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3322 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3326 * Don't need to mark the APIC access page dirty; it is never
3327 * written to by the CPU during APIC virtualization.
3330 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3331 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3332 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3335 if (nested_cpu_has_posted_intr(vmcs12)) {
3336 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3337 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3341 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
3343 struct vcpu_vmx *vmx = to_vmx(vcpu);
3348 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
3351 vmx->nested.pi_pending = false;
3352 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
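/*
 * Scan the 256-bit posted-interrupt request bitmap for the highest
 * pending vector; find_last_bit() returns the bitmap size (256) when
 * no bits are set.
 */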
3355 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3356 if (max_irr != 256) {
3357 vapic_page = vmx->nested.virtual_apic_map.hva;
3361 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3362 vapic_page, &max_irr);
3363 status = vmcs_read16(GUEST_INTR_STATUS);
3364 if ((u8)max_irr > ((u8)status & 0xff)) {
3366 status |= (u8)max_irr;
3367 vmcs_write16(GUEST_INTR_STATUS, status);
3371 nested_mark_vmcs12_pages_dirty(vcpu);
3374 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3375 unsigned long exit_qual)
3377 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3378 unsigned int nr = vcpu->arch.exception.nr;
3379 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3381 if (vcpu->arch.exception.has_error_code) {
3382 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3383 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3386 if (kvm_exception_is_soft(nr))
3387 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3389 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3391 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3392 vmx_get_nmi_mask(vcpu))
3393 intr_info |= INTR_INFO_UNBLOCK_NMI;
3395 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3398 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
3400 struct vcpu_vmx *vmx = to_vmx(vcpu);
3401 unsigned long exit_qual;
3402 bool block_nested_events =
3403 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
3405 if (vcpu->arch.exception.pending &&
3406 nested_vmx_check_exception(vcpu, &exit_qual)) {
3407 if (block_nested_events)
3409 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3413 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3414 vmx->nested.preemption_timer_expired) {
3415 if (block_nested_events)
3417 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3421 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
3422 if (block_nested_events)
3424 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3425 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3426 INTR_INFO_VALID_MASK, 0);
3428 * The NMI-triggered VM exit counts as injection:
3429 * clear this one and block further NMIs.
3431 vcpu->arch.nmi_pending = 0;
3432 vmx_set_nmi_mask(vcpu, true);
3436 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
3437 nested_exit_on_intr(vcpu)) {
3438 if (block_nested_events)
3440 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
3444 vmx_complete_nested_posted_interrupt(vcpu);
3448 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
3451 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
3454 if (ktime_to_ns(remaining) <= 0)
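/*
 * Convert the remaining time back to the vmcs12 timer's units:
 * cycles = ns * tsc_khz / 10^6, then drop the emulated rate's
 * power-of-two scaling.
 */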
3457 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
3458 do_div(value, 1000000);
3459 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
3462 static bool is_vmcs12_ext_field(unsigned long field)
3465 case GUEST_ES_SELECTOR:
3466 case GUEST_CS_SELECTOR:
3467 case GUEST_SS_SELECTOR:
3468 case GUEST_DS_SELECTOR:
3469 case GUEST_FS_SELECTOR:
3470 case GUEST_GS_SELECTOR:
3471 case GUEST_LDTR_SELECTOR:
3472 case GUEST_TR_SELECTOR:
3473 case GUEST_ES_LIMIT:
3474 case GUEST_CS_LIMIT:
3475 case GUEST_SS_LIMIT:
3476 case GUEST_DS_LIMIT:
3477 case GUEST_FS_LIMIT:
3478 case GUEST_GS_LIMIT:
3479 case GUEST_LDTR_LIMIT:
3480 case GUEST_TR_LIMIT:
3481 case GUEST_GDTR_LIMIT:
3482 case GUEST_IDTR_LIMIT:
3483 case GUEST_ES_AR_BYTES:
3484 case GUEST_DS_AR_BYTES:
3485 case GUEST_FS_AR_BYTES:
3486 case GUEST_GS_AR_BYTES:
3487 case GUEST_LDTR_AR_BYTES:
3488 case GUEST_TR_AR_BYTES:
3495 case GUEST_LDTR_BASE:
3497 case GUEST_GDTR_BASE:
3498 case GUEST_IDTR_BASE:
3499 case GUEST_PENDING_DBG_EXCEPTIONS:
3509 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
3510 struct vmcs12 *vmcs12)
3512 struct vcpu_vmx *vmx = to_vmx(vcpu);
3514 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
3515 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
3516 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
3517 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
3518 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
3519 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
3520 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
3521 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
3522 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
3523 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
3524 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
3525 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
3526 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
3527 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
3528 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
3529 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
3530 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
3531 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
3532 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
3533 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
3534 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
3535 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
3536 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
3537 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
3538 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
3539 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
3540 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
3541 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
3542 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
3543 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
3544 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
3545 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
3546 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
3547 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
3548 vmcs12->guest_pending_dbg_exceptions =
3549 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
3550 if (kvm_mpx_supported())
3551 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3553 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
3556 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
3557 struct vmcs12 *vmcs12)
3559 struct vcpu_vmx *vmx = to_vmx(vcpu);
3562 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
3566 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
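/*
 * The rare fields live in vmcs02, which is not the loaded VMCS here;
 * temporarily switch to vmcs02 so they can be read with VMREAD, then
 * switch back to vmcs01.
 */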
3569 vmx->loaded_vmcs = &vmx->nested.vmcs02;
3570 vmx_vcpu_load(&vmx->vcpu, cpu);
3572 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
3574 vmx->loaded_vmcs = &vmx->vmcs01;
3575 vmx_vcpu_load(&vmx->vcpu, cpu);
3580 * Update the guest state fields of vmcs12 to reflect changes that
3581 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
3582 * VM-entry controls is also updated, since this is really a guest state bit.)
3585 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3587 struct vcpu_vmx *vmx = to_vmx(vcpu);
3589 if (vmx->nested.hv_evmcs)
3590 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
3592 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
3594 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
3595 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
3597 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
3598 vmcs12->guest_rip = kvm_rip_read(vcpu);
3599 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
3601 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
3602 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
3604 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
3605 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
3606 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
3608 vmcs12->guest_interruptibility_info =
3609 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3611 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
3612 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
3614 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
3616 if (nested_cpu_has_preemption_timer(vmcs12) &&
3617 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
3618 vmcs12->vmx_preemption_timer_value =
3619 vmx_get_preemption_timer_value(vcpu);
3622 * In some cases (usually, nested EPT), L2 is allowed to change its
3623 * own CR3 without exiting. If it has changed it, we must keep it.
3624 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
3625 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
3627 * Additionally, restore L2's PDPTR to vmcs12.
3630 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
3631 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
3632 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
3633 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
3634 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
3635 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
3639 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
3641 if (nested_cpu_has_vid(vmcs12))
3642 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
3644 vmcs12->vm_entry_controls =
3645 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
3646 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
3648 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
3649 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
3651 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
3652 vmcs12->guest_ia32_efer = vcpu->arch.efer;
3656 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
3657 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
3658 * and this function updates it to reflect the changes to the guest state while
3659 * L2 was running (and perhaps made some exits which were handled directly by L0
3660 * without going back to L1), and to reflect the exit reason.
3661 * Note that we do not have to copy all VMCS fields here, just those that
3662 * could have changed by the L2 guest or the exit - i.e., the guest-state and
3663 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
3664 * which already writes to vmcs12 directly.
3666 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
3667 u32 exit_reason, u32 exit_intr_info,
3668 unsigned long exit_qualification)
3670 /* update exit information fields: */
3671 vmcs12->vm_exit_reason = exit_reason;
3672 vmcs12->exit_qualification = exit_qualification;
3673 vmcs12->vm_exit_intr_info = exit_intr_info;
3675 vmcs12->idt_vectoring_info_field = 0;
3676 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3677 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
3679 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
3680 vmcs12->launch_state = 1;
3682 /* vm_entry_intr_info_field is cleared on exit. Emulate this
3683 * instead of reading the real value. */
3684 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
3687 * Transfer the event that L0 or L1 may have wanted to inject into
3688 * L2 to IDT_VECTORING_INFO_FIELD.
3690 vmcs12_save_pending_event(vcpu, vmcs12);
3693 * According to spec, there's no need to store the guest's
3694 * MSRs if the exit is due to a VM-entry failure that occurs
3695 * during or after loading the guest state. Since this exit
3696 * does not fall in that category, we need to save the MSRs.
3698 if (nested_vmx_store_msr(vcpu,
3699 vmcs12->vm_exit_msr_store_addr,
3700 vmcs12->vm_exit_msr_store_count))
3701 nested_vmx_abort(vcpu,
3702 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
3706 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
3707 * preserved above and would only end up incorrectly in L1.
3709 vcpu->arch.nmi_injected = false;
3710 kvm_clear_exception_queue(vcpu);
3711 kvm_clear_interrupt_queue(vcpu);
3715 * A part of what we need to do when the nested L2 guest exits and we want to
3716 * run its L1 parent is to reset L1's guest state to the host state specified in vmcs12.
3718 * This function is to be called not only on normal nested exit, but also on
3719 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
3720 * Failures During or After Loading Guest State").
3721 * This function should be called when the active VMCS is L1's (vmcs01).
3723 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3724 struct vmcs12 *vmcs12)
3726 struct kvm_segment seg;
3727 u32 entry_failure_code;
3729 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
3730 vcpu->arch.efer = vmcs12->host_ia32_efer;
3731 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3732 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
3734 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
3735 vmx_set_efer(vcpu, vcpu->arch.efer);
3737 kvm_rsp_write(vcpu, vmcs12->host_rsp);
3738 kvm_rip_write(vcpu, vmcs12->host_rip);
3739 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
3740 vmx_set_interrupt_shadow(vcpu, 0);
3743 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
3744 * actually changed, because vmx_set_cr0 refers to efer set above.
3746 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
3747 * (KVM doesn't change it);
3749 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3750 vmx_set_cr0(vcpu, vmcs12->host_cr0);
3752 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
3753 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3754 vmx_set_cr4(vcpu, vmcs12->host_cr4);
3756 nested_ept_uninit_mmu_context(vcpu);
3759 * Only PDPTE load can fail as the value of cr3 was checked on entry and
3760 * couldn't have changed.
3762 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
3763 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
3766 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
3769 * If vmcs01 doesn't use VPID, CPU flushes TLB on every
3770 * VMEntry/VMExit. Thus, no need to flush TLB.
3772 * If vmcs12 doesn't use VPID, L1 expects TLB to be
3773 * flushed on every VMEntry/VMExit.
3775 * Otherwise, we can preserve TLB entries as long as we are
3776 * able to tag L1 TLB entries differently than L2 TLB entries.
3778 * If vmcs12 uses EPT, we need to execute this flush on EPTP01
3779 * and therefore we request the TLB flush to happen only after VMCS EPTP
3780 * has been set by KVM_REQ_LOAD_CR3.
3782 if (enable_vpid &&
3783 (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
3784 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3787 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
3788 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
3789 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
3790 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
3791 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
3792 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
3793 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
3795 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
3796 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
3797 vmcs_write64(GUEST_BNDCFGS, 0);
3799 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
3800 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
3801 vcpu->arch.pat = vmcs12->host_ia32_pat;
3803 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
3804 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
3805 vmcs12->host_ia32_perf_global_ctrl);
3807 /* Set L1 segment info according to Intel SDM
3808 27.5.2 Loading Host Segment and Descriptor-Table Registers */
3809 seg = (struct kvm_segment) {
3811 .limit = 0xFFFFFFFF,
3812 .selector = vmcs12->host_cs_selector,
3818 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3822 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
3823 seg = (struct kvm_segment) {
3825 .limit = 0xFFFFFFFF,
3832 seg.selector = vmcs12->host_ds_selector;
3833 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
3834 seg.selector = vmcs12->host_es_selector;
3835 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
3836 seg.selector = vmcs12->host_ss_selector;
3837 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
3838 seg.selector = vmcs12->host_fs_selector;
3839 seg.base = vmcs12->host_fs_base;
3840 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
3841 seg.selector = vmcs12->host_gs_selector;
3842 seg.base = vmcs12->host_gs_base;
3843 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
3844 seg = (struct kvm_segment) {
3845 .base = vmcs12->host_tr_base,
3847 .selector = vmcs12->host_tr_selector,
3851 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
3853 kvm_set_dr(vcpu, 7, 0x400);
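/*
 * 0x400 is the architectural reset value of DR7: only bit 10, which is
 * reserved and reads as 1, is set, so all hardware breakpoints are
 * disabled, matching what the SDM specifies for DR7 after a VM exit.
 */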
3854 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
3856 if (cpu_has_vmx_msr_bitmap())
3857 vmx_update_msr_bitmap(vcpu);
3859 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
3860 vmcs12->vm_exit_msr_load_count))
3861 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3864 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
3866 struct shared_msr_entry *efer_msr;
3869 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
3870 return vmcs_read64(GUEST_IA32_EFER);
3872 if (cpu_has_load_ia32_efer())
3875 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
3876 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
3877 return vmx->msr_autoload.guest.val[i].value;
3880 efer_msr = find_msr_entry(vmx, MSR_EFER);
3882 return efer_msr->data;
3887 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
3889 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3890 struct vcpu_vmx *vmx = to_vmx(vcpu);
3891 struct vmx_msr_entry g, h;
3892 struct msr_data msr;
3896 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
3898 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
3900 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
3901 * as vmcs01.GUEST_DR7 contains a userspace defined value
3902 * and vcpu->arch.dr7 is not squirreled away before the
3903 * nested VMENTER (not worth adding a variable in nested_vmx).
3905 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
3906 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
3908 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
3912 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
3913 * handle a variety of side effects to KVM's software model.
3915 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
3917 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3918 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
3920 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3921 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
3923 nested_ept_uninit_mmu_context(vcpu);
3924 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3925 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3928 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
3929 * from vmcs01 (if necessary). The PDPTRs are not loaded on
3930 * VMFail; like everything else, we just need to ensure our
3931 * software model is up-to-date.
3934 ept_save_pdptrs(vcpu);
3936 kvm_mmu_reset_context(vcpu);
3938 if (cpu_has_vmx_msr_bitmap())
3939 vmx_update_msr_bitmap(vcpu);
3942 * This nasty bit of open coding is a compromise between blindly
3943 * loading L1's MSRs using the exit load lists (incorrect emulation
3944 * of VMFail), leaving the nested VM's MSRs in the software model
3945 * (incorrect behavior) and snapshotting the modified MSRs (too
3946 * expensive since the lists are unbound by hardware). For each
3947 * MSR that was (prematurely) loaded from the nested VMEntry load
3948 * list, reload it from the exit load list if it exists and differs
3949 * from the guest value. The intent is to stuff host state as
3950 * silently as possible, not to fully process the exit load list.
3952 msr.host_initiated = false;
3953 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
3954 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
3955 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
3956 pr_debug_ratelimited(
3957 "%s read MSR index failed (%u, 0x%08llx)\n",
3962 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
3963 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
3964 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
3965 pr_debug_ratelimited(
3966 "%s read MSR failed (%u, 0x%08llx)\n",
3970 if (h.index != g.index)
3972 if (h.value == g.value)
3975 if (nested_vmx_load_msr_check(vcpu, &h)) {
3976 pr_debug_ratelimited(
3977 "%s check failed (%u, 0x%x, 0x%x)\n",
3978 __func__, j, h.index, h.reserved);
3982 msr.index = h.index;
3984 if (kvm_set_msr(vcpu, &msr)) {
3985 pr_debug_ratelimited(
3986 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
3987 __func__, j, h.index, h.value);
3996 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4000 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4001 * and modify vmcs12 to make it see what it would expect to see there if
4002 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
4004 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
4005 u32 exit_intr_info, unsigned long exit_qualification)
4007 struct vcpu_vmx *vmx = to_vmx(vcpu);
4008 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4010 /* trying to cancel vmlaunch/vmresume is a bug */
4011 WARN_ON_ONCE(vmx->nested.nested_run_pending);
4013 leave_guest_mode(vcpu);
4015 if (nested_cpu_has_preemption_timer(vmcs12))
4016 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4018 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
4019 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
4021 if (likely(!vmx->fail)) {
4022 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
4024 if (exit_reason != -1)
4025 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
4026 exit_qualification);
4029 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
4030 * also be used to capture vmcs12 cache as part of
4031 * capturing nVMX state for snapshot (migration).
4033 * Otherwise, this flush will dirty guest memory at a
4034 * point it is already assumed by user-space to be immutable.
4037 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
4040 * The only expected VM-instruction error is "VM entry with
4041 * invalid control field(s)." Anything else indicates a
4042 * problem with L0. And we should never get here with a
4043 * VMFail of any type if early consistency checks are enabled.
4045 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
4046 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4047 WARN_ON_ONCE(nested_early_check);
4050 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4052 /* Update any VMCS fields that might have changed while L2 ran */
4053 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4054 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4055 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
4057 if (kvm_has_tsc_control)
4058 decache_tsc_multiplier(vmx);
4060 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4061 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4062 vmx_set_virtual_apic_mode(vcpu);
4063 } else if (!nested_cpu_has_ept(vmcs12) &&
4064 nested_cpu_has2(vmcs12,
4065 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
4066 vmx_flush_tlb(vcpu, true);
4069 /* Unpin physical memory we referred to in vmcs02 */
4070 if (vmx->nested.apic_access_page) {
4071 kvm_release_page_dirty(vmx->nested.apic_access_page);
4072 vmx->nested.apic_access_page = NULL;
4074 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4075 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4076 vmx->nested.pi_desc = NULL;
4079 * While L2 was running, the mmu_notifier may have forced a reload of the
4080 * page's hpa for the L2 vmcs. Need to reload it for L1 before entering L1.
4082 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4084 if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
4085 vmx->nested.need_vmcs12_to_shadow_sync = true;
4087 /* in case we halted in L2 */
4088 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4090 if (likely(!vmx->fail)) {
4092 * TODO: SDM says that with acknowledge interrupt on
4093 * exit, bit 31 of the VM-exit interrupt information
4094 * (valid interrupt) is always set to 1 on
4095 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
4096 * need kvm_cpu_has_interrupt(). See the commit
4097 * message for details.
4099 if (nested_exit_intr_ack_set(vcpu) &&
4100 exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
4101 kvm_cpu_has_interrupt(vcpu)) {
4102 int irq = kvm_cpu_get_interrupt(vcpu);
4104 vmcs12->vm_exit_intr_info = irq |
4105 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4108 if (exit_reason != -1)
4109 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4110 vmcs12->exit_qualification,
4111 vmcs12->idt_vectoring_info_field,
4112 vmcs12->vm_exit_intr_info,
4113 vmcs12->vm_exit_intr_error_code,
4116 load_vmcs12_host_state(vcpu, vmcs12);
4122 * After an early L2 VM-entry failure, we're now back
4123 * in L1 which thinks it just finished a VMLAUNCH or
4124 * VMRESUME instruction, so we need to set the failure
4125 * flag and the VM-instruction error field of the VMCS
4126 * accordingly, and skip the emulated instruction.
4128 (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4131 * Restore L1's host state to KVM's software model. We're here
4132 * because a consistency check was caught by hardware, which
4133 * means some amount of guest state has been propagated to KVM's
4134 * model and needs to be unwound to the host's state.
4136 nested_vmx_restore_host_state(vcpu);
4142 * Decode the memory-address operand of a vmx instruction, as recorded on an
4143 * exit caused by such an instruction (run by a guest hypervisor).
4144 * On success, returns 0. When the operand is invalid, returns 1 and throws #UD or #GP.
4147 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
4148 u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
4152 struct kvm_segment s;
4155 * According to Vol. 3B, "Information for VM Exits Due to Instruction
4156 * Execution", on an exit, vmx_instruction_info holds most of the
4157 * addressing components of the operand. Only the displacement part
4158 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4159 * For how an actual address is calculated from all these components,
4160 * refer to Vol. 1, "Operand Addressing".
4162 int scaling = vmx_instruction_info & 3;
4163 int addr_size = (vmx_instruction_info >> 7) & 7;
4164 bool is_reg = vmx_instruction_info & (1u << 10);
4165 int seg_reg = (vmx_instruction_info >> 15) & 7;
4166 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4167 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4168 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4169 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
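/*
 * The decode above follows the VM-exit instruction-information layout
 * from the SDM ("Information for VM Exits Due to Instruction Execution"):
 *
 *	[1:0]	scaling (index is shifted left by this amount)
 *	[9:7]	address size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit)
 *	[10]	1 = register operand, 0 = memory operand
 *	[17:15]	segment register
 *	[21:18]	index register
 *	[22]	index register invalid
 *	[26:23]	base register
 *	[27]	base register invalid
 */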
4172 kvm_queue_exception(vcpu, UD_VECTOR);
4176 /* Addr = segment_base + offset */
4177 /* offset = base + [index * scale] + displacement */
4178 off = exit_qualification; /* holds the displacement */
4180 off = (gva_t)sign_extend64(off, 31);
4181 else if (addr_size == 0)
4182 off = (gva_t)sign_extend64(off, 15);
4184 off += kvm_register_read(vcpu, base_reg);
4186 off += kvm_register_read(vcpu, index_reg)<<scaling;
4187 vmx_get_segment(vcpu, &s, seg_reg);
4190 * The effective address, i.e. @off, of a memory operand is truncated
4191 * based on the address size of the instruction. Note that this is
4192 * the *effective address*, i.e. the address prior to accounting for
4193 * the segment's base.
4195 if (addr_size == 1) /* 32 bit */
4196 off &= 0xffffffff;
4197 else if (addr_size == 0) /* 16 bit */
4198 off &= 0xffff;
4200 /* Checks for #GP/#SS exceptions. */
4202 if (is_long_mode(vcpu)) {
4204 * The virtual/linear address is never truncated in 64-bit
4205 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4206 * address when using FS/GS with a non-zero base.
4208 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
4209 *ret = s.base + off;
4213 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4214 * non-canonical form. This is the only check on the memory
4215 * destination for long mode!
4217 exn = is_noncanonical_address(*ret, vcpu);
4220 * When not in long mode, the virtual/linear address is
4221 * unconditionally truncated to 32 bits regardless of the
4224 *ret = (s.base + off) & 0xffffffff;
4226 /* Protected mode: apply checks for segment validity in the following order:
4228 * - segment type check (#GP(0) may be thrown)
4229 * - usability check (#GP(0)/#SS(0))
4230 * - limit check (#GP(0)/#SS(0))
4233 /* #GP(0) if the destination operand is located in a
4234 * read-only data segment or any code segment.
4236 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4238 /* #GP(0) if the source operand is located in an
4239 * execute-only code segment
4241 exn = ((s.type & 0xa) == 8);
4243 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4246 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4248 exn = (s.unusable != 0);
4251 * Protected mode: #GP(0)/#SS(0) if the memory operand is
4252 * outside the segment limit. All CPUs that support VMX ignore
4253 * limit checks for flat segments, i.e. segments with base==0,
4254 * limit==0xffffffff and of type expand-up data or code.
4256 if (!(s.base == 0 && s.limit == 0xffffffff &&
4257 ((s.type & 8) || !(s.type & 4))))
4258 exn = exn || ((u64)off + len - 1 > s.limit);
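/*
 * Worked example: a 4-byte access at off = 0xfffc against a segment
 * with limit 0xffff passes (0xfffc + 4 - 1 == 0xffff), while the same
 * access at off = 0xfffd exceeds the limit and faults, with #SS(0)
 * rather than #GP(0) if the operand uses the SS segment.
 */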
4261 kvm_queue_exception_e(vcpu,
4262 seg_reg == VCPU_SREG_SS ?
4263 SS_VECTOR : GP_VECTOR,
4271 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
4274 struct x86_exception e;
4276 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4277 vmcs_read32(VMX_INSTRUCTION_INFO), false,
4278 sizeof(*vmpointer), &gva))
4281 if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
4282 kvm_inject_page_fault(vcpu, &e);
4290 * Allocate a shadow VMCS and associate it with the currently loaded
4291 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4292 * VMCS is also VMCLEARed, so that it is ready for use.
4294 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4296 struct vcpu_vmx *vmx = to_vmx(vcpu);
4297 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4300 * We should allocate a shadow vmcs for vmcs01 only when L1
4301 * executes VMXON and free it when L1 executes VMXOFF.
4302 * As it is invalid to execute VMXON twice, we shouldn't reach
4303 * here when vmcs01 already has an allocated shadow vmcs.
4305 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
4307 if (!loaded_vmcs->shadow_vmcs) {
4308 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4309 if (loaded_vmcs->shadow_vmcs)
4310 vmcs_clear(loaded_vmcs->shadow_vmcs);
4312 return loaded_vmcs->shadow_vmcs;
4315 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4317 struct vcpu_vmx *vmx = to_vmx(vcpu);
4320 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4324 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4325 if (!vmx->nested.cached_vmcs12)
4326 goto out_cached_vmcs12;
4328 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4329 if (!vmx->nested.cached_shadow_vmcs12)
4330 goto out_cached_shadow_vmcs12;
4332 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4333 goto out_shadow_vmcs;
4335 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
4336 HRTIMER_MODE_REL_PINNED);
4337 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4339 vmx->nested.vpid02 = allocate_vpid();
4341 vmx->nested.vmcs02_initialized = false;
4342 vmx->nested.vmxon = true;
4344 if (pt_mode == PT_MODE_HOST_GUEST) {
4345 vmx->pt_desc.guest.ctl = 0;
4346 pt_update_intercept_for_msr(vmx);
4352 kfree(vmx->nested.cached_shadow_vmcs12);
4354 out_cached_shadow_vmcs12:
4355 kfree(vmx->nested.cached_vmcs12);
4358 free_loaded_vmcs(&vmx->nested.vmcs02);
4365 * Emulate the VMXON instruction.
4366 * Currently, we just remember that VMX is active, and do not save or even
4367 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
4368 * do not currently need to store anything in that guest-allocated memory
4369 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
4370 * argument is different from the VMXON pointer (which the spec says they do).
4372 static int handle_vmon(struct kvm_vcpu *vcpu)
4377 struct vcpu_vmx *vmx = to_vmx(vcpu);
4378 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
4379 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
4382 * The Intel VMX Instruction Reference lists a bunch of bits that are
4383 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
4384 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
4385 * Otherwise, we should fail with #UD. But most faulting conditions
4386 * have already been checked by hardware, prior to the VM-exit for
4387 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
4388 * that bit set to 1 in non-root mode.
4390 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
4391 kvm_queue_exception(vcpu, UD_VECTOR);
4395 /* CPL=0 must be checked manually. */
4396 if (vmx_get_cpl(vcpu)) {
4397 kvm_inject_gp(vcpu, 0);
4401 if (vmx->nested.vmxon)
4402 return nested_vmx_failValid(vcpu,
4403 VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
4405 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
4406 != VMXON_NEEDED_FEATURES) {
4407 kvm_inject_gp(vcpu, 0);
4411 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4416 * The first 4 bytes of VMXON region contain the supported
4417 * VMCS revision identifier
4419 * Note - IA32_VMX_BASIC[48], which replaces physical address width
4420 * with 32, will never be 1 for the nested case.
4422 if (!page_address_valid(vcpu, vmptr))
4423 return nested_vmx_failInvalid(vcpu);
4425 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
4426 revision != VMCS12_REVISION)
4427 return nested_vmx_failInvalid(vcpu);
4429 vmx->nested.vmxon_ptr = vmptr;
4430 ret = enter_vmx_operation(vcpu);
4434 return nested_vmx_succeed(vcpu);
4437 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
4439 struct vcpu_vmx *vmx = to_vmx(vcpu);
4441 if (vmx->nested.current_vmptr == -1ull)
4444 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
4446 if (enable_shadow_vmcs) {
4447 /* copy to memory all shadowed fields in case
4448 they were modified */
4449 copy_shadow_to_vmcs12(vmx);
4450 vmx_disable_shadow_vmcs(vmx);
4452 vmx->nested.posted_intr_nv = -1;
4454 /* Flush VMCS12 to guest memory */
4455 kvm_vcpu_write_guest_page(vcpu,
4456 vmx->nested.current_vmptr >> PAGE_SHIFT,
4457 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
4459 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4461 vmx->nested.current_vmptr = -1ull;
4464 /* Emulate the VMXOFF instruction */
4465 static int handle_vmoff(struct kvm_vcpu *vcpu)
4467 if (!nested_vmx_check_permission(vcpu))
4470 return nested_vmx_succeed(vcpu);
4473 /* Emulate the VMCLEAR instruction */
4474 static int handle_vmclear(struct kvm_vcpu *vcpu)
4476 struct vcpu_vmx *vmx = to_vmx(vcpu);
4481 if (!nested_vmx_check_permission(vcpu))
4484 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4487 if (!page_address_valid(vcpu, vmptr))
4488 return nested_vmx_failValid(vcpu,
4489 VMXERR_VMCLEAR_INVALID_ADDRESS);
4491 if (vmptr == vmx->nested.vmxon_ptr)
4492 return nested_vmx_failValid(vcpu,
4493 VMXERR_VMCLEAR_VMXON_POINTER);
4496 * When Enlightened VMEntry is enabled on the calling CPU we treat
4497 * memory area pointed to by vmptr as an Enlightened VMCS (as there's no good
4498 * way to distinguish it from VMCS12) and we must not corrupt it by
4499 * writing to the non-existent 'launch_state' field. The area doesn't
4500 * have to be the currently active EVMCS on the calling CPU and there's
4501 * nothing KVM has to do to transition it from 'active' to 'non-active'
4502 * state. It is possible that the area will stay mapped as
4503 * vmx->nested.hv_evmcs but this shouldn't be a problem.
4505 if (likely(!vmx->nested.enlightened_vmcs_enabled ||
4506 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
4507 if (vmptr == vmx->nested.current_vmptr)
4508 nested_release_vmcs12(vcpu);
4510 kvm_vcpu_write_guest(vcpu,
4511 vmptr + offsetof(struct vmcs12,
4512 launch_state),
4513 &zero, sizeof(zero));
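/*
 * Writing launch_state = 0 mirrors the architectural effect of VMCLEAR:
 * the VMCS becomes "clear", so the next launch of this vmcs12 must use
 * VMLAUNCH rather than VMRESUME.
 */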
4516 return nested_vmx_succeed(vcpu);
4519 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
4521 /* Emulate the VMLAUNCH instruction */
4522 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
4524 return nested_vmx_run(vcpu, true);
4527 /* Emulate the VMRESUME instruction */
4528 static int handle_vmresume(struct kvm_vcpu *vcpu)
4531 return nested_vmx_run(vcpu, false);
4534 static int handle_vmread(struct kvm_vcpu *vcpu)
4536 unsigned long field;
4538 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4539 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4542 struct vmcs12 *vmcs12;
4543 struct x86_exception e;
4546 if (!nested_vmx_check_permission(vcpu))
4549 if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
4550 return nested_vmx_failInvalid(vcpu);
4552 if (!is_guest_mode(vcpu))
4553 vmcs12 = get_vmcs12(vcpu);
4556 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
4557 * to a shadowed field sets the ALU flags for VMfailInvalid.
4559 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4560 return nested_vmx_failInvalid(vcpu);
4561 vmcs12 = get_shadow_vmcs12(vcpu);
4564 /* Decode instruction info and find the field to read */
4565 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4567 offset = vmcs_field_to_offset(field);
4569 return nested_vmx_failValid(vcpu,
4570 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4572 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
4573 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4575 /* Read the field, zero-extended to a u64 field_value */
4576 field_value = vmcs12_read_any(vmcs12, field, offset);
4579 * Now copy part of this value to register or memory, as requested.
4580 * Note that the number of bits actually copied is 32 or 64 depending
4581 * on the guest's mode (32 or 64 bit), not on the given field's length.
4583 if (vmx_instruction_info & (1u << 10)) {
4584 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
4585 field_value);
4587 len = is_64_bit_mode(vcpu) ? 8 : 4;
4588 if (get_vmx_mem_address(vcpu, exit_qualification,
4589 vmx_instruction_info, true, len, &gva))
4591 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
4592 if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
4593 kvm_inject_page_fault(vcpu, &e);
4596 return nested_vmx_succeed(vcpu);
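/*
 * For reference, per the SDM, a VMCS field encoding is self-describing:
 * bit 0 selects the high half of a 64-bit field, bits 9:1 are the index,
 * bits 11:10 the type (control, read-only data, guest state, host state)
 * and bits 14:13 the width; vmcs_field_width() derives the width
 * directly from the encoding.
 */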
4599 static bool is_shadow_field_rw(unsigned long field)
4602 #define SHADOW_FIELD_RW(x, y) case x:
4603 #include "vmcs_shadow_fields.h"
4611 static bool is_shadow_field_ro(unsigned long field)
4614 #define SHADOW_FIELD_RO(x, y) case x:
4615 #include "vmcs_shadow_fields.h"
4623 static int handle_vmwrite(struct kvm_vcpu *vcpu)
4625 unsigned long field;
4628 struct vcpu_vmx *vmx = to_vmx(vcpu);
4629 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4630 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4632 /* The value to write might be 32 or 64 bits, depending on L1's long
4633 * mode, and eventually we need to write that into a field of several
4634 * possible lengths. The code below first zero-extends the value to 64
4635 * bit (field_value), and then copies only the appropriate number of
4636 * bits into the vmcs12 field.
4638 u64 field_value = 0;
4639 struct x86_exception e;
4640 struct vmcs12 *vmcs12;
4643 if (!nested_vmx_check_permission(vcpu))
4646 if (vmx->nested.current_vmptr == -1ull)
4647 return nested_vmx_failInvalid(vcpu);
4649 if (vmx_instruction_info & (1u << 10))
4650 field_value = kvm_register_readl(vcpu,
4651 (((vmx_instruction_info) >> 3) & 0xf));
4653 len = is_64_bit_mode(vcpu) ? 8 : 4;
4654 if (get_vmx_mem_address(vcpu, exit_qualification,
4655 vmx_instruction_info, false, len, &gva))
4657 if (kvm_read_guest_virt(vcpu, gva, &field_value, len, &e)) {
4658 kvm_inject_page_fault(vcpu, &e);
4664 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4666 * If the vCPU supports "VMWRITE to any supported field in the
4667 * VMCS," then the "read-only" fields are actually read/write.
4669 if (vmcs_field_readonly(field) &&
4670 !nested_cpu_has_vmwrite_any_field(vcpu))
4671 return nested_vmx_failValid(vcpu,
4672 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
4674 if (!is_guest_mode(vcpu)) {
4675 vmcs12 = get_vmcs12(vcpu);
4678 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
4679 * vmcs12, else we may clobber a field or consume a stale value.
4681 if (!is_shadow_field_rw(field))
4682 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4685 * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
4686 * to a shadowed field sets the ALU flags for VMfailInvalid.
4688 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4689 return nested_vmx_failInvalid(vcpu);
4690 vmcs12 = get_shadow_vmcs12(vcpu);
4693 offset = vmcs_field_to_offset(field);
4695 return nested_vmx_failValid(vcpu,
4696 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4699 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
4700 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
4701 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
4702 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
4703 * from L1 will return a different value than VMREAD from L2 (L1 sees
4704 * the stripped down value, L2 sees the full value as stored by KVM).
4706 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
4707 field_value &= 0x1f0ff;
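/*
 * 0x1f0ff keeps the architecturally defined AR bits: type/S/DPL/P in
 * bits 7:0 and AVL/L/D-B/G/unusable in bits 16:12; bits 11:8 are the
 * reserved bits that such CPUs drop.
 */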
4709 vmcs12_write_any(vmcs12, field, offset, field_value);
4712 * Do not track vmcs12 dirty-state if in guest-mode as we actually
4713 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
4714 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
4715 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
4717 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
4719 * L1 can read these fields without exiting, ensure the
4720 * shadow VMCS is up-to-date.
4722 if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
4724 vmcs_load(vmx->vmcs01.shadow_vmcs);
4726 __vmcs_writel(field, field_value);
4728 vmcs_clear(vmx->vmcs01.shadow_vmcs);
4729 vmcs_load(vmx->loaded_vmcs->vmcs);
4732 vmx->nested.dirty_vmcs12 = true;
4735 return nested_vmx_succeed(vcpu);
4738 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
4740 vmx->nested.current_vmptr = vmptr;
4741 if (enable_shadow_vmcs) {
4742 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
4743 vmcs_write64(VMCS_LINK_POINTER,
4744 __pa(vmx->vmcs01.shadow_vmcs));
4745 vmx->nested.need_vmcs12_to_shadow_sync = true;
4747 vmx->nested.dirty_vmcs12 = true;
4750 /* Emulate the VMPTRLD instruction */
4751 static int handle_vmptrld(struct kvm_vcpu *vcpu)
4753 struct vcpu_vmx *vmx = to_vmx(vcpu);
4756 if (!nested_vmx_check_permission(vcpu))
4759 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4762 if (!page_address_valid(vcpu, vmptr))
4763 return nested_vmx_failValid(vcpu,
4764 VMXERR_VMPTRLD_INVALID_ADDRESS);
4766 if (vmptr == vmx->nested.vmxon_ptr)
4767 return nested_vmx_failValid(vcpu,
4768 VMXERR_VMPTRLD_VMXON_POINTER);
4770 /* Forbid normal VMPTRLD if Enlightened version was used */
4771 if (vmx->nested.hv_evmcs)
4774 if (vmx->nested.current_vmptr != vmptr) {
4775 struct kvm_host_map map;
4776 struct vmcs12 *new_vmcs12;
4778 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
4780 * Reads from an unbacked page return all 1s,
4781 * which means that the 32 bits located at the
4782 * given physical address won't match the required
4783 * VMCS12_REVISION identifier.
4785 return nested_vmx_failValid(vcpu,
4786 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4789 new_vmcs12 = map.hva;
4791 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
4792 (new_vmcs12->hdr.shadow_vmcs &&
4793 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
4794 kvm_vcpu_unmap(vcpu, &map, false);
4795 return nested_vmx_failValid(vcpu,
4796 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4799 nested_release_vmcs12(vcpu);
4802 * Load VMCS12 from guest memory since it is not already cached.
4805 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
4806 kvm_vcpu_unmap(vcpu, &map, false);
4808 set_current_vmptr(vmx, vmptr);
4811 return nested_vmx_succeed(vcpu);
4814 /* Emulate the VMPTRST instruction */
4815 static int handle_vmptrst(struct kvm_vcpu *vcpu)
4817 unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
4818 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4819 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
4820 struct x86_exception e;
4823 if (!nested_vmx_check_permission(vcpu))
4826 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
4829 if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
4830 true, sizeof(gpa_t), &gva))
4832 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
4833 if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
4834 sizeof(gpa_t), &e)) {
4835 kvm_inject_page_fault(vcpu, &e);
4838 return nested_vmx_succeed(vcpu);
4841 /* Emulate the INVEPT instruction */
4842 static int handle_invept(struct kvm_vcpu *vcpu)
4844 struct vcpu_vmx *vmx = to_vmx(vcpu);
4845 u32 vmx_instruction_info, types;
4848 struct x86_exception e;
4853 if (!(vmx->nested.msrs.secondary_ctls_high &
4854 SECONDARY_EXEC_ENABLE_EPT) ||
4855 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
4856 kvm_queue_exception(vcpu, UD_VECTOR);
4860 if (!nested_vmx_check_permission(vcpu))
4863 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4864 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4866 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
4868 if (type >= 32 || !(types & (1 << type)))
4869 return nested_vmx_failValid(vcpu,
4870 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4872 /* According to the Intel VMX instruction reference, the memory
4873 * operand is read even if it isn't needed (e.g., for type==global)
4875 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4876 vmx_instruction_info, false, sizeof(operand), &gva))
4878 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4879 kvm_inject_page_fault(vcpu, &e);
4884 case VMX_EPT_EXTENT_GLOBAL:
4885 case VMX_EPT_EXTENT_CONTEXT:
4887 * TODO: Sync the necessary shadow EPT roots here, rather than
4888 * at the next emulated VM-entry.
4896 return nested_vmx_succeed(vcpu);
4899 static int handle_invvpid(struct kvm_vcpu *vcpu)
4901 struct vcpu_vmx *vmx = to_vmx(vcpu);
4902 u32 vmx_instruction_info;
4903 unsigned long type, types;
4905 struct x86_exception e;
4912 if (!(vmx->nested.msrs.secondary_ctls_high &
4913 SECONDARY_EXEC_ENABLE_VPID) ||
4914 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
4915 kvm_queue_exception(vcpu, UD_VECTOR);
4919 if (!nested_vmx_check_permission(vcpu))
4922 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4923 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4925 types = (vmx->nested.msrs.vpid_caps &
4926 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
4928 if (type >= 32 || !(types & (1 << type)))
4929 return nested_vmx_failValid(vcpu,
4930 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4932 /* According to the Intel VMX instruction reference, the memory
4933 * operand is read even if it isn't needed (e.g., for type==global)
4935 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4936 vmx_instruction_info, false, sizeof(operand), &gva))
4938 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4939 kvm_inject_page_fault(vcpu, &e);
4942 if (operand.vpid >> 16)
4943 return nested_vmx_failValid(vcpu,
4944 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4946 vpid02 = nested_get_vpid02(vcpu);
4948 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
4949 if (!operand.vpid ||
4950 is_noncanonical_address(operand.gla, vcpu))
4951 return nested_vmx_failValid(vcpu,
4952 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4953 if (cpu_has_vmx_invvpid_individual_addr()) {
4954 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
4955 vpid02, operand.gla);
4957 __vmx_flush_tlb(vcpu, vpid02, false);
4959 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
4960 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
4962 return nested_vmx_failValid(vcpu,
4963 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4964 __vmx_flush_tlb(vcpu, vpid02, false);
4966 case VMX_VPID_EXTENT_ALL_CONTEXT:
4967 __vmx_flush_tlb(vcpu, vpid02, false);
4971 return kvm_skip_emulated_instruction(vcpu);
4974 return nested_vmx_succeed(vcpu);
4977 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
4978 struct vmcs12 *vmcs12)
4980 u32 index = kvm_rcx_read(vcpu);
4982 bool accessed_dirty;
4983 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
4985 if (!nested_cpu_has_eptp_switching(vmcs12) ||
4986 !nested_cpu_has_ept(vmcs12))
4989 if (index >= VMFUNC_EPTP_ENTRIES)
4993 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
4994 &address, index * 8, 8))
4997 accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
5000 * If the (L2) guest does a vmfunc to the currently
5001 * active ept pointer, we don't have to do anything else
5003 if (vmcs12->ept_pointer != address) {
5004 if (!valid_ept_address(vcpu, address))
5007 kvm_mmu_unload(vcpu);
5008 mmu->ept_ad = accessed_dirty;
5009 mmu->mmu_role.base.ad_disabled = !accessed_dirty;
5010 vmcs12->ept_pointer = address;
5012 * TODO: Determine the correct approach for when
5013 * mmu reload fails. Currently, we just let the next
5014 * reload potentially fail
5016 kvm_mmu_reload(vcpu);
5022 static int handle_vmfunc(struct kvm_vcpu *vcpu)
5024 struct vcpu_vmx *vmx = to_vmx(vcpu);
5025 struct vmcs12 *vmcs12;
5026 u32 function = kvm_rax_read(vcpu);
5029 * VMFUNC is only supported for nested guests, but we always enable the
5030 * secondary control for simplicity; for non-nested mode, fake that we
5031 * didn't by injecting #UD.
5033 if (!is_guest_mode(vcpu)) {
5034 kvm_queue_exception(vcpu, UD_VECTOR);
5038 vmcs12 = get_vmcs12(vcpu);
5039 if ((vmcs12->vm_function_control & (1 << function)) == 0)
5044 if (nested_vmx_eptp_switching(vcpu, vmcs12))
5050 return kvm_skip_emulated_instruction(vcpu);
5053 nested_vmx_vmexit(vcpu, vmx->exit_reason,
5054 vmcs_read32(VM_EXIT_INTR_INFO),
5055 vmcs_readl(EXIT_QUALIFICATION));
5060 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
5061 struct vmcs12 *vmcs12)
5063 unsigned long exit_qualification;
5064 gpa_t bitmap, last_bitmap;
5069 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5070 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
5072 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5074 port = exit_qualification >> 16;
5075 size = (exit_qualification & 7) + 1;
5077 last_bitmap = (gpa_t)-1;
5082 bitmap = vmcs12->io_bitmap_a;
5083 else if (port < 0x10000)
5084 bitmap = vmcs12->io_bitmap_b;
5087 bitmap += (port & 0x7fff) / 8;
5089 if (last_bitmap != bitmap)
5090 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5092 if (b & (1 << (port & 7)))
5097 last_bitmap = bitmap;
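/*
 * Worked example: an access to port 0x3f8 with size 1 tests bit 0 of
 * byte 0x3f8 / 8 = 0x7f in io_bitmap_a; ports 0x8000 and up use
 * io_bitmap_b, with the top bit of the port masked off by the
 * (port & 0x7fff) / 8 computation above.
 */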
5104 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
5105 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
5106 * disinterest in the current event (read or write a specific MSR) by using an
5107 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5109 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
5110 struct vmcs12 *vmcs12, u32 exit_reason)
5112 u32 msr_index = kvm_rcx_read(vcpu);
5115 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5119 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5120 * for the four combinations of read/write and low/high MSR numbers.
5121 * First we need to figure out which of the four to use:
5123 bitmap = vmcs12->msr_bitmap;
5124 if (exit_reason == EXIT_REASON_MSR_WRITE)
5125 bitmap += 2048;
5126 if (msr_index >= 0xc0000000) {
5127 msr_index -= 0xc0000000;
5128 bitmap += 1024;
5131 /* Then read the msr_index'th bit from this bitmap: */
5132 if (msr_index < 1024*8) {
5134 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
5136 return 1 & (b >> (msr_index & 7));
5138 return true; /* let L1 handle the wrong parameter */
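/*
 * Worked example: a WRMSR to MSR_EFER (0xc0000080) lands in the
 * write-high bitmap at offset 2048 + 1024 = 3072, byte 0x80 / 8 = 16,
 * bit 0; a RDMSR of a low MSR (index < 0x2000) is tested against the
 * read-low bitmap at offset 0.
 */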
5142 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
5143 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5144 * intercept (via guest_host_mask etc.) the current event.
5146 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5147 struct vmcs12 *vmcs12)
5149 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5150 int cr = exit_qualification & 15;
5154 switch ((exit_qualification >> 4) & 3) {
5155 case 0: /* mov to cr */
5156 reg = (exit_qualification >> 8) & 15;
5157 val = kvm_register_readl(vcpu, reg);
5160 if (vmcs12->cr0_guest_host_mask &
5161 (val ^ vmcs12->cr0_read_shadow))
5165 if ((vmcs12->cr3_target_count >= 1 &&
5166 vmcs12->cr3_target_value0 == val) ||
5167 (vmcs12->cr3_target_count >= 2 &&
5168 vmcs12->cr3_target_value1 == val) ||
5169 (vmcs12->cr3_target_count >= 3 &&
5170 vmcs12->cr3_target_value2 == val) ||
5171 (vmcs12->cr3_target_count >= 4 &&
5172 vmcs12->cr3_target_value3 == val))
5174 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5178 if (vmcs12->cr4_guest_host_mask &
5179 (vmcs12->cr4_read_shadow ^ val))
5183 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5189 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5190 (vmcs12->cr0_read_shadow & X86_CR0_TS))
5193 case 1: /* mov from cr */
5196 if (vmcs12->cpu_based_vm_exec_control &
5197 CPU_BASED_CR3_STORE_EXITING)
5201 if (vmcs12->cpu_based_vm_exec_control &
5202 CPU_BASED_CR8_STORE_EXITING)
5209 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
5210 * cr0. Other attempted changes are ignored, with no exit.
5212 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5213 if (vmcs12->cr0_guest_host_mask & 0xe &
5214 (val ^ vmcs12->cr0_read_shadow))
5216 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5217 !(vmcs12->cr0_read_shadow & 0x1) &&
5225 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
5226 struct vmcs12 *vmcs12, gpa_t bitmap)
5228 u32 vmx_instruction_info;
5229 unsigned long field;
5232 if (!nested_cpu_has_shadow_vmcs(vmcs12))
5235 /* Decode instruction info and find the field to access */
5236 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5237 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5239 /* Out-of-range fields always cause a VM exit from L2 to L1 */
5243 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
5246 return 1 & (b >> (field & 7));
5250 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
5251 * should handle it ourselves in L0 (and then continue L2). Only call this
5252 * when in is_guest_mode (L2).
5254 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
5256 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
5257 struct vcpu_vmx *vmx = to_vmx(vcpu);
5258 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5260 if (vmx->nested.nested_run_pending)
5263 if (unlikely(vmx->fail)) {
5264 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
5265 vmcs_read32(VM_INSTRUCTION_ERROR));
5270 * The host physical addresses of some pages of guest memory
5271 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
5272 * Page). The CPU may write to these pages via their host
5273 * physical address while L2 is running, bypassing any
5274 * address-translation-based dirty tracking (e.g. EPT write protection).
5277 * Mark them dirty on every exit from L2 to prevent them from
5278 * getting out of sync with dirty tracking.
5280 nested_mark_vmcs12_pages_dirty(vcpu);
5282 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
5283 vmcs_readl(EXIT_QUALIFICATION),
5284 vmx->idt_vectoring_info,
5286 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
5289 switch (exit_reason) {
5290 case EXIT_REASON_EXCEPTION_NMI:
5291 if (is_nmi(intr_info))
5293 else if (is_page_fault(intr_info))
5294 return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
5295 else if (is_debug(intr_info) &&
5297 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5299 else if (is_breakpoint(intr_info) &&
5300 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5302 return vmcs12->exception_bitmap &
5303 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
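/*
 * E.g. a #GP (vector 13), which none of the special cases above claim,
 * is reflected to L1 only if bit 13 of vmcs12's exception bitmap is set.
 */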
5304 case EXIT_REASON_EXTERNAL_INTERRUPT:
5306 case EXIT_REASON_TRIPLE_FAULT:
5308 case EXIT_REASON_PENDING_INTERRUPT:
5309 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
5310 case EXIT_REASON_NMI_WINDOW:
5311 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
5312 case EXIT_REASON_TASK_SWITCH:
5314 case EXIT_REASON_CPUID:
5316 case EXIT_REASON_HLT:
5317 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
5318 case EXIT_REASON_INVD:
5320 case EXIT_REASON_INVLPG:
5321 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5322 case EXIT_REASON_RDPMC:
5323 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
5324 case EXIT_REASON_RDRAND:
5325 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
5326 case EXIT_REASON_RDSEED:
5327 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
5328 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
5329 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
5330 case EXIT_REASON_VMREAD:
5331 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5332 vmcs12->vmread_bitmap);
5333 case EXIT_REASON_VMWRITE:
5334 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5335 vmcs12->vmwrite_bitmap);
5336 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
5337 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
5338 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
5339 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
5340 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
5342 * VMX instructions trap unconditionally. This allows L1 to
5343 * emulate them for its L2 guest, i.e., allows 3-level nesting!
5346 case EXIT_REASON_CR_ACCESS:
5347 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
5348 case EXIT_REASON_DR_ACCESS:
5349 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
5350 case EXIT_REASON_IO_INSTRUCTION:
5351 return nested_vmx_exit_handled_io(vcpu, vmcs12);
5352 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
5353 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
5354 case EXIT_REASON_MSR_READ:
5355 case EXIT_REASON_MSR_WRITE:
5356 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
5357 case EXIT_REASON_INVALID_STATE:
5359 case EXIT_REASON_MWAIT_INSTRUCTION:
5360 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
5361 case EXIT_REASON_MONITOR_TRAP_FLAG:
5362 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
5363 case EXIT_REASON_MONITOR_INSTRUCTION:
5364 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
5365 case EXIT_REASON_PAUSE_INSTRUCTION:
5366 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
5367 nested_cpu_has2(vmcs12,
5368 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
5369 case EXIT_REASON_MCE_DURING_VMENTRY:
5371 case EXIT_REASON_TPR_BELOW_THRESHOLD:
5372 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
5373 case EXIT_REASON_APIC_ACCESS:
5374 case EXIT_REASON_APIC_WRITE:
5375 case EXIT_REASON_EOI_INDUCED:
5377 * The controls for "virtualize APIC accesses," "APIC-
5378 * register virtualization," and "virtual-interrupt
5379 * delivery" only come from vmcs12.
5382 case EXIT_REASON_EPT_VIOLATION:
5384 * L0 always deals with the EPT violation. If nested EPT is
5385 * used, and the nested mmu code discovers that the address is
5386 * missing in the guest EPT table (EPT12), the EPT violation
5387 * will be injected with nested_ept_inject_page_fault()
5390 case EXIT_REASON_EPT_MISCONFIG:
5392 * L2 never directly uses L1's EPT, but rather L0's own EPT
5393 * table (shadow on EPT) or a merged EPT table that L0 built
5394 * (EPT on EPT). So any problems with the structure of the
5395 * table are L0's fault.
5398 case EXIT_REASON_INVPCID:
5400 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
5401 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5402 case EXIT_REASON_WBINVD:
5403 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
5404 case EXIT_REASON_XSETBV:
5406 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
5408 * This should never happen, since it is not possible to
5409 * set XSS to a non-zero value---neither in L1 nor in L2.
5410 * If it were, XSS would have to be checked against
5411 * the XSS exit bitmap in vmcs12.
5413 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
5414 case EXIT_REASON_PREEMPTION_TIMER:
5416 case EXIT_REASON_PML_FULL:
5417 /* We emulate PML support to L1. */
5419 case EXIT_REASON_VMFUNC:
5420 /* VM functions are emulated through L2->L0 vmexits. */
5422 case EXIT_REASON_ENCLS:
5423 /* SGX is never exposed to L1 */
5431 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5432 struct kvm_nested_state __user *user_kvm_nested_state,
5435 struct vcpu_vmx *vmx;
5436 struct vmcs12 *vmcs12;
5437 struct kvm_nested_state kvm_state = {
5439 .format = KVM_STATE_NESTED_FORMAT_VMX,
5440 .size = sizeof(kvm_state),
5441 .hdr.vmx.vmxon_pa = -1ull,
5442 .hdr.vmx.vmcs12_pa = -1ull,
5444 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
5445 &user_kvm_nested_state->data.vmx[0];
5448 return kvm_state.size + sizeof(*user_vmx_nested_state);
5451 vmcs12 = get_vmcs12(vcpu);
5453 if (nested_vmx_allowed(vcpu) &&
5454 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
5455 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
5456 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
5458 if (vmx_has_valid_vmcs12(vcpu)) {
5459 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
5461 if (vmx->nested.hv_evmcs)
5462 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
5464 if (is_guest_mode(vcpu) &&
5465 nested_cpu_has_shadow_vmcs(vmcs12) &&
5466 vmcs12->vmcs_link_pointer != -1ull)
5467 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
5470 if (vmx->nested.smm.vmxon)
5471 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
5473 if (vmx->nested.smm.guest_mode)
5474 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
5476 if (is_guest_mode(vcpu)) {
5477 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
5479 if (vmx->nested.nested_run_pending)
5480 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
5484 if (user_data_size < kvm_state.size)
5487 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
5490 if (!vmx_has_valid_vmcs12(vcpu))
5494 * When running L2, the authoritative vmcs12 state is in the
5495 * vmcs02. When running L1, the authoritative vmcs12 state is
5496 * in the shadow or enlightened vmcs linked to vmcs01, unless
5497 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
5498 * vmcs12 state is in the vmcs12 already.
5500 if (is_guest_mode(vcpu)) {
5501 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
5502 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5503 } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
5504 if (vmx->nested.hv_evmcs)
5505 copy_enlightened_to_vmcs12(vmx);
5506 else if (enable_shadow_vmcs)
5507 copy_shadow_to_vmcs12(vmx);
5510 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
5511 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
5514 * Copy over the full allocated size of vmcs12 rather than just the size of the fields.
5517 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
5520 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5521 vmcs12->vmcs_link_pointer != -1ull) {
5522 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
5523 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
5528 return kvm_state.size;
5532 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
5534 void vmx_leave_nested(struct kvm_vcpu *vcpu)
5536 if (is_guest_mode(vcpu)) {
5537 to_vmx(vcpu)->nested.nested_run_pending = 0;
5538 nested_vmx_vmexit(vcpu, -1, 0, 0);
5543 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
5544 struct kvm_nested_state __user *user_kvm_nested_state,
5545 struct kvm_nested_state *kvm_state)
5547 struct vcpu_vmx *vmx = to_vmx(vcpu);
5548 struct vmcs12 *vmcs12;
5550 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
5551 &user_kvm_nested_state->data.vmx[0];
5554 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
5557 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
5558 if (kvm_state->hdr.vmx.smm.flags)
5561 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
5565 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
5566 * enable eVMCS capability on vCPU. However, since then
5567 * code was changed such that the flag signals that vmcs12 should
5568 * be copied into eVMCS in guest memory.
5570 * To preserve backwards compatibility, allow user
5571 * to set this flag even when there is no VMXON region.
5573 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
5576 if (!nested_vmx_allowed(vcpu))
5579 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
5583 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5584 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5587 if (kvm_state->hdr.vmx.smm.flags &
5588 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
5592 * SMM temporarily disables VMX, so we cannot be in guest mode,
5593 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags must be zero.
5598 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
5599 : kvm_state->hdr.vmx.smm.flags)
5602 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5603 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
5606 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
5607 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
	vmx_leave_nested(vcpu);

	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
		return 0;

	vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;

	/* Empty 'VMXON' state is permitted */
	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
		return 0;

	if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
			return -EINVAL;

		set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
		/*
		 * Sync the eVMCS upon entry, as we may not have
		 * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
		 */
		vmx->nested.need_vmcs12_to_shadow_sync = true;
	} else {
		return -EINVAL;
	}

	if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
		vmx->nested.smm.vmxon = true;
		vmx->nested.vmxon = false;

		if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
			vmx->nested.smm.guest_mode = true;
	}

	vmcs12 = get_vmcs12(vcpu);
	if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
		return -EFAULT;

	if (vmcs12->hdr.revision_id != VMCS12_REVISION)
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return 0;

	vmx->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
	ret = -EINVAL;
	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != -1ull) {
		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);

		if (kvm_state->size <
		    sizeof(*kvm_state) +
		    sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
			goto error_guest_mode;

		if (copy_from_user(shadow_vmcs12,
				   user_vmx_nested_state->shadow_vmcs12,
				   sizeof(*shadow_vmcs12))) {
			ret = -EFAULT;
			goto error_guest_mode;
		}

		if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
		    !shadow_vmcs12->hdr.shadow_vmcs)
			goto error_guest_mode;
	}

	if (nested_vmx_check_controls(vcpu, vmcs12) ||
	    nested_vmx_check_host_state(vcpu, vmcs12) ||
	    nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
		goto error_guest_mode;

	vmx->nested.dirty_vmcs12 = true;
	ret = nested_vmx_enter_non_root_mode(vcpu, false);
	if (ret)
		goto error_guest_mode;

	return 0;

error_guest_mode:
	vmx->nested.nested_run_pending = 0;
	return ret;
}
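/*
 * For orientation, a sketch of how userspace (e.g. a VMM performing live
 * migration) drives the get/set handlers above. KVM_GET_NESTED_STATE and
 * KVM_SET_NESTED_STATE are the real vCPU ioctls; buffer sizing and error
 * handling are elided, and the variable names are illustrative only:
 *
 *	struct kvm_nested_state *state = calloc(1, state_size);
 *
 *	state->size = state_size;
 *	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0)	// source vCPU
 *		err(1, "KVM_GET_NESTED_STATE");
 *	...
 *	if (ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state) < 0)	// target vCPU
 *		err(1, "KVM_SET_NESTED_STATE");
 */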
void nested_vmx_vcpu_setup(void)
{
	if (enable_shadow_vmcs) {
		/*
		 * Point the current VMCS at the shared VMREAD/VMWRITE
		 * bitmaps; a clear bit lets the CPU satisfy VMREAD/VMWRITE
		 * of that field from the shadow VMCS without a VM-exit.
		 */
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
	}
}
/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields are
 * valid during nested entry from L1 to L2.
 * Each of these control MSRs has a low and high 32-bit half: a bit in the low
 * half is on if the corresponding bit in the (32-bit) control field *must* be
 * on, and a bit in the high half is on if the corresponding bit in the control
 * field *may* be on. See also vmx_control_verify().
 */
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
				bool apicv)
{
	/*
	 * Note that as a general rule, the high half of the MSRs (bits in
	 * the control fields which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of the MSRs (bits which must be 1) can
	 * be set to 0, meaning that L1 may turn off any of these bits. The
	 * reason is that if one of these bits is necessary, it will appear
	 * in vmcs01, and prepare_vmcs02, which bitwise-ORs the control
	 * fields of vmcs01 and vmcs12, will keep these bits set - and
	 * nested_vmx_exit_reflected() will not pass related exits to L1.
	 * These rules have exceptions below.
	 */
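	/*
	 * For illustration, the verification mentioned above boils down to a
	 * check of this shape (a minimal sketch; ctl_ok() is a hypothetical
	 * name, see vmx_control_verify() for the helper actually used):
	 *
	 *	static inline bool ctl_ok(u32 control, u32 low, u32 high)
	 *	{
	 *		// every must-be-1 bit set, no bit outside may-be-1 set
	 *		return ((control | low) == control) &&
	 *		       ((control & high) == control);
	 *	}
	 */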
	/* pin-based controls */
	rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
		msrs->pinbased_ctls_low,
		msrs->pinbased_ctls_high);
	msrs->pinbased_ctls_low |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->pinbased_ctls_high &=
		PIN_BASED_EXT_INTR_MASK |
		PIN_BASED_NMI_EXITING |
		PIN_BASED_VIRTUAL_NMIS |
		(apicv ? PIN_BASED_POSTED_INTR : 0);
	msrs->pinbased_ctls_high |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		PIN_BASED_VMX_PREEMPTION_TIMER;
	/* exit controls */
	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
		msrs->exit_ctls_low,
		msrs->exit_ctls_high);
	msrs->exit_ctls_low =
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;

	msrs->exit_ctls_high &=
#ifdef CONFIG_X86_64
		VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
	msrs->exit_ctls_high |=
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

	/* We support free control of debug control saving. */
	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
	/* entry controls */
	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
		msrs->entry_ctls_low,
		msrs->entry_ctls_high);
	msrs->entry_ctls_low =
		VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->entry_ctls_high &=
#ifdef CONFIG_X86_64
		VM_ENTRY_IA32E_MODE |
#endif
		VM_ENTRY_LOAD_IA32_PAT;
	msrs->entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);

	/* We support free control of debug control loading. */
	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
	/* cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
		msrs->procbased_ctls_low,
		msrs->procbased_ctls_high);
	msrs->procbased_ctls_low =
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->procbased_ctls_high &=
		CPU_BASED_VIRTUAL_INTR_PENDING |
		CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	msrs->procbased_ctls_high |=
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		CPU_BASED_USE_MSR_BITMAPS;

	/* We support free control of CR3 access interception. */
	msrs->procbased_ctls_low &=
		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
	/*
	 * secondary cpu-based controls. Do not include those that
	 * depend on CPUID bits, they are added later by vmx_cpuid_update.
	 */
	if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      msrs->secondary_ctls_low,
		      msrs->secondary_ctls_high);

	msrs->secondary_ctls_low = 0;
	msrs->secondary_ctls_high &=
		SECONDARY_EXEC_DESC |
		SECONDARY_EXEC_RDTSCP |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_WBINVD_EXITING |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_RDRAND_EXITING |
		SECONDARY_EXEC_ENABLE_INVPCID |
		SECONDARY_EXEC_RDSEED_EXITING |
		SECONDARY_EXEC_XSAVES;

	/*
	 * We can emulate "VMCS shadowing," even if the hardware
	 * doesn't support it.
	 */
	msrs->secondary_ctls_high |=
		SECONDARY_EXEC_SHADOW_VMCS;
	if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_EPT;
		msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
			VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
		if (cpu_has_vmx_ept_execute_only())
			msrs->ept_caps |=
				VMX_EPT_EXECUTE_ONLY_BIT;
		msrs->ept_caps &= ept_caps;
		msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
			VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
			VMX_EPT_1GB_PAGE_BIT;
		if (enable_ept_ad_bits) {
			msrs->secondary_ctls_high |=
				SECONDARY_EXEC_ENABLE_PML;
			msrs->ept_caps |= VMX_EPT_AD_BIT;
		}
	}
	if (cpu_has_vmx_vmfunc()) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VMFUNC;
		/*
		 * Advertise EPTP switching unconditionally
		 * since we emulate it.
		 */
		if (enable_ept)
			msrs->vmfunc_controls =
				VMX_VMFUNC_EPTP_SWITCHING;
	}
	/*
	 * Old versions of KVM use the single-context version without
	 * checking for support, so declare that it is supported even
	 * though it is treated as global context. The alternative is
	 * not failing the single-context invvpid, and it is worse.
	 */
	if (enable_vpid) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VPID;
		msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
			VMX_VPID_EXTENT_SUPPORTED_MASK;
	}
	if (enable_unrestricted_guest)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	if (flexpriority_enabled)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	/* miscellaneous data */
	rdmsr(MSR_IA32_VMX_MISC,
		msrs->misc_low,
		msrs->misc_high);
	msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
	msrs->misc_low |=
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
		VMX_MISC_ACTIVITY_HLT;
	msrs->misc_high = 0;
	/*
	 * This MSR reports some information about VMX support. We
	 * should return information about the VMX we emulate for the
	 * guest, and the VMCS structure we give it - not about the
	 * VMX support of the underlying hardware.
	 */
	msrs->basic =
		VMCS12_REVISION |
		VMX_BASIC_TRUE_CTLS |
		((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
		(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);

	if (cpu_has_vmx_basic_inout())
		msrs->basic |= VMX_BASIC_INOUT;
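	/*
	 * Per the SDM, the pieces assembled above land in IA32_VMX_BASIC as:
	 * bits 30:0 hold the VMCS revision identifier, bits 44:32 the VMCS
	 * region size, bits 53:50 the memory type (write-back = 6), bit 54
	 * the INS/OUTS exit-information flag, and bit 55 the "true" controls
	 * MSRs flag.
	 */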
	/*
	 * These MSRs specify bits which the guest must keep fixed on
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON	(X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON	X86_CR4_VMXE
	msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
	msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;

	/* These MSRs specify bits which the guest must keep fixed off. */
	rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
	rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
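	/*
	 * For illustration, a guest CR value is consistent with a
	 * fixed0/fixed1 pair iff every bit set in fixed0 is also set in the
	 * value and the value has no bits outside fixed1 (a minimal sketch;
	 * cr_fixed_ok() is a hypothetical name for this check):
	 *
	 *	static inline bool cr_fixed_ok(u64 val, u64 fixed0, u64 fixed1)
	 *	{
	 *		return ((val | fixed0) == val) &&
	 *		       ((val & fixed1) == val);
	 *	}
	 */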
	/* highest index: VMX_PREEMPTION_TIMER_VALUE */
	msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
}
void nested_vmx_hardware_unsetup(void)
{
	int i;

	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++)
			free_page((unsigned long)vmx_bitmap[i]);
	}
}
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
{
	int i;

	if (!cpu_has_vmx_shadow_vmcs())
		enable_shadow_vmcs = 0;
	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++) {
			/*
			 * The vmx_bitmap is not tied to a VM and so should
			 * not be charged to a memcg.
			 */
			vmx_bitmap[i] = (unsigned long *)
				__get_free_page(GFP_KERNEL);
			if (!vmx_bitmap[i]) {
				nested_vmx_hardware_unsetup();
				return -ENOMEM;
			}
		}

		init_vmcs_shadow_fields();
	}
	exit_handlers[EXIT_REASON_VMCLEAR]	= handle_vmclear;
	exit_handlers[EXIT_REASON_VMLAUNCH]	= handle_vmlaunch;
	exit_handlers[EXIT_REASON_VMPTRLD]	= handle_vmptrld;
	exit_handlers[EXIT_REASON_VMPTRST]	= handle_vmptrst;
	exit_handlers[EXIT_REASON_VMREAD]	= handle_vmread;
	exit_handlers[EXIT_REASON_VMRESUME]	= handle_vmresume;
	exit_handlers[EXIT_REASON_VMWRITE]	= handle_vmwrite;
	exit_handlers[EXIT_REASON_VMOFF]	= handle_vmoff;
	exit_handlers[EXIT_REASON_VMON]		= handle_vmon;
	exit_handlers[EXIT_REASON_INVEPT]	= handle_invept;
	exit_handlers[EXIT_REASON_INVVPID]	= handle_invvpid;
	exit_handlers[EXIT_REASON_VMFUNC]	= handle_vmfunc;
	kvm_x86_ops->check_nested_events = vmx_check_nested_events;
	kvm_x86_ops->get_nested_state = vmx_get_nested_state;
	kvm_x86_ops->set_nested_state = vmx_set_nested_state;
	kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
	kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
	kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;

	return 0;
}