1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/frame.h>
4 #include <linux/percpu.h>
6 #include <asm/debugreg.h>
7 #include <asm/mmu_context.h>
17 static bool __read_mostly enable_shadow_vmcs = 1;
18 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
20 static bool __read_mostly nested_early_check = 0;
21 module_param(nested_early_check, bool, S_IRUGO);
23 #define CC(consistency_check) \
25 bool failed = (consistency_check); \
27 trace_kvm_nested_vmenter_failed(#consistency_check, 0); \
32 * Hyper-V requires all of these, so mark them as supported even though
33 * they are just treated the same as all-context.
35 #define VMX_VPID_EXTENT_SUPPORTED_MASK \
36 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
37 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
38 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
39 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
41 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
48 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
50 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
51 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
53 struct shadow_vmcs_field {
57 static struct shadow_vmcs_field shadow_read_only_fields[] = {
58 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
59 #include "vmcs_shadow_fields.h"
61 static int max_shadow_read_only_fields =
62 ARRAY_SIZE(shadow_read_only_fields);
64 static struct shadow_vmcs_field shadow_read_write_fields[] = {
65 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
66 #include "vmcs_shadow_fields.h"
68 static int max_shadow_read_write_fields =
69 ARRAY_SIZE(shadow_read_write_fields);
71 static void init_vmcs_shadow_fields(void)
75 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
76 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
78 for (i = j = 0; i < max_shadow_read_only_fields; i++) {
79 struct shadow_vmcs_field entry = shadow_read_only_fields[i];
80 u16 field = entry.encoding;
82 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
83 (i + 1 == max_shadow_read_only_fields ||
84 shadow_read_only_fields[i + 1].encoding != field + 1))
85 pr_err("Missing field from shadow_read_only_field %x\n",
88 clear_bit(field, vmx_vmread_bitmap);
93 entry.offset += sizeof(u32);
95 shadow_read_only_fields[j++] = entry;
97 max_shadow_read_only_fields = j;
99 for (i = j = 0; i < max_shadow_read_write_fields; i++) {
100 struct shadow_vmcs_field entry = shadow_read_write_fields[i];
101 u16 field = entry.encoding;
103 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
104 (i + 1 == max_shadow_read_write_fields ||
105 shadow_read_write_fields[i + 1].encoding != field + 1))
106 pr_err("Missing field from shadow_read_write_field %x\n",
109 WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
110 field <= GUEST_TR_AR_BYTES,
111 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");
114 * PML and the preemption timer can be emulated, but the
115 * processor cannot vmwrite to fields that don't exist
119 case GUEST_PML_INDEX:
120 if (!cpu_has_vmx_pml())
123 case VMX_PREEMPTION_TIMER_VALUE:
124 if (!cpu_has_vmx_preemption_timer())
127 case GUEST_INTR_STATUS:
128 if (!cpu_has_vmx_apicv())
135 clear_bit(field, vmx_vmwrite_bitmap);
136 clear_bit(field, vmx_vmread_bitmap);
141 entry.offset += sizeof(u32);
143 shadow_read_write_fields[j++] = entry;
145 max_shadow_read_write_fields = j;
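/*
 * Illustrative sketch (hypothetical helper, not part of KVM): a 64-bit
 * VMCS field is shadowed as two 32-bit halves, and the encoding of the
 * "high" half is the encoding of the full-width field plus one.  That is
 * the pairing the loops above rely on when they expect the next table
 * entry to have encoding field + 1, and why the high half's vmcs12 offset
 * is the full field's offset plus sizeof(u32).
 */
static inline u16 vmcs_field_high_half_sketch(u16 encoding)
{
	/* Access-type bit 0 set selects the high 32 bits of a 64-bit field. */
	return encoding + 1;
}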
149 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
150 * set the success or error code of an emulated VMX instruction (as specified
151 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated instruction.
154 static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
156 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
157 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
158 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
159 return kvm_skip_emulated_instruction(vcpu);
162 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
164 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
165 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
166 X86_EFLAGS_SF | X86_EFLAGS_OF))
168 return kvm_skip_emulated_instruction(vcpu);
171 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
172 u32 vm_instruction_error)
174 struct vcpu_vmx *vmx = to_vmx(vcpu);
177 * failValid writes the error number to the current VMCS, which
178 * can't be done if there isn't a current VMCS.
180 if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
181 return nested_vmx_failInvalid(vcpu);
183 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
184 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
185 X86_EFLAGS_SF | X86_EFLAGS_OF))
187 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
189 * We don't need to force a shadow sync because
190 * VM_INSTRUCTION_ERROR is not shadowed
192 return kvm_skip_emulated_instruction(vcpu);
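/*
 * Illustrative sketch (hypothetical helper, not used by KVM): per the
 * SDM's "Conventions" section, VMsucceed clears CF/PF/AF/ZF/SF/OF,
 * VMfailInvalid sets only CF, and VMfailValid sets only ZF while the
 * error number is written to the VM-instruction error field, exactly as
 * the three functions above do.
 */
static inline unsigned long vmx_outcome_rflags_sketch(bool fail,
						      bool has_current_vmcs)
{
	if (!fail)
		return 0;				/* VMsucceed */

	/* VMfailValid needs a current VMCS to hold the error number. */
	return has_current_vmcs ? X86_EFLAGS_ZF : X86_EFLAGS_CF;
}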
195 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
197	/* TODO: do not simply reset the guest here. */
198 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
199 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
202 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
204 return fixed_bits_valid(control, low, high);
207 static inline u64 vmx_control_msr(u32 low, u32 high)
209 return low | ((u64)high << 32);
212 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
214 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
215 vmcs_write64(VMCS_LINK_POINTER, -1ull);
216 vmx->nested.need_vmcs12_to_shadow_sync = false;
219 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
221 struct vcpu_vmx *vmx = to_vmx(vcpu);
223 if (!vmx->nested.hv_evmcs)
226 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
227 vmx->nested.hv_evmcs_vmptr = 0;
228 vmx->nested.hv_evmcs = NULL;
232 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
233 * just stops using VMX.
235 static void free_nested(struct kvm_vcpu *vcpu)
237 struct vcpu_vmx *vmx = to_vmx(vcpu);
239 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
242 kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
244 vmx->nested.vmxon = false;
245 vmx->nested.smm.vmxon = false;
246 free_vpid(vmx->nested.vpid02);
247 vmx->nested.posted_intr_nv = -1;
248 vmx->nested.current_vmptr = -1ull;
249 if (enable_shadow_vmcs) {
250 vmx_disable_shadow_vmcs(vmx);
251 vmcs_clear(vmx->vmcs01.shadow_vmcs);
252 free_vmcs(vmx->vmcs01.shadow_vmcs);
253 vmx->vmcs01.shadow_vmcs = NULL;
255 kfree(vmx->nested.cached_vmcs12);
256 vmx->nested.cached_vmcs12 = NULL;
257 kfree(vmx->nested.cached_shadow_vmcs12);
258 vmx->nested.cached_shadow_vmcs12 = NULL;
259 /* Unpin physical memory we referred to in the vmcs02 */
260 if (vmx->nested.apic_access_page) {
261 kvm_release_page_clean(vmx->nested.apic_access_page);
262 vmx->nested.apic_access_page = NULL;
264 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
265 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
266 vmx->nested.pi_desc = NULL;
268 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
270 nested_release_evmcs(vcpu);
272 free_loaded_vmcs(&vmx->nested.vmcs02);
275 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
276 struct loaded_vmcs *prev)
278 struct vmcs_host_state *dest, *src;
280 if (unlikely(!vmx->guest_state_loaded))
283 src = &prev->host_state;
284 dest = &vmx->loaded_vmcs->host_state;
286 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
287 dest->ldt_sel = src->ldt_sel;
289 dest->ds_sel = src->ds_sel;
290 dest->es_sel = src->es_sel;
294 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
296 struct vcpu_vmx *vmx = to_vmx(vcpu);
297 struct loaded_vmcs *prev;
300 if (vmx->loaded_vmcs == vmcs)
304 prev = vmx->loaded_vmcs;
305 vmx->loaded_vmcs = vmcs;
306 vmx_vcpu_load_vmcs(vcpu, cpu);
307 vmx_sync_vmcs_host_state(vmx, prev);
310 vmx_segment_cache_clear(vmx);
314 * Ensure that the current vmcs of the logical processor is the
315 * vmcs01 of the vcpu before calling free_nested().
317 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
320 vmx_leave_nested(vcpu);
321 vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
326 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
327 struct x86_exception *fault)
329 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
330 struct vcpu_vmx *vmx = to_vmx(vcpu);
332 unsigned long exit_qualification = vcpu->arch.exit_qualification;
334 if (vmx->nested.pml_full) {
335 exit_reason = EXIT_REASON_PML_FULL;
336 vmx->nested.pml_full = false;
337 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
338 } else if (fault->error_code & PFERR_RSVD_MASK)
339 exit_reason = EXIT_REASON_EPT_MISCONFIG;
341 exit_reason = EXIT_REASON_EPT_VIOLATION;
343 nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
344 vmcs12->guest_physical_address = fault->address;
347 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
349 WARN_ON(mmu_is_nested(vcpu));
351 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
352 kvm_init_shadow_ept_mmu(vcpu,
353 to_vmx(vcpu)->nested.msrs.ept_caps &
354 VMX_EPT_EXECUTE_ONLY_BIT,
355 nested_ept_ad_enabled(vcpu),
356 nested_ept_get_eptp(vcpu));
357 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
358 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
359 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
361 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
364 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
366 vcpu->arch.mmu = &vcpu->arch.root_mmu;
367 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
370 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
373 bool inequality, bit;
375 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
377 (error_code & vmcs12->page_fault_error_code_mask) !=
378 vmcs12->page_fault_error_code_match;
379 return inequality ^ bit;
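/*
 * Worked example (illustrative): with bit 14 (#PF) set in the exception
 * bitmap, page_fault_error_code_mask = PFERR_USER_MASK and
 * page_fault_error_code_match = PFERR_USER_MASK, only user-mode page
 * faults satisfy (error_code & mask) == match and therefore cause a
 * VM-exit to L1; clearing bit 14 inverts the result, so only
 * supervisor-mode page faults would exit.
 */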
384 * KVM wants to inject page faults that it received into the guest. This function
385 * checks whether, in a nested guest, they need to be injected to L1 or L2.
387 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
389 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
390 unsigned int nr = vcpu->arch.exception.nr;
391 bool has_payload = vcpu->arch.exception.has_payload;
392 unsigned long payload = vcpu->arch.exception.payload;
394 if (nr == PF_VECTOR) {
395 if (vcpu->arch.exception.nested_apf) {
396 *exit_qual = vcpu->arch.apf.nested_apf_token;
399 if (nested_vmx_is_page_fault_vmexit(vmcs12,
400 vcpu->arch.exception.error_code)) {
401 *exit_qual = has_payload ? payload : vcpu->arch.cr2;
404 } else if (vmcs12->exception_bitmap & (1u << nr)) {
405 if (nr == DB_VECTOR) {
407 payload = vcpu->arch.dr6;
408 payload &= ~(DR6_FIXED_1 | DR6_BT);
411 *exit_qual = payload;
421 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
422 struct x86_exception *fault)
424 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
426 WARN_ON(!is_guest_mode(vcpu));
428 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
429 !to_vmx(vcpu)->nested.nested_run_pending) {
430 vmcs12->vm_exit_intr_error_code = fault->error_code;
431 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
432 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
433 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
436 kvm_inject_page_fault(vcpu, fault);
440 static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
442 return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
445 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
446 struct vmcs12 *vmcs12)
448 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
451 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
452 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
458 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
459 struct vmcs12 *vmcs12)
461 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
464 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
470 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
471 struct vmcs12 *vmcs12)
473 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
476 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
483 * Check if a write to the MSR is intercepted by the L01 MSR bitmap.
485 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
487 unsigned long *msr_bitmap;
488 int f = sizeof(unsigned long);
490 if (!cpu_has_vmx_msr_bitmap())
493 msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
496 return !!test_bit(msr, msr_bitmap + 0x800 / f);
497 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
499 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
506 * If an MSR is allowed by L0, we should check whether it is allowed by L1.
507 * The corresponding bit will be cleared only if both L0 and L1 allow it.
509 static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
510 unsigned long *msr_bitmap_nested,
513 int f = sizeof(unsigned long);
516 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
517 * have the write-low and read-high bitmap offsets the wrong way round.
518 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
521 if (type & MSR_TYPE_R &&
522 !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
524 __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
526 if (type & MSR_TYPE_W &&
527 !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
529 __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
531 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
533 if (type & MSR_TYPE_R &&
534 !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
536 __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
538 if (type & MSR_TYPE_W &&
539 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
541 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
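/*
 * Illustrative sketch (hypothetical helper, not part of KVM): the 4K MSR
 * bitmap is split into four 1K regions - read-low (0x000), read-high
 * (0x400), write-low (0x800) and write-high (0xc00).  The "low" regions
 * cover MSRs 0x00000000-0x00001fff and the "high" regions cover MSRs
 * 0xc0000000-0xc0001fff, one bit per MSR, which is where the offsets
 * used above come from.
 */
static inline unsigned int msr_bitmap_byte_offset_sketch(u32 msr, bool write)
{
	unsigned int base = write ? 0x800 : 0x000;

	if (msr >= 0xc0000000)
		base += 0x400;

	return base + (msr & 0x1fff) / 8;
}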
546 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
550 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
551 unsigned word = msr / BITS_PER_LONG;
553 msr_bitmap[word] = ~0;
554 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
559 * Merge L0's and L1's MSR bitmaps, returning false to indicate that
560 * we do not use the hardware MSR bitmap.
562 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
563 struct vmcs12 *vmcs12)
566 unsigned long *msr_bitmap_l1;
567 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
568 struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;
570 /* Nothing to do if the MSR bitmap is not in use. */
571 if (!cpu_has_vmx_msr_bitmap() ||
572 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
575 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
578 msr_bitmap_l1 = (unsigned long *)map->hva;
581 * To keep the control flow simple, pay eight 8-byte writes (sixteen
582 * 4-byte writes on 32-bit systems) up front to enable intercepts for
583 * the x2APIC MSR range and selectively disable them below.
585 enable_x2apic_msr_intercepts(msr_bitmap_l0);
587 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
588 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
590 * L0 need not intercept reads for MSRs between 0x800
591 * and 0x8ff, it just lets the processor take the value
592 * from the virtual-APIC page; take those 256 bits
593 * directly from the L1 bitmap.
595 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
596 unsigned word = msr / BITS_PER_LONG;
598 msr_bitmap_l0[word] = msr_bitmap_l1[word];
602 nested_vmx_disable_intercept_for_msr(
603 msr_bitmap_l1, msr_bitmap_l0,
604 X2APIC_MSR(APIC_TASKPRI),
605 MSR_TYPE_R | MSR_TYPE_W);
607 if (nested_cpu_has_vid(vmcs12)) {
608 nested_vmx_disable_intercept_for_msr(
609 msr_bitmap_l1, msr_bitmap_l0,
610 X2APIC_MSR(APIC_EOI),
612 nested_vmx_disable_intercept_for_msr(
613 msr_bitmap_l1, msr_bitmap_l0,
614 X2APIC_MSR(APIC_SELF_IPI),
619 /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
620 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
621 MSR_FS_BASE, MSR_TYPE_RW);
623 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
624 MSR_GS_BASE, MSR_TYPE_RW);
626 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
627 MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
630 * Checking the L0->L1 bitmap is trying to verify two things:
632 * 1. L0 gave permission to L1 to actually pass through the MSR. This
633 * ensures that we do not accidentally generate an L02 MSR bitmap
634 * from the L12 MSR bitmap that is too permissive.
635 * 2. That L1 or L2s have actually used the MSR. This avoids
636 * unnecessary merging of the bitmap if the MSR is unused. This
637 * works properly because we only update the L01 MSR bitmap lazily.
638 * So even if L0 should pass L1 these MSRs, the L01 bitmap is only
639 * updated to reflect this when L1 (or its L2s) actually write to this MSR.
642 if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
643 nested_vmx_disable_intercept_for_msr(
644 msr_bitmap_l1, msr_bitmap_l0,
646 MSR_TYPE_R | MSR_TYPE_W);
648 if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
649 nested_vmx_disable_intercept_for_msr(
650 msr_bitmap_l1, msr_bitmap_l0,
654 kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);
659 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
660 struct vmcs12 *vmcs12)
662 struct kvm_host_map map;
663 struct vmcs12 *shadow;
665 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
666 vmcs12->vmcs_link_pointer == -1ull)
669 shadow = get_shadow_vmcs12(vcpu);
671 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
674 memcpy(shadow, map.hva, VMCS12_SIZE);
675 kvm_vcpu_unmap(vcpu, &map, false);
678 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
679 struct vmcs12 *vmcs12)
681 struct vcpu_vmx *vmx = to_vmx(vcpu);
683 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
684 vmcs12->vmcs_link_pointer == -1ull)
687 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
688 get_shadow_vmcs12(vcpu), VMCS12_SIZE);
692 * In nested virtualization, check if L1 has set
693 * VM_EXIT_ACK_INTR_ON_EXIT
695 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
697 return get_vmcs12(vcpu)->vm_exit_controls &
698 VM_EXIT_ACK_INTR_ON_EXIT;
701 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
703 return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
706 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
707 struct vmcs12 *vmcs12)
709 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
710 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
716 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
717 struct vmcs12 *vmcs12)
719 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
720 !nested_cpu_has_apic_reg_virt(vmcs12) &&
721 !nested_cpu_has_vid(vmcs12) &&
722 !nested_cpu_has_posted_intr(vmcs12))
726 * If virtualize x2apic mode is enabled,
727 * virtualize apic access must be disabled.
729 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
730 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
734 * If virtual interrupt delivery is enabled,
735 * we must exit on external interrupts.
737 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
741 * bits 15:8 should be zero in posted_intr_nv,
742 * the descriptor address has already been checked
743 * in nested_get_vmcs12_pages.
745 * bits 5:0 of posted_intr_desc_addr should be zero.
747 if (nested_cpu_has_posted_intr(vmcs12) &&
748 (CC(!nested_cpu_has_vid(vmcs12)) ||
749 CC(!nested_exit_intr_ack_set(vcpu)) ||
750 CC((vmcs12->posted_intr_nv & 0xff00)) ||
751 CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
752 CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
755 /* tpr shadow is needed by all apicv features. */
756 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
762 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
769 maxphyaddr = cpuid_maxphyaddr(vcpu);
770 if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
771 (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
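/*
 * Worked example (illustrative): each struct vmx_msr_entry is 16 bytes,
 * so a list of count entries occupies [addr, addr + count * 16 - 1].
 * The checks above reject a list whose base address is not 16-byte
 * aligned or whose last byte lies above the guest's physical-address
 * width (maxphyaddr).
 */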
777 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
778 struct vmcs12 *vmcs12)
780 if (CC(nested_vmx_check_msr_switch(vcpu,
781 vmcs12->vm_exit_msr_load_count,
782 vmcs12->vm_exit_msr_load_addr)) ||
783 CC(nested_vmx_check_msr_switch(vcpu,
784 vmcs12->vm_exit_msr_store_count,
785 vmcs12->vm_exit_msr_store_addr)))
791 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
792 struct vmcs12 *vmcs12)
794 if (CC(nested_vmx_check_msr_switch(vcpu,
795 vmcs12->vm_entry_msr_load_count,
796 vmcs12->vm_entry_msr_load_addr)))
802 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
803 struct vmcs12 *vmcs12)
805 if (!nested_cpu_has_pml(vmcs12))
808 if (CC(!nested_cpu_has_ept(vmcs12)) ||
809 CC(!page_address_valid(vcpu, vmcs12->pml_address)))
815 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
816 struct vmcs12 *vmcs12)
818 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
819 !nested_cpu_has_ept(vmcs12)))
824 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
825 struct vmcs12 *vmcs12)
827 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
828 !nested_cpu_has_ept(vmcs12)))
833 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
834 struct vmcs12 *vmcs12)
836 if (!nested_cpu_has_shadow_vmcs(vmcs12))
839 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
840 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
846 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
847 struct vmx_msr_entry *e)
849 /* x2APIC MSR accesses are not allowed */
850 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
852 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
853 CC(e->index == MSR_IA32_UCODE_REV))
855 if (CC(e->reserved != 0))
860 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
861 struct vmx_msr_entry *e)
863 if (CC(e->index == MSR_FS_BASE) ||
864 CC(e->index == MSR_GS_BASE) ||
865 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
866 nested_vmx_msr_check_common(vcpu, e))
871 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
872 struct vmx_msr_entry *e)
874 if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
875 nested_vmx_msr_check_common(vcpu, e))
880 static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
882 struct vcpu_vmx *vmx = to_vmx(vcpu);
883 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
884 vmx->nested.msrs.misc_high);
886 return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
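/*
 * Worked example (illustrative): bits 27:25 of IA32_VMX_MISC hold N, and
 * the SDM's recommended maximum MSR-list length is 512 * (N + 1).  With
 * N = 0 the load/store lists handled below are capped at 512 entries.
 */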
890 * Load the guest's/host's MSRs at nested entry/exit.
891 * Return 0 for success, or the failing entry index otherwise.
893 * One of the failure modes for MSR load/store is when a list exceeds the
894 * virtual hardware's capacity. To maintain compatibility with hardware as much
895 * as possible, process all valid entries before failing rather than prechecking
896 * for a capacity violation.
898 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
901 struct vmx_msr_entry e;
902 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
904 for (i = 0; i < count; i++) {
905 if (unlikely(i >= max_msr_list_size))
908 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
910 pr_debug_ratelimited(
911 "%s cannot read MSR entry (%u, 0x%08llx)\n",
912 __func__, i, gpa + i * sizeof(e));
915 if (nested_vmx_load_msr_check(vcpu, &e)) {
916 pr_debug_ratelimited(
917 "%s check failed (%u, 0x%x, 0x%x)\n",
918 __func__, i, e.index, e.reserved);
921 if (kvm_set_msr(vcpu, e.index, e.value)) {
922 pr_debug_ratelimited(
923 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
924 __func__, i, e.index, e.value);
933 static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
937 struct vcpu_vmx *vmx = to_vmx(vcpu);
940 * If the L0 hypervisor stored a more accurate value for the TSC that
941 * does not include the time taken for emulation of the L2->L1
942 * VM-exit in L0, use the more accurate value.
944 if (msr_index == MSR_IA32_TSC) {
945 int index = vmx_find_msr_index(&vmx->msr_autostore.guest,
949 u64 val = vmx->msr_autostore.guest.val[index].value;
951 *data = kvm_read_l1_tsc(vcpu, val);
956 if (kvm_get_msr(vcpu, msr_index, data)) {
957 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
964 static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
965 struct vmx_msr_entry *e)
967 if (kvm_vcpu_read_guest(vcpu,
968 gpa + i * sizeof(*e),
969 e, 2 * sizeof(u32))) {
970 pr_debug_ratelimited(
971 "%s cannot read MSR entry (%u, 0x%08llx)\n",
972 __func__, i, gpa + i * sizeof(*e));
975 if (nested_vmx_store_msr_check(vcpu, e)) {
976 pr_debug_ratelimited(
977 "%s check failed (%u, 0x%x, 0x%x)\n",
978 __func__, i, e->index, e->reserved);
984 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
988 struct vmx_msr_entry e;
989 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
991 for (i = 0; i < count; i++) {
992 if (unlikely(i >= max_msr_list_size))
995 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
998 if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
1001 if (kvm_vcpu_write_guest(vcpu,
1002 gpa + i * sizeof(e) +
1003 offsetof(struct vmx_msr_entry, value),
1004 &data, sizeof(data))) {
1005 pr_debug_ratelimited(
1006 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
1007 __func__, i, e.index, data);
1014 static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
1016 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1017 u32 count = vmcs12->vm_exit_msr_store_count;
1018 u64 gpa = vmcs12->vm_exit_msr_store_addr;
1019 struct vmx_msr_entry e;
1022 for (i = 0; i < count; i++) {
1023 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
1026 if (e.index == msr_index)
1032 static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
1035 struct vcpu_vmx *vmx = to_vmx(vcpu);
1036 struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
1037 bool in_vmcs12_store_list;
1038 int msr_autostore_index;
1039 bool in_autostore_list;
1042 msr_autostore_index = vmx_find_msr_index(autostore, msr_index);
1043 in_autostore_list = msr_autostore_index >= 0;
1044 in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
1046 if (in_vmcs12_store_list && !in_autostore_list) {
1047 if (autostore->nr == NR_LOADSTORE_MSRS) {
1049 * Emulated VMEntry does not fail here. Instead a less
1050 * accurate value will be returned by
1051 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
1052 * instead of reading the value from the vmcs02 VMExit MSR-store area.
1055 pr_warn_ratelimited(
1056 "Not enough msr entries in msr_autostore. Can't add msr %x\n",
1060 last = autostore->nr++;
1061 autostore->val[last].index = msr_index;
1062 } else if (!in_vmcs12_store_list && in_autostore_list) {
1063 last = --autostore->nr;
1064 autostore->val[msr_autostore_index] = autostore->val[last];
1068 static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
1070 unsigned long invalid_mask;
1072 invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
1073 return (val & invalid_mask) == 0;
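/*
 * Worked example (illustrative): with cpuid_maxphyaddr(vcpu) == 46,
 * invalid_mask is ~0ULL << 46 == 0xffffc00000000000, so any CR3 value
 * with bits 63:46 set is considered invalid.
 */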
1077 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
1078 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
1079 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
1080 * @entry_failure_code.
1082 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
1083 u32 *entry_failure_code)
1085 if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
1086 if (CC(!nested_cr3_valid(vcpu, cr3))) {
1087 *entry_failure_code = ENTRY_FAIL_DEFAULT;
1092 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
1093 * must not be dereferenced.
1095 if (is_pae_paging(vcpu) && !nested_ept) {
1096 if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
1097 *entry_failure_code = ENTRY_FAIL_PDPTE;
1104 kvm_mmu_new_cr3(vcpu, cr3, false, false);
1106 vcpu->arch.cr3 = cr3;
1107 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
1109 kvm_init_mmu(vcpu, false);
1115 * Returns true if KVM is able to configure the CPU to tag TLB entries
1116 * populated by L2 differently than TLB entries populated
1119 * If L0 uses EPT, L1 and L2 run with different EPTP because
1120 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
1121 * are tagged with different EPTP.
1123 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
1124 * with different VPID (L1 entries are tagged with vmx->vpid
1125 * while L2 entries are tagged with vmx->nested.vpid02).
1127 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
1129 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1131 return enable_ept ||
1132 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1135 static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
1136 struct vmcs12 *vmcs12,
1139 struct vcpu_vmx *vmx = to_vmx(vcpu);
1142 * If VPID is disabled, linear and combined mappings are flushed on
1143 * VM-Enter/VM-Exit, and guest-physical mappings are valid only for
1144 * their associated EPTP.
1150 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
1151 * for *all* contexts to be flushed on VM-Enter/VM-Exit.
1153 * If VPID is enabled and used by vmcs12, but L2 does not have a unique
1154 * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
1155 * a VPID for L2, flush the current context as the effective ASID is
1156 * common to both L1 and L2.
1158 * Defer the flush so that it runs after vmcs02.EPTP has been set by
1159 * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
1160 * redundant flushes further down the nested pipeline.
1162 * If a TLB flush isn't required due to any of the above, and vpid12 is
1163 * changing then the new "virtual" VPID (vpid12) will reuse the same
1164 * "real" VPID (vpid02), and so needs to be sync'd. There is no direct
1165 * mapping between vpid02 and vpid12; vpid02 is per-vCPU and reused for all nested vCPUs.
1168 if (!nested_cpu_has_vpid(vmcs12)) {
1169 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1170 } else if (!nested_has_guest_tlb_tag(vcpu)) {
1171 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1172 } else if (is_vmenter &&
1173 vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
1174 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
1175 vpid_sync_context(nested_get_vpid02(vcpu));
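/*
 * Illustrative summary of the decision above (hypothetical enum and
 * helper, not used by KVM): no vpid12 means flush everything, vpid12
 * without a unique TLB tag for L2 means flush the current context, and
 * otherwise only a vpid12 change on VM-Enter requires syncing vpid02.
 */
enum nested_tlb_action_sketch {
	NESTED_TLB_FLUSH_ALL,
	NESTED_TLB_FLUSH_CURRENT,
	NESTED_TLB_SYNC_VPID02,
	NESTED_TLB_NOP,
};

static inline enum nested_tlb_action_sketch
nested_tlb_action(bool has_vpid12, bool has_guest_tlb_tag,
		  bool is_vmenter, bool vpid12_changed)
{
	if (!has_vpid12)
		return NESTED_TLB_FLUSH_ALL;
	if (!has_guest_tlb_tag)
		return NESTED_TLB_FLUSH_CURRENT;
	if (is_vmenter && vpid12_changed)
		return NESTED_TLB_SYNC_VPID02;
	return NESTED_TLB_NOP;
}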
1179 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
1184 return (superset | subset) == superset;
1187 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
1189 const u64 feature_and_reserved =
1190 /* feature (except bit 48; see below) */
1191 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
1193 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
1194 u64 vmx_basic = vmx->nested.msrs.basic;
1196 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
1200 * KVM does not emulate a version of VMX that constrains physical
1201 * addresses of VMX structures (e.g. VMCS) to 32-bits.
1203 if (data & BIT_ULL(48))
1206 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1207 vmx_basic_vmcs_revision_id(data))
1210 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1213 vmx->nested.msrs.basic = data;
1218 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1223 switch (msr_index) {
1224 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1225 lowp = &vmx->nested.msrs.pinbased_ctls_low;
1226 highp = &vmx->nested.msrs.pinbased_ctls_high;
1228 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1229 lowp = &vmx->nested.msrs.procbased_ctls_low;
1230 highp = &vmx->nested.msrs.procbased_ctls_high;
1232 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1233 lowp = &vmx->nested.msrs.exit_ctls_low;
1234 highp = &vmx->nested.msrs.exit_ctls_high;
1236 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1237 lowp = &vmx->nested.msrs.entry_ctls_low;
1238 highp = &vmx->nested.msrs.entry_ctls_high;
1240 case MSR_IA32_VMX_PROCBASED_CTLS2:
1241 lowp = &vmx->nested.msrs.secondary_ctls_low;
1242 highp = &vmx->nested.msrs.secondary_ctls_high;
1248 supported = vmx_control_msr(*lowp, *highp);
1250 /* Check must-be-1 bits are still 1. */
1251 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
1254 /* Check must-be-0 bits are still 0. */
1255 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
1259 *highp = data >> 32;
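/*
 * Illustrative note: for the "true" control MSRs the low 32 bits report
 * the allowed-0 settings (control bits that must be 1) and the high 32
 * bits report the allowed-1 settings (control bits that may be 1).  The
 * two is_bitwise_subset() checks above therefore only let userspace make
 * the emulated MSR more restrictive than what KVM supports: it may add
 * must-be-1 bits or drop may-be-1 bits, but never advertise extra
 * features.
 */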
1263 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
1265 const u64 feature_and_reserved_bits =
1267 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1268 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1270 GENMASK_ULL(13, 9) | BIT_ULL(31);
1273 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
1274 vmx->nested.msrs.misc_high);
1276 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
1279 if ((vmx->nested.msrs.pinbased_ctls_high &
1280 PIN_BASED_VMX_PREEMPTION_TIMER) &&
1281 vmx_misc_preemption_timer_rate(data) !=
1282 vmx_misc_preemption_timer_rate(vmx_misc))
1285 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1288 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1291 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1294 vmx->nested.msrs.misc_low = data;
1295 vmx->nested.msrs.misc_high = data >> 32;
1300 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
1302 u64 vmx_ept_vpid_cap;
1304 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
1305 vmx->nested.msrs.vpid_caps);
1307 /* Every bit is either reserved or a feature bit. */
1308 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
1311 vmx->nested.msrs.ept_caps = data;
1312 vmx->nested.msrs.vpid_caps = data >> 32;
1316 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1320 switch (msr_index) {
1321 case MSR_IA32_VMX_CR0_FIXED0:
1322 msr = &vmx->nested.msrs.cr0_fixed0;
1324 case MSR_IA32_VMX_CR4_FIXED0:
1325 msr = &vmx->nested.msrs.cr4_fixed0;
1332 * 1 bits (which indicate bits that "must-be-1" during VMX operation)
1333 * must be 1 in the restored value.
1335 if (!is_bitwise_subset(data, *msr, -1ULL))
1343 * Called when userspace is restoring VMX MSRs.
1345 * Returns 0 on success, non-0 otherwise.
1347 int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1349 struct vcpu_vmx *vmx = to_vmx(vcpu);
1352 * Don't allow changes to the VMX capability MSRs while the vCPU
1353 * is in VMX operation.
1355 if (vmx->nested.vmxon)
1358 switch (msr_index) {
1359 case MSR_IA32_VMX_BASIC:
1360 return vmx_restore_vmx_basic(vmx, data);
1361 case MSR_IA32_VMX_PINBASED_CTLS:
1362 case MSR_IA32_VMX_PROCBASED_CTLS:
1363 case MSR_IA32_VMX_EXIT_CTLS:
1364 case MSR_IA32_VMX_ENTRY_CTLS:
1366 * The "non-true" VMX capability MSRs are generated from the
1367 * "true" MSRs, so we do not support restoring them directly.
1369 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1370 * should restore the "true" MSRs with the must-be-1 bits
1371 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1372 * DEFAULT SETTINGS".
1375 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1376 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1377 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1378 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1379 case MSR_IA32_VMX_PROCBASED_CTLS2:
1380 return vmx_restore_control_msr(vmx, msr_index, data);
1381 case MSR_IA32_VMX_MISC:
1382 return vmx_restore_vmx_misc(vmx, data);
1383 case MSR_IA32_VMX_CR0_FIXED0:
1384 case MSR_IA32_VMX_CR4_FIXED0:
1385 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1386 case MSR_IA32_VMX_CR0_FIXED1:
1387 case MSR_IA32_VMX_CR4_FIXED1:
1389 * These MSRs are generated based on the vCPU's CPUID, so we
1390 * do not support restoring them directly.
1393 case MSR_IA32_VMX_EPT_VPID_CAP:
1394 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1395 case MSR_IA32_VMX_VMCS_ENUM:
1396 vmx->nested.msrs.vmcs_enum = data;
1398 case MSR_IA32_VMX_VMFUNC:
1399 if (data & ~vmx->nested.msrs.vmfunc_controls)
1401 vmx->nested.msrs.vmfunc_controls = data;
1405 * The rest of the VMX capability MSRs do not support restore.
1411 /* Returns 0 on success, non-0 otherwise. */
1412 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
1414 switch (msr_index) {
1415 case MSR_IA32_VMX_BASIC:
1416 *pdata = msrs->basic;
1418 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1419 case MSR_IA32_VMX_PINBASED_CTLS:
1420 *pdata = vmx_control_msr(
1421 msrs->pinbased_ctls_low,
1422 msrs->pinbased_ctls_high);
1423 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1424 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1426 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1427 case MSR_IA32_VMX_PROCBASED_CTLS:
1428 *pdata = vmx_control_msr(
1429 msrs->procbased_ctls_low,
1430 msrs->procbased_ctls_high);
1431 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1432 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1434 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1435 case MSR_IA32_VMX_EXIT_CTLS:
1436 *pdata = vmx_control_msr(
1437 msrs->exit_ctls_low,
1438 msrs->exit_ctls_high);
1439 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1440 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1442 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1443 case MSR_IA32_VMX_ENTRY_CTLS:
1444 *pdata = vmx_control_msr(
1445 msrs->entry_ctls_low,
1446 msrs->entry_ctls_high);
1447 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1448 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1450 case MSR_IA32_VMX_MISC:
1451 *pdata = vmx_control_msr(
1455 case MSR_IA32_VMX_CR0_FIXED0:
1456 *pdata = msrs->cr0_fixed0;
1458 case MSR_IA32_VMX_CR0_FIXED1:
1459 *pdata = msrs->cr0_fixed1;
1461 case MSR_IA32_VMX_CR4_FIXED0:
1462 *pdata = msrs->cr4_fixed0;
1464 case MSR_IA32_VMX_CR4_FIXED1:
1465 *pdata = msrs->cr4_fixed1;
1467 case MSR_IA32_VMX_VMCS_ENUM:
1468 *pdata = msrs->vmcs_enum;
1470 case MSR_IA32_VMX_PROCBASED_CTLS2:
1471 *pdata = vmx_control_msr(
1472 msrs->secondary_ctls_low,
1473 msrs->secondary_ctls_high);
1475 case MSR_IA32_VMX_EPT_VPID_CAP:
1476 *pdata = msrs->ept_caps |
1477 ((u64)msrs->vpid_caps << 32);
1479 case MSR_IA32_VMX_VMFUNC:
1480 *pdata = msrs->vmfunc_controls;
1490 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
1491 * been modified by the L1 guest. Note, "writable" in this context means
1492 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
1493 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
1494 * VM-exit information fields (which are actually writable if the vCPU is
1495 * configured to support "VMWRITE to any supported field in the VMCS").
1497 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
1499 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1500 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1501 struct shadow_vmcs_field field;
1505 if (WARN_ON(!shadow_vmcs))
1510 vmcs_load(shadow_vmcs);
1512 for (i = 0; i < max_shadow_read_write_fields; i++) {
1513 field = shadow_read_write_fields[i];
1514 val = __vmcs_readl(field.encoding);
1515 vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
1518 vmcs_clear(shadow_vmcs);
1519 vmcs_load(vmx->loaded_vmcs->vmcs);
1524 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
1526 const struct shadow_vmcs_field *fields[] = {
1527 shadow_read_write_fields,
1528 shadow_read_only_fields
1530 const int max_fields[] = {
1531 max_shadow_read_write_fields,
1532 max_shadow_read_only_fields
1534 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1535 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1536 struct shadow_vmcs_field field;
1540 if (WARN_ON(!shadow_vmcs))
1543 vmcs_load(shadow_vmcs);
1545 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1546 for (i = 0; i < max_fields[q]; i++) {
1547 field = fields[q][i];
1548 val = vmcs12_read_any(vmcs12, field.encoding,
1550 __vmcs_writel(field.encoding, val);
1554 vmcs_clear(shadow_vmcs);
1555 vmcs_load(vmx->loaded_vmcs->vmcs);
1558 static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
1560 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1561 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1563 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1564 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1565 vmcs12->guest_rip = evmcs->guest_rip;
1567 if (unlikely(!(evmcs->hv_clean_fields &
1568 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1569 vmcs12->guest_rsp = evmcs->guest_rsp;
1570 vmcs12->guest_rflags = evmcs->guest_rflags;
1571 vmcs12->guest_interruptibility_info =
1572 evmcs->guest_interruptibility_info;
1575 if (unlikely(!(evmcs->hv_clean_fields &
1576 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1577 vmcs12->cpu_based_vm_exec_control =
1578 evmcs->cpu_based_vm_exec_control;
1581 if (unlikely(!(evmcs->hv_clean_fields &
1582 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
1583 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1586 if (unlikely(!(evmcs->hv_clean_fields &
1587 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1588 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1591 if (unlikely(!(evmcs->hv_clean_fields &
1592 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1593 vmcs12->vm_entry_intr_info_field =
1594 evmcs->vm_entry_intr_info_field;
1595 vmcs12->vm_entry_exception_error_code =
1596 evmcs->vm_entry_exception_error_code;
1597 vmcs12->vm_entry_instruction_len =
1598 evmcs->vm_entry_instruction_len;
1601 if (unlikely(!(evmcs->hv_clean_fields &
1602 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1603 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1604 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1605 vmcs12->host_cr0 = evmcs->host_cr0;
1606 vmcs12->host_cr3 = evmcs->host_cr3;
1607 vmcs12->host_cr4 = evmcs->host_cr4;
1608 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1609 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1610 vmcs12->host_rip = evmcs->host_rip;
1611 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1612 vmcs12->host_es_selector = evmcs->host_es_selector;
1613 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1614 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1615 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1616 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1617 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1618 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1621 if (unlikely(!(evmcs->hv_clean_fields &
1622 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
1623 vmcs12->pin_based_vm_exec_control =
1624 evmcs->pin_based_vm_exec_control;
1625 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1626 vmcs12->secondary_vm_exec_control =
1627 evmcs->secondary_vm_exec_control;
1630 if (unlikely(!(evmcs->hv_clean_fields &
1631 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1632 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1633 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1636 if (unlikely(!(evmcs->hv_clean_fields &
1637 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1638 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1641 if (unlikely(!(evmcs->hv_clean_fields &
1642 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1643 vmcs12->guest_es_base = evmcs->guest_es_base;
1644 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1645 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1646 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1647 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1648 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1649 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1650 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1651 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1652 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1653 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1654 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1655 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1656 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1657 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1658 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1659 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1660 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1661 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1662 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1663 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1664 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1665 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1666 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1667 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1668 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1669 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1670 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1671 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1672 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1673 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1674 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1675 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1676 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1677 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1678 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1681 if (unlikely(!(evmcs->hv_clean_fields &
1682 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1683 vmcs12->tsc_offset = evmcs->tsc_offset;
1684 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1685 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1688 if (unlikely(!(evmcs->hv_clean_fields &
1689 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1690 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1691 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1692 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1693 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1694 vmcs12->guest_cr0 = evmcs->guest_cr0;
1695 vmcs12->guest_cr3 = evmcs->guest_cr3;
1696 vmcs12->guest_cr4 = evmcs->guest_cr4;
1697 vmcs12->guest_dr7 = evmcs->guest_dr7;
1700 if (unlikely(!(evmcs->hv_clean_fields &
1701 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1702 vmcs12->host_fs_base = evmcs->host_fs_base;
1703 vmcs12->host_gs_base = evmcs->host_gs_base;
1704 vmcs12->host_tr_base = evmcs->host_tr_base;
1705 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1706 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1707 vmcs12->host_rsp = evmcs->host_rsp;
1710 if (unlikely(!(evmcs->hv_clean_fields &
1711 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1712 vmcs12->ept_pointer = evmcs->ept_pointer;
1713 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1716 if (unlikely(!(evmcs->hv_clean_fields &
1717 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1718 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1719 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1720 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1721 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1722 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1723 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1724 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1725 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1726 vmcs12->guest_pending_dbg_exceptions =
1727 evmcs->guest_pending_dbg_exceptions;
1728 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1729 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1730 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1731 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1732 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1737 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1738 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1739 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1740 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
1741 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
1742 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
1743 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
1744 * vmcs12->page_fault_error_code_mask =
1745 * evmcs->page_fault_error_code_mask;
1746 * vmcs12->page_fault_error_code_match =
1747 * evmcs->page_fault_error_code_match;
1748 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1749 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1750 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1751 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1756 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1757 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1758 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1759 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1760 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1761 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1762 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1763 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1764 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1765 * vmcs12->exit_qualification = evmcs->exit_qualification;
1766 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1768 * Not present in struct vmcs12:
1769 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1770 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1771 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1772 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1778 static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
1780 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1781 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1784 * Should not be changed by KVM:
1786 * evmcs->host_es_selector = vmcs12->host_es_selector;
1787 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1788 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1789 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1790 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1791 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1792 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1793 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1794 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1795 * evmcs->host_cr0 = vmcs12->host_cr0;
1796 * evmcs->host_cr3 = vmcs12->host_cr3;
1797 * evmcs->host_cr4 = vmcs12->host_cr4;
1798 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1799 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1800 * evmcs->host_rip = vmcs12->host_rip;
1801 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1802 * evmcs->host_fs_base = vmcs12->host_fs_base;
1803 * evmcs->host_gs_base = vmcs12->host_gs_base;
1804 * evmcs->host_tr_base = vmcs12->host_tr_base;
1805 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1806 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1807 * evmcs->host_rsp = vmcs12->host_rsp;
1808 * sync_vmcs02_to_vmcs12() doesn't read these:
1809 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1810 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1811 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1812 * evmcs->ept_pointer = vmcs12->ept_pointer;
1813 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1814 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1815 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1816 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1817 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
1818 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
1819 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
1820 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
1821 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1822 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1823 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1824 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1825 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1826 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1827 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1828 * evmcs->page_fault_error_code_mask =
1829 * vmcs12->page_fault_error_code_mask;
1830 * evmcs->page_fault_error_code_match =
1831 * vmcs12->page_fault_error_code_match;
1832 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1833 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1834 * evmcs->tsc_offset = vmcs12->tsc_offset;
1835 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1836 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1837 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1838 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1839 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1840 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1841 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1842 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1844 * Not present in struct vmcs12:
1845 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1846 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1847 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1848 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1851 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1852 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1853 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1854 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1855 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1856 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1857 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1858 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1860 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1861 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1862 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1863 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1864 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1865 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1866 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1867 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1868 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1869 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1871 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1872 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1873 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1874 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1875 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1876 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1877 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1878 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1880 evmcs->guest_es_base = vmcs12->guest_es_base;
1881 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1882 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1883 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1884 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1885 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1886 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1887 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1888 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1889 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1891 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1892 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1894 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1895 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1896 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1897 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1899 evmcs->guest_pending_dbg_exceptions =
1900 vmcs12->guest_pending_dbg_exceptions;
1901 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1902 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1904 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1905 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1907 evmcs->guest_cr0 = vmcs12->guest_cr0;
1908 evmcs->guest_cr3 = vmcs12->guest_cr3;
1909 evmcs->guest_cr4 = vmcs12->guest_cr4;
1910 evmcs->guest_dr7 = vmcs12->guest_dr7;
1912 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1914 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1915 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1916 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1917 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1918 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1919 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1920 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1921 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1923 evmcs->exit_qualification = vmcs12->exit_qualification;
1925 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1926 evmcs->guest_rsp = vmcs12->guest_rsp;
1927 evmcs->guest_rflags = vmcs12->guest_rflags;
1929 evmcs->guest_interruptibility_info =
1930 vmcs12->guest_interruptibility_info;
1931 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1932 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1933 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1934 evmcs->vm_entry_exception_error_code =
1935 vmcs12->vm_entry_exception_error_code;
1936 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1938 evmcs->guest_rip = vmcs12->guest_rip;
1940 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1946 * This is an equivalent of the nested hypervisor executing the vmptrld
1947 * instruction.
1949 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
1950 struct kvm_vcpu *vcpu, bool from_launch)
1952 struct vcpu_vmx *vmx = to_vmx(vcpu);
1953 bool evmcs_gpa_changed = false;
1956 if (likely(!vmx->nested.enlightened_vmcs_enabled))
1957 return EVMPTRLD_DISABLED;
1959 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
1960 return EVMPTRLD_DISABLED;
1962 if (unlikely(!vmx->nested.hv_evmcs ||
1963 evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
1964 if (!vmx->nested.hv_evmcs)
1965 vmx->nested.current_vmptr = -1ull;
1967 nested_release_evmcs(vcpu);
1969 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
1970 &vmx->nested.hv_evmcs_map))
1971 return EVMPTRLD_ERROR;
1973 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
1976 * Currently, KVM only supports eVMCS version 1
1977 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set this
1978 * value in the first u32 field of the eVMCS, which should specify
1979 * the eVMCS version.
1981 * The guest should learn the eVMCS versions supported by the host
1982 * by examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM is
1983 * expected to set this CPUID leaf according to the value
1984 * returned in vmcs_version from nested_enable_evmcs().
1986 * However, it turns out that Microsoft Hyper-V fails to comply
1987 * with its own invented interface: when Hyper-V uses eVMCS, it
1988 * just sets the first u32 field of the eVMCS to the revision_id
1989 * specified in MSR_IA32_VMX_BASIC, instead of an eVMCS version
1990 * number, which would be one of the supported versions specified
1991 * in CPUID.0x4000000A.EAX[0:15].
1993 * To work around the Hyper-V bug, accept either a supported eVMCS
1994 * version or the VMCS12 revision_id as valid values for the first
1995 * u32 field of the eVMCS.
1997 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
1998 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
1999 nested_release_evmcs(vcpu);
2000 return EVMPTRLD_VMFAIL;
2003 vmx->nested.dirty_vmcs12 = true;
2004 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2006 evmcs_gpa_changed = true;
2008 * Unlike normal vmcs12, enlightened vmcs12 is not fully
2009 * reloaded from guest's memory (read only fields, fields not
2010 * present in struct hv_enlightened_vmcs, ...). Make sure there
2011 * are no leftovers.
2014 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2015 memset(vmcs12, 0, sizeof(*vmcs12));
2016 vmcs12->hdr.revision_id = VMCS12_REVISION;
2022 * Clean fields data can't be used on VMLAUNCH or when we switch
2023 * between different L2 guests as KVM keeps a single VMCS12 per L1.
2025 if (from_launch || evmcs_gpa_changed)
2026 vmx->nested.hv_evmcs->hv_clean_fields &=
2027 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
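/*
 * For illustration only, a sketch of how these clean-field bits gate work
 * elsewhere (mirroring the checks in prepare_vmcs02_rare() below): a group
 * bit that is still set tells KVM that the corresponding eVMCS group was
 * not touched by L1 and may be skipped, e.g.:
 *
 *	if (!(hv_evmcs->hv_clean_fields &
 *	      HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))
 *		vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
 *
 * Clearing all of the bits here therefore forces every group to be
 * re-processed on the upcoming nested entry.
 */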
2029 return EVMPTRLD_SUCCEEDED;
2032 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
2034 struct vcpu_vmx *vmx = to_vmx(vcpu);
2036 if (vmx->nested.hv_evmcs) {
2037 copy_vmcs12_to_enlightened(vmx);
2038 /* All fields are clean */
2039 vmx->nested.hv_evmcs->hv_clean_fields |=
2040 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2042 copy_vmcs12_to_shadow(vmx);
2045 vmx->nested.need_vmcs12_to_shadow_sync = false;
2048 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
2050 struct vcpu_vmx *vmx =
2051 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
2053 vmx->nested.preemption_timer_expired = true;
2054 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
2055 kvm_vcpu_kick(&vmx->vcpu);
2057 return HRTIMER_NORESTART;
2060 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
2062 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
2063 struct vcpu_vmx *vmx = to_vmx(vcpu);
2066 * A timer value of zero is architecturally guaranteed to cause
2067 * a VMExit prior to executing any instructions in the guest.
2069 if (preemption_timeout == 0) {
2070 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
2074 if (vcpu->arch.virtual_tsc_khz == 0)
2077 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2078 preemption_timeout *= 1000000;
2079 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
2080 hrtimer_start(&vmx->nested.preemption_timer,
2081 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
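/*
 * Worked example with hypothetical numbers (not taken from the code above):
 * the emulated preemption timer ticks once every
 * 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE = 32 TSC cycles.  For a vmcs12
 * timer value of 1000 and virtual_tsc_khz == 2000000 (a 2 GHz virtual TSC):
 *
 *	1000 << 5                 = 32000 TSC cycles
 *	32000 * 1000000 / 2000000 = 16000 ns
 *
 * i.e. the hrtimer armed above fires after 16 us, which is exactly 32000
 * cycles of a 2 GHz TSC.
 */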
2084 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2086 if (vmx->nested.nested_run_pending &&
2087 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
2088 return vmcs12->guest_ia32_efer;
2089 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
2090 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
2092 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
2095 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
2098 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
2099 * according to L0's settings (vmcs12 is irrelevant here). Host
2100 * fields that come from L0 and are not constant, e.g. HOST_CR3,
2101 * will be set as needed prior to VMLAUNCH/VMRESUME.
2103 if (vmx->nested.vmcs02_initialized)
2105 vmx->nested.vmcs02_initialized = true;
2108 * We don't care what the EPTP value is; we just need to guarantee
2109 * it's valid so we don't get a false positive when doing early
2110 * consistency checks.
2112 if (enable_ept && nested_early_check)
2113 vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
2115 /* All VMFUNCs are currently emulated through L0 vmexits. */
2116 if (cpu_has_vmx_vmfunc())
2117 vmcs_write64(VM_FUNCTION_CONTROL, 0);
2119 if (cpu_has_vmx_posted_intr())
2120 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
2122 if (cpu_has_vmx_msr_bitmap())
2123 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2126 * The PML address never changes, so it is constant in vmcs02.
2127 * Conceptually we want to copy the PML index from vmcs01 here,
2128 * and then back to vmcs01 on nested vmexit. But since we flush
2129 * the log and reset GUEST_PML_INDEX on each vmexit, the PML
2130 * index is also effectively constant in vmcs02.
2133 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
2134 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
2137 if (cpu_has_vmx_encls_vmexit())
2138 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
2141 * Set the MSR load/store lists to match L0's settings. Only the
2142 * addresses are constant (for vmcs02); the counts can change based
2143 * on L2's behavior, e.g. switching to/from long mode.
2145 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
2146 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
2147 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
2149 vmx_set_constant_host_state(vmx);
2152 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
2153 struct vmcs12 *vmcs12)
2155 prepare_vmcs02_constant_state(vmx);
2157 vmcs_write64(VMCS_LINK_POINTER, -1ull);
2160 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2161 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2163 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2167 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2169 u32 exec_control, vmcs12_exec_ctrl;
2170 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2172 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
2173 prepare_vmcs02_early_rare(vmx, vmcs12);
2178 exec_control = vmx_pin_based_exec_ctrl(vmx);
2179 exec_control |= (vmcs12->pin_based_vm_exec_control &
2180 ~PIN_BASED_VMX_PREEMPTION_TIMER);
2182 /* Posted interrupts setting is only taken from vmcs12. */
2183 if (nested_cpu_has_posted_intr(vmcs12)) {
2184 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2185 vmx->nested.pi_pending = false;
2187 exec_control &= ~PIN_BASED_POSTED_INTR;
2189 pin_controls_set(vmx, exec_control);
2194 exec_control = vmx_exec_control(vmx); /* L0's desires */
2195 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
2196 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
2197 exec_control &= ~CPU_BASED_TPR_SHADOW;
2198 exec_control |= vmcs12->cpu_based_vm_exec_control;
2200 vmx->nested.l1_tpr_threshold = -1;
2201 if (exec_control & CPU_BASED_TPR_SHADOW)
2202 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2203 #ifdef CONFIG_X86_64
2205 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2206 CPU_BASED_CR8_STORE_EXITING;
2210 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2211 * for I/O port accesses.
2213 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2214 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2217 * This bit will be computed in nested_get_vmcs12_pages, because
2218 * we do not have access to L1's MSR bitmap yet. For now, keep
2219 * the same bit as before, hoping to avoid multiple VMWRITEs that
2220 * only set/clear this bit.
2222 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
2223 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2225 exec_controls_set(vmx, exec_control);
2228 * SECONDARY EXEC CONTROLS
2230 if (cpu_has_secondary_exec_ctrls()) {
2231 exec_control = vmx->secondary_exec_control;
2233 /* Take the following fields only from vmcs12 */
2234 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2235 SECONDARY_EXEC_ENABLE_INVPCID |
2236 SECONDARY_EXEC_RDTSCP |
2237 SECONDARY_EXEC_XSAVES |
2238 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
2239 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2240 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2241 SECONDARY_EXEC_ENABLE_VMFUNC);
2242 if (nested_cpu_has(vmcs12,
2243 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
2244 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
2245 ~SECONDARY_EXEC_ENABLE_PML;
2246 exec_control |= vmcs12_exec_ctrl;
2249 /* VMCS shadowing for L2 is emulated for now */
2250 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2253 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
2254 * will not have to rewrite the controls just for this bit.
2256 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
2257 (vmcs12->guest_cr4 & X86_CR4_UMIP))
2258 exec_control |= SECONDARY_EXEC_DESC;
2260 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2261 vmcs_write16(GUEST_INTR_STATUS,
2262 vmcs12->guest_intr_status);
2264 secondary_exec_controls_set(vmx, exec_control);
2270 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2271 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2272 * on the related bits (if supported by the CPU) in the hope that
2273 * we can avoid VMWrites during vmx_set_efer().
2275 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
2276 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
2277 if (cpu_has_load_ia32_efer()) {
2278 if (guest_efer & EFER_LMA)
2279 exec_control |= VM_ENTRY_IA32E_MODE;
2280 if (guest_efer != host_efer)
2281 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2283 vm_entry_controls_set(vmx, exec_control);
2288 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2289 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2290 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2292 exec_control = vmx_vmexit_ctrl();
2293 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2294 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2295 vm_exit_controls_set(vmx, exec_control);
2298 * Interrupt/Exception Fields
2300 if (vmx->nested.nested_run_pending) {
2301 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2302 vmcs12->vm_entry_intr_info_field);
2303 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2304 vmcs12->vm_entry_exception_error_code);
2305 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2306 vmcs12->vm_entry_instruction_len);
2307 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2308 vmcs12->guest_interruptibility_info);
2309 vmx->loaded_vmcs->nmi_known_unmasked =
2310 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2312 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2316 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2318 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2320 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2321 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2322 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2323 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2324 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2325 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2326 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2327 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2328 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2329 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2330 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2331 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2332 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2333 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2334 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2335 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2336 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2337 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2338 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2339 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2340 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2341 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2342 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2343 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2344 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2345 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2346 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2347 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2348 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2349 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2350 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2351 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2352 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2353 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2354 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2355 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2356 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2357 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2360 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2361 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2362 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2363 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2364 vmcs12->guest_pending_dbg_exceptions);
2365 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2366 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2369 * L1 may access the L2's PDPTRs, so save them to construct
2370 * vmcs12.
2373 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2374 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2375 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2376 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2379 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2380 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2381 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2384 if (nested_cpu_has_xsaves(vmcs12))
2385 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2388 * Whether page-faults are trapped is determined by a combination of
2389 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
2390 * If enable_ept, L0 doesn't care about page faults and we should
2391 * set all of these to L1's desires. However, if !enable_ept, L0 does
2392 * care about (at least some) page faults, and because it is not easy
2393 * (if at all possible?) to merge L0 and L1's desires, we simply ask
2394 * to exit on each and every L2 page fault. This is done by setting
2395 * MASK=MATCH=0 and (see below) EB.PF=1.
2396 * Note that below we don't need special code to set EB.PF beyond the
2397 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2398 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2399 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2401 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
2402 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
2403 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
2404 enable_ept ? vmcs12->page_fault_error_code_match : 0);
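/*
 * For reference, a sketch of the hardware decision that the MASK/MATCH
 * values written above feed into (per the SDM; "eb_pf" stands for bit 14,
 * i.e. #PF, of the exception bitmap and "pfec" for the page-fault error
 * code):
 *
 *	bool match = (pfec & pfec_mask) == pfec_match;
 *	bool exit  = eb_pf ? match : !match;
 *
 * With MASK == MATCH == 0 every error code "matches", so the EB.PF=1 that
 * results from or'ing vmcs01 and vmcs12 (described above) makes every L2
 * #PF exit to L0 when !enable_ept.
 */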
2406 if (cpu_has_vmx_apicv()) {
2407 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2408 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2409 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2410 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2414 * Make sure the msr_autostore list is up to date before we set the
2415 * count in the vmcs02.
2417 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
2419 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
2420 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2421 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2423 set_cr4_guest_host_mask(vmx);
2427 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2428 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2429 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2430 * guest in a way that satisfies both L1's requests and our own needs.
2431 * In addition to modifying the active vmcs (which is vmcs02), this
2432 * function also has other necessary side-effects, like setting various
2433 * vcpu->arch fields.
2434 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
2435 * is assigned to entry_failure_code on failure.
2437 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2438 u32 *entry_failure_code)
2440 struct vcpu_vmx *vmx = to_vmx(vcpu);
2441 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2442 bool load_guest_pdptrs_vmcs12 = false;
2444 if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
2445 prepare_vmcs02_rare(vmx, vmcs12);
2446 vmx->nested.dirty_vmcs12 = false;
2448 load_guest_pdptrs_vmcs12 = !hv_evmcs ||
2449 !(hv_evmcs->hv_clean_fields &
2450 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
2453 if (vmx->nested.nested_run_pending &&
2454 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2455 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2456 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2458 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2459 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2461 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2462 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2463 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
2464 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2466 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2467 * bitwise-or of what L1 wants to trap for L2, and what we want to
2468 * trap. Note that CR0.TS also needs updating - we do this later.
2470 update_exception_bitmap(vcpu);
2471 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2472 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2474 if (vmx->nested.nested_run_pending &&
2475 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2476 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2477 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2478 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2479 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2482 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2484 if (kvm_has_tsc_control)
2485 decache_tsc_multiplier(vmx);
2487 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
2489 if (nested_cpu_has_ept(vmcs12))
2490 nested_ept_init_mmu_context(vcpu);
2493 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2494 * bits which we consider mandatory enabled.
2495 * The CR0_READ_SHADOW is what L2 should have expected to read given
2496 * the specifications by L1; it's not enough to take
2497 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may have
2498 * more bits set than L1 expected.
2500 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2501 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2503 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2504 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2506 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2507 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2508 vmx_set_efer(vcpu, vcpu->arch.efer);
2511 * Guest state is invalid and unrestricted guest is disabled,
2512 * which means L1 attempted VMEntry to L2 with invalid state.
2515 if (vmx->emulation_required) {
2516 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2520 /* Set up the guest's CR3, on either nested EPT or shadow page tables. */
2521 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2522 entry_failure_code))
2526 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2527 * on nested VM-Exit, which can occur without actually running L2 and
2528 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
2529 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
2530 * transition to HLT instead of running L2.
2533 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2535 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
2536 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2537 is_pae_paging(vcpu)) {
2538 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2539 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2540 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2541 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2545 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2547 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2548 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
2549 vmcs12->guest_ia32_perf_global_ctrl)))
2552 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2553 kvm_rip_write(vcpu, vmcs12->guest_rip);
2557 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2559 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
2560 nested_cpu_has_virtual_nmis(vmcs12)))
2563 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
2564 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
2570 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
2572 struct vcpu_vmx *vmx = to_vmx(vcpu);
2573 int maxphyaddr = cpuid_maxphyaddr(vcpu);
2575 /* Check for memory type validity */
2576 switch (new_eptp & VMX_EPTP_MT_MASK) {
2577 case VMX_EPTP_MT_UC:
2578 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
2581 case VMX_EPTP_MT_WB:
2582 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
2589 /* Page-walk levels validity. */
2590 switch (new_eptp & VMX_EPTP_PWL_MASK) {
2591 case VMX_EPTP_PWL_5:
2592 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
2595 case VMX_EPTP_PWL_4:
2596 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
2603 /* Reserved bits should not be set */
2604 if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f)))
2607 /* AD, if set, should be supported */
2608 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
2609 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
2617 * Checks related to VM-Execution Control Fields
2619 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2620 struct vmcs12 *vmcs12)
2622 struct vcpu_vmx *vmx = to_vmx(vcpu);
2624 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2625 vmx->nested.msrs.pinbased_ctls_low,
2626 vmx->nested.msrs.pinbased_ctls_high)) ||
2627 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2628 vmx->nested.msrs.procbased_ctls_low,
2629 vmx->nested.msrs.procbased_ctls_high)))
2632 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2633 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
2634 vmx->nested.msrs.secondary_ctls_low,
2635 vmx->nested.msrs.secondary_ctls_high)))
2638 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
2639 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2640 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2641 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2642 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2643 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2644 nested_vmx_check_nmi_controls(vmcs12) ||
2645 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2646 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2647 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2648 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2649 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2652 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2653 nested_cpu_has_save_preemption_timer(vmcs12))
2656 if (nested_cpu_has_ept(vmcs12) &&
2657 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
2660 if (nested_cpu_has_vmfunc(vmcs12)) {
2661 if (CC(vmcs12->vm_function_control &
2662 ~vmx->nested.msrs.vmfunc_controls))
2665 if (nested_cpu_has_eptp_switching(vmcs12)) {
2666 if (CC(!nested_cpu_has_ept(vmcs12)) ||
2667 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
2676 * Checks related to VM-Exit Control Fields
2678 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2679 struct vmcs12 *vmcs12)
2681 struct vcpu_vmx *vmx = to_vmx(vcpu);
2683 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
2684 vmx->nested.msrs.exit_ctls_low,
2685 vmx->nested.msrs.exit_ctls_high)) ||
2686 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
2693 * Checks related to VM-Entry Control Fields
2695 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2696 struct vmcs12 *vmcs12)
2698 struct vcpu_vmx *vmx = to_vmx(vcpu);
2700 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
2701 vmx->nested.msrs.entry_ctls_low,
2702 vmx->nested.msrs.entry_ctls_high)))
2706 * From the Intel SDM, volume 3:
2707 * Fields relevant to VM-entry event injection must be set properly.
2708 * These fields are the VM-entry interruption-information field, the
2709 * VM-entry exception error code, and the VM-entry instruction length.
2711 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2712 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2713 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2714 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2715 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2716 bool should_have_error_code;
2717 bool urg = nested_cpu_has2(vmcs12,
2718 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2719 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2721 /* VM-entry interruption-info field: interruption type */
2722 if (CC(intr_type == INTR_TYPE_RESERVED) ||
2723 CC(intr_type == INTR_TYPE_OTHER_EVENT &&
2724 !nested_cpu_supports_monitor_trap_flag(vcpu)))
2727 /* VM-entry interruption-info field: vector */
2728 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2729 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2730 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2733 /* VM-entry interruption-info field: deliver error code */
2734 should_have_error_code =
2735 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2736 x86_exception_has_error_code(vector);
2737 if (CC(has_error_code != should_have_error_code))
2740 /* VM-entry exception error code */
2741 if (CC(has_error_code &&
2742 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
2745 /* VM-entry interruption-info field: reserved bits */
2746 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
2749 /* VM-entry instruction length */
2750 switch (intr_type) {
2751 case INTR_TYPE_SOFT_EXCEPTION:
2752 case INTR_TYPE_SOFT_INTR:
2753 case INTR_TYPE_PRIV_SW_EXCEPTION:
2754 if (CC(vmcs12->vm_entry_instruction_len > 15) ||
2755 CC(vmcs12->vm_entry_instruction_len == 0 &&
2756 CC(!nested_cpu_has_zero_length_injection(vcpu))))
2761 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2767 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
2768 struct vmcs12 *vmcs12)
2770 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2771 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2772 nested_check_vm_entry_controls(vcpu, vmcs12))
2775 if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled)
2776 return nested_evmcs_check_controls(vmcs12);
2781 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
2782 struct vmcs12 *vmcs12)
2786 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
2787 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
2788 CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3)))
2791 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
2792 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
2795 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2796 CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
2799 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2800 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
2801 vmcs12->host_ia32_perf_global_ctrl)))
2804 #ifdef CONFIG_X86_64
2805 ia32e = !!(vcpu->arch.efer & EFER_LMA);
2811 if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
2812 CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
2815 if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
2816 CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
2817 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
2818 CC((vmcs12->host_rip) >> 32))
2822 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2823 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2824 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2825 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2826 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2827 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2828 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2829 CC(vmcs12->host_cs_selector == 0) ||
2830 CC(vmcs12->host_tr_selector == 0) ||
2831 CC(vmcs12->host_ss_selector == 0 && !ia32e))
2834 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
2835 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
2836 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
2837 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
2838 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
2839 CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
2843 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2844 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2845 * the values of the LMA and LME bits in the field must each be that of
2846 * the host address-space size VM-exit control.
2848 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2849 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
2850 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
2851 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
2858 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2859 struct vmcs12 *vmcs12)
2862 struct vmcs12 *shadow;
2863 struct kvm_host_map map;
2865 if (vmcs12->vmcs_link_pointer == -1ull)
2868 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
2871 if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
2876 if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
2877 CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
2880 kvm_vcpu_unmap(vcpu, &map, false);
2885 * Checks related to Guest Non-register State
2887 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2889 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2890 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT))
2896 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
2897 struct vmcs12 *vmcs12,
2902 *exit_qual = ENTRY_FAIL_DEFAULT;
2904 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
2905 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
2908 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
2909 CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
2912 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
2913 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
2916 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
2917 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
2921 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2922 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
2923 vmcs12->guest_ia32_perf_global_ctrl)))
2927 * If the load IA32_EFER VM-entry control is 1, the following checks
2928 * are performed on the field for the IA32_EFER MSR:
2929 * - Bits reserved in the IA32_EFER MSR must be 0.
2930 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
2931 * the IA-32e mode guest VM-entry control. It must also be identical
2932 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
2933 * CR0.PG) is 1.
2935 if (to_vmx(vcpu)->nested.nested_run_pending &&
2936 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
2937 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
2938 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
2939 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
2940 CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
2941 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
2945 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
2946 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
2947 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
2950 if (nested_check_guest_non_reg_state(vmcs12))
2956 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
2958 struct vcpu_vmx *vmx = to_vmx(vcpu);
2959 unsigned long cr3, cr4;
2962 if (!nested_early_check)
2965 if (vmx->msr_autoload.host.nr)
2966 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2967 if (vmx->msr_autoload.guest.nr)
2968 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
2972 vmx_prepare_switch_to_guest(vcpu);
2975 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
2976 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
2977 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
2978 * there is no need to preserve other bits or save/restore the field.
2980 vmcs_writel(GUEST_RFLAGS, 0);
2982 cr3 = __get_current_cr3_fast();
2983 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
2984 vmcs_writel(HOST_CR3, cr3);
2985 vmx->loaded_vmcs->host_state.cr3 = cr3;
2988 cr4 = cr4_read_shadow();
2989 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
2990 vmcs_writel(HOST_CR4, cr4);
2991 vmx->loaded_vmcs->host_state.cr4 = cr4;
2995 "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
2996 "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
2998 __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
2999 "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
3001 "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
3003 /* Check if vmlaunch or vmresume is needed */
3004 "cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
3007 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
3008 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
3009 * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
3010 * result of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
3012 "call vmx_vmenter\n\t"
3015 : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
3016 : [HOST_RSP]"r"((unsigned long)HOST_RSP),
3017 [loaded_vmcs]"r"(vmx->loaded_vmcs),
3018 [launched]"i"(offsetof(struct loaded_vmcs, launched)),
3019 [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
3020 [wordsize]"i"(sizeof(ulong))
3024 if (vmx->msr_autoload.host.nr)
3025 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3026 if (vmx->msr_autoload.guest.nr)
3027 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3030 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);
3034 trace_kvm_nested_vmenter_failed(
3035 "early hardware check VM-instruction error: ", error);
3036 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3041 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
3044 if (hw_breakpoint_active())
3045 set_debugreg(__this_cpu_read(cpu_dr7), 7);
3049 * A non-failing VMEntry means we somehow entered guest mode with
3050 * an illegal RIP, and that's just the tip of the iceberg. There
3051 * is no telling what memory has been modified or what state has
3052 * been exposed to unknown code. Hitting this all but guarantees
3053 * a (very critical) hardware issue.
3055 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
3056 VMX_EXIT_REASONS_FAILED_VMENTRY));
3061 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
3063 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3064 struct vcpu_vmx *vmx = to_vmx(vcpu);
3065 struct kvm_host_map *map;
3070 * hv_evmcs may end up unmapped after migration (when L2 was
3071 * running); map it here to make sure vmcs12 changes are
3072 * properly reflected.
3074 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) {
3075 enum nested_evmptrld_status evmptrld_status =
3076 nested_vmx_handle_enlightened_vmptrld(vcpu, false);
3078 if (evmptrld_status == EVMPTRLD_VMFAIL ||
3079 evmptrld_status == EVMPTRLD_ERROR) {
3080 pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
3082 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3083 vcpu->run->internal.suberror =
3084 KVM_INTERNAL_ERROR_EMULATION;
3085 vcpu->run->internal.ndata = 0;
3090 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3092 * Translate L1 physical address to host physical
3093 * address for vmcs02. Keep the page pinned, so this
3094 * physical address remains valid. We keep a reference
3095 * to it so we can release it later.
3097 if (vmx->nested.apic_access_page) { /* shouldn't happen */
3098 kvm_release_page_clean(vmx->nested.apic_access_page);
3099 vmx->nested.apic_access_page = NULL;
3101 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
3102 if (!is_error_page(page)) {
3103 vmx->nested.apic_access_page = page;
3104 hpa = page_to_phys(vmx->nested.apic_access_page);
3105 vmcs_write64(APIC_ACCESS_ADDR, hpa);
3107 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
3109 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3110 vcpu->run->internal.suberror =
3111 KVM_INTERNAL_ERROR_EMULATION;
3112 vcpu->run->internal.ndata = 0;
3117 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3118 map = &vmx->nested.virtual_apic_map;
3120 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
3121 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
3122 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
3123 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
3124 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3126 * The processor will never use the TPR shadow, so simply
3127 * clear the bit from the execution control. Such a
3128 * configuration is useless, but it happens in tests.
3129 * For any other configuration, failing the vm entry is
3130 * _not_ what the processor does but it's basically the
3131 * only possibility we have.
3133 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
3136 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
3137 * force VM-Entry to fail.
3139 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
3143 if (nested_cpu_has_posted_intr(vmcs12)) {
3144 map = &vmx->nested.pi_desc_map;
3146 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
3147 vmx->nested.pi_desc =
3148 (struct pi_desc *)(((void *)map->hva) +
3149 offset_in_page(vmcs12->posted_intr_desc_addr));
3150 vmcs_write64(POSTED_INTR_DESC_ADDR,
3151 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
3154 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
3155 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3157 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3162 * Intel's VMX Instruction Reference specifies a common set of prerequisites
3163 * for running VMX instructions (except VMXON, whose prerequisites are
3164 * slightly different). It also specifies what exception to inject otherwise.
3165 * Note that many of these exceptions have priority over VM exits, so they
3166 * don't have to be checked again here.
3168 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
3170 if (!to_vmx(vcpu)->nested.vmxon) {
3171 kvm_queue_exception(vcpu, UD_VECTOR);
3175 if (vmx_get_cpl(vcpu)) {
3176 kvm_inject_gp(vcpu, 0);
3183 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
3185 u8 rvi = vmx_get_rvi();
3186 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
3188 return ((rvi & 0xf0) > (vppr & 0xf0));
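/*
 * Example with made-up values: rvi == 0x51 (highest requested vector 0x51,
 * priority class 5) and vppr == 0x30 (class 3) gives 0x50 > 0x30, i.e. a
 * virtual interrupt is deliverable; rvi == 0x32 with the same vppr is not,
 * since both fall into priority class 3.
 */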
3191 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3192 struct vmcs12 *vmcs12);
3195 * If from_vmentry is false, this is being called from state restore (either RSM
3196 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
3199 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
3200 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail
3201 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit
3202 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
3204 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
3207 struct vcpu_vmx *vmx = to_vmx(vcpu);
3208 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3209 bool evaluate_pending_interrupts;
3210 u32 exit_reason = EXIT_REASON_INVALID_STATE;
3213 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
3214 kvm_vcpu_flush_tlb_current(vcpu);
3216 evaluate_pending_interrupts = exec_controls_get(vmx) &
3217 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
3218 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
3219 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
3221 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3222 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3223 if (kvm_mpx_supported() &&
3224 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
3225 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3228 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3229 * nested early checks are disabled. In the event of a "late" VM-Fail,
3230 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
3231 * software model to the pre-VMEntry host state. When EPT is disabled,
3232 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
3233 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3234 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3235 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3236 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3237 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
3238 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3239 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3240 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
3241 * path would need to manually save/restore vmcs01.GUEST_CR3.
3243 if (!enable_ept && !nested_early_check)
3244 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3246 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3248 prepare_vmcs02_early(vmx, vmcs12);
3251 if (unlikely(!nested_get_vmcs12_pages(vcpu)))
3252 return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
3254 if (nested_vmx_check_vmentry_hw(vcpu)) {
3255 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3256 return NVMX_VMENTRY_VMFAIL;
3259 if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
3260 goto vmentry_fail_vmexit;
3263 enter_guest_mode(vcpu);
3264 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3265 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
3267 if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
3268 goto vmentry_fail_vmexit_guest_mode;
3271 exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
3272 exit_qual = nested_vmx_load_msr(vcpu,
3273 vmcs12->vm_entry_msr_load_addr,
3274 vmcs12->vm_entry_msr_load_count);
3276 goto vmentry_fail_vmexit_guest_mode;
3279 * The MMU is not initialized to point at the right entities yet and
3280 * "get pages" would need to read data from the guest (i.e. we will
3281 * need to perform gpa to hpa translation). Request a call
3282 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3283 * have already been set at vmentry time and should not be reset.
3285 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
3289 * If L1 had a pending IRQ/NMI until it executed
3290 * VMLAUNCH/VMRESUME which wasn't delivered because it was
3291 * disallowed (e.g. interrupts disabled), L0 needs to
3292 * evaluate if this pending event should cause an exit from L2
3293 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
3294 * intercept EXTERNAL_INTERRUPT).
3296 * Usually this would be handled by the processor noticing an
3297 * IRQ/NMI window request, or checking RVI during evaluation of
3298 * pending virtual interrupts. However, this setting was done
3299 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3300 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3302 if (unlikely(evaluate_pending_interrupts))
3303 kvm_make_request(KVM_REQ_EVENT, vcpu);
3306 * Do not start the preemption timer hrtimer until after we know
3307 * we are successful, so that only nested_vmx_vmexit needs to cancel
3308 * the timer.
3310 vmx->nested.preemption_timer_expired = false;
3311 if (nested_cpu_has_preemption_timer(vmcs12))
3312 vmx_start_preemption_timer(vcpu);
3315 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3316 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3317 * returned as far as L1 is concerned. It will only return (and set
3318 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3320 return NVMX_VMENTRY_SUCCESS;
3323 * A failed consistency check that leads to a VMExit during L1's
3324 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3325 * 26.7 "VM-entry failures during or after loading guest state".
3327 vmentry_fail_vmexit_guest_mode:
3328 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3329 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3330 leave_guest_mode(vcpu);
3332 vmentry_fail_vmexit:
3333 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3336 return NVMX_VMENTRY_VMEXIT;
3338 load_vmcs12_host_state(vcpu, vmcs12);
3339 vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
3340 vmcs12->exit_qualification = exit_qual;
3341 if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
3342 vmx->nested.need_vmcs12_to_shadow_sync = true;
3343 return NVMX_VMENTRY_VMEXIT;
3347 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3348 * for running an L2 nested guest.
3350 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3352 struct vmcs12 *vmcs12;
3353 enum nvmx_vmentry_status status;
3354 struct vcpu_vmx *vmx = to_vmx(vcpu);
3355 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
3356 enum nested_evmptrld_status evmptrld_status;
3358 if (!nested_vmx_check_permission(vcpu))
3361 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
3362 if (evmptrld_status == EVMPTRLD_ERROR) {
3363 kvm_queue_exception(vcpu, UD_VECTOR);
3365 } else if (evmptrld_status == EVMPTRLD_VMFAIL) {
3366 return nested_vmx_failInvalid(vcpu);
3369 if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
3370 return nested_vmx_failInvalid(vcpu);
3372 vmcs12 = get_vmcs12(vcpu);
3375 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3376 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3377 * rather than RFLAGS.ZF, and no error number is stored to the
3378 * VM-instruction error field.
3380 if (vmcs12->hdr.shadow_vmcs)
3381 return nested_vmx_failInvalid(vcpu);
3383 if (vmx->nested.hv_evmcs) {
3384 copy_enlightened_to_vmcs12(vmx);
3385 /* Enlightened VMCS doesn't have launch state */
3386 vmcs12->launch_state = !launch;
3387 } else if (enable_shadow_vmcs) {
3388 copy_shadow_to_vmcs12(vmx);
3392 * The nested entry process starts with enforcing various prerequisites
3393 * on vmcs12 as required by the Intel SDM, and acting appropriately when
3394 * they fail: As the SDM explains, some conditions should cause the
3395 * instruction to fail, while others will cause the instruction to seem
3396 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3397 * To speed up the normal (success) code path, we should avoid checking
3398 * for misconfigurations which will anyway be caught by the processor
3399 * when using the merged vmcs02.
3401 if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
3402 return nested_vmx_failValid(vcpu,
3403 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3405 if (vmcs12->launch_state == launch)
3406 return nested_vmx_failValid(vcpu,
3407 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3408 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3410 if (nested_vmx_check_controls(vcpu, vmcs12))
3411 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3413 if (nested_vmx_check_host_state(vcpu, vmcs12))
3414 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3417 * We're finally done with prerequisite checking, and can start with
3418 * the nested entry.
3420 vmx->nested.nested_run_pending = 1;
3421 status = nested_vmx_enter_non_root_mode(vcpu, true);
3422 if (unlikely(status != NVMX_VMENTRY_SUCCESS))
3423 goto vmentry_failed;
3425 /* Hide L1D cache contents from the nested guest. */
3426 vmx->vcpu.arch.l1tf_flush_l1d = true;
3429 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3430 * also be used as part of restoring nVMX state for
3431 * snapshot restore (migration).
3433 * In this flow, it is assumed that the vmcs12 cache was
3434 * transferred as part of the captured nVMX state and should
3435 * therefore not be read from guest memory (which may not
3436 * exist on the destination host yet).
3438 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3441 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3442 * awakened by event injection or by an NMI-window VM-exit or
3443 * by an interrupt-window VM-exit, halt the vcpu.
3445 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
3446 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3447 !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_NMI_WINDOW_EXITING) &&
3448 !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_INTR_WINDOW_EXITING) &&
3449 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3450 vmx->nested.nested_run_pending = 0;
3451 return kvm_vcpu_halt(vcpu);
3456 vmx->nested.nested_run_pending = 0;
3457 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
3459 if (status == NVMX_VMENTRY_VMEXIT)
3461 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
3462 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3466 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3467 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3468 * This function returns the new value we should put in vmcs12.guest_cr0.
3469 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3470 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3471 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3472 * didn't trap the bit, because if L1 did, so would L0).
3473 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3474 * been modified by L2, and L1 knows it. So just leave the old value of
3475 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3476 * isn't relevant, because if L0 traps this bit it can set it to anything.
3477 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3478 * changed these bits, and therefore they need to be updated, but L0
3479 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3480 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3482 static inline unsigned long
3483 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3486 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3487 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3488 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3489 vcpu->arch.cr0_guest_owned_bits));
3492 static inline unsigned long
3493 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3496 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3497 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3498 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3499 vcpu->arch.cr4_guest_owned_bits));
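/*
 * Worked example with made-up masks, for the CR0 case above: suppose
 * vmcs12->cr0_guest_host_mask traps only CR0.TS and L0's
 * cr0_guest_owned_bits contains only CR0.MP.  Then:
 *  1. CR0.MP comes from vmcs02 GUEST_CR0 - neither L0 nor L1 trapped it,
 *     so L2 wrote it directly.
 *  2. CR0.TS comes from vmcs12->guest_cr0 - L1 trapped it, so L2 could not
 *     have changed it.
 *  3. Every other bit comes from vmcs02 CR0_READ_SHADOW, which holds the
 *     value L2 believes it wrote for bits that only L0 trapped.
 */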
3502 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3503 struct vmcs12 *vmcs12)
3508 if (vcpu->arch.exception.injected) {
3509 nr = vcpu->arch.exception.nr;
3510 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3512 if (kvm_exception_is_soft(nr)) {
3513 vmcs12->vm_exit_instruction_len =
3514 vcpu->arch.event_exit_inst_len;
3515 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3517 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3519 if (vcpu->arch.exception.has_error_code) {
3520 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3521 vmcs12->idt_vectoring_error_code =
3522 vcpu->arch.exception.error_code;
3525 vmcs12->idt_vectoring_info_field = idt_vectoring;
3526 } else if (vcpu->arch.nmi_injected) {
3527 vmcs12->idt_vectoring_info_field =
3528 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3529 } else if (vcpu->arch.interrupt.injected) {
3530 nr = vcpu->arch.interrupt.nr;
3531 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3533 if (vcpu->arch.interrupt.soft) {
3534 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3535 vmcs12->vm_entry_instruction_len =
3536 vcpu->arch.event_exit_inst_len;
3538 idt_vectoring |= INTR_TYPE_EXT_INTR;
3540 vmcs12->idt_vectoring_info_field = idt_vectoring;
3545 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3547 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3551 * Don't need to mark the APIC access page dirty; it is never
3552 * written to by the CPU during APIC virtualization.
3555 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3556 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3557 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3560 if (nested_cpu_has_posted_intr(vmcs12)) {
3561 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3562 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3566 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
3568 struct vcpu_vmx *vmx = to_vmx(vcpu);
3573 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
3576 vmx->nested.pi_pending = false;
3577 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3580 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3581 if (max_irr != 256) {
3582 vapic_page = vmx->nested.virtual_apic_map.hva;
3586 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3587 vapic_page, &max_irr);
3588 status = vmcs_read16(GUEST_INTR_STATUS);
3589 if ((u8)max_irr > ((u8)status & 0xff)) {
3591 status |= (u8)max_irr;
3592 vmcs_write16(GUEST_INTR_STATUS, status);
3596 nested_mark_vmcs12_pages_dirty(vcpu);
3599 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3600 unsigned long exit_qual)
3602 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3603 unsigned int nr = vcpu->arch.exception.nr;
3604 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3606 if (vcpu->arch.exception.has_error_code) {
3607 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3608 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3611 if (kvm_exception_is_soft(nr))
3612 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3614 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3616 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3617 vmx_get_nmi_mask(vcpu))
3618 intr_info |= INTR_INFO_UNBLOCK_NMI;
3620 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3624 * Returns true if a debug trap is pending delivery.
3626 * In KVM, debug traps bear an exception payload. As such, the class of a #DB
3627 * exception may be inferred from the presence of an exception payload.
3629 static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
3631 return vcpu->arch.exception.pending &&
3632 vcpu->arch.exception.nr == DB_VECTOR &&
3633 vcpu->arch.exception.payload;
3637 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
3638 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
3639 * represents these debug traps with a payload that is said to be compatible
3640 * with the 'pending debug exceptions' field, write the payload to the VMCS
3641 * field if a VM-exit is delivered before the debug trap.
3643 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
3645 if (vmx_pending_dbg_trap(vcpu))
3646 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
3647 vcpu->arch.exception.payload);
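/*
 * Nested event injection. Events are considered roughly in architectural
 * priority order: latched INIT, exceptions that are not debug traps, MTF,
 * the remaining exceptions, the VMX-preemption timer, NMIs and finally
 * external interrupts. When an event must wait (a nested VM-entry or a
 * reinjection is still in flight), -EBUSY is returned so the caller comes
 * back once the blocking condition clears.
 */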
3650 static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
3652 struct vcpu_vmx *vmx = to_vmx(vcpu);
3653 unsigned long exit_qual;
3654 bool block_nested_events =
3655 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
3656 bool mtf_pending = vmx->nested.mtf_pending;
3657 struct kvm_lapic *apic = vcpu->arch.apic;
3660 * Clear the MTF state. If a higher priority VM-exit is delivered first,
3661 * this state is discarded.
3663 if (!block_nested_events)
3664 vmx->nested.mtf_pending = false;
3666 if (lapic_in_kernel(vcpu) &&
3667 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
3668 if (block_nested_events)
3670 nested_vmx_update_pending_dbg(vcpu);
3671 clear_bit(KVM_APIC_INIT, &apic->pending_events);
3672 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
3677 * Process any exceptions that are not debug traps before MTF.
3679 if (vcpu->arch.exception.pending &&
3680 !vmx_pending_dbg_trap(vcpu) &&
3681 nested_vmx_check_exception(vcpu, &exit_qual)) {
3682 if (block_nested_events)
3684 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3689 if (block_nested_events)
3691 nested_vmx_update_pending_dbg(vcpu);
3692 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
3696 if (vcpu->arch.exception.pending &&
3697 nested_vmx_check_exception(vcpu, &exit_qual)) {
3698 if (block_nested_events)
3700 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3704 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3705 vmx->nested.preemption_timer_expired) {
3706 if (block_nested_events)
3708 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3712 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
3713 if (block_nested_events)
3715 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3716 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3717 INTR_INFO_VALID_MASK, 0);
3719 * The NMI-triggered VM exit counts as injection:
3720 * clear this one and block further NMIs.
3722 vcpu->arch.nmi_pending = 0;
3723 vmx_set_nmi_mask(vcpu, true);
3727 if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) {
3728 if (block_nested_events)
3730 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
3734 vmx_complete_nested_posted_interrupt(vcpu);
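/*
 * Convert the time left on the emulated hrtimer back into VMX-preemption
 * timer units: nanoseconds are scaled by the guest's TSC frequency and
 * then shifted right by the emulated rate (5, i.e. one unit per 32 TSC
 * ticks). Purely illustrative numbers: with virtual_tsc_khz = 2000000
 * (a 2 GHz guest TSC) and 1 ms remaining, 1000000 * 2000000 / 1000000
 * = 2000000 TSC ticks, >> 5 = 62500 timer units.
 */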
3738 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
3741 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
3744 if (ktime_to_ns(remaining) <= 0)
3747 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
3748 do_div(value, 1000000);
3749 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
3752 static bool is_vmcs12_ext_field(unsigned long field)
3755 case GUEST_ES_SELECTOR:
3756 case GUEST_CS_SELECTOR:
3757 case GUEST_SS_SELECTOR:
3758 case GUEST_DS_SELECTOR:
3759 case GUEST_FS_SELECTOR:
3760 case GUEST_GS_SELECTOR:
3761 case GUEST_LDTR_SELECTOR:
3762 case GUEST_TR_SELECTOR:
3763 case GUEST_ES_LIMIT:
3764 case GUEST_CS_LIMIT:
3765 case GUEST_SS_LIMIT:
3766 case GUEST_DS_LIMIT:
3767 case GUEST_FS_LIMIT:
3768 case GUEST_GS_LIMIT:
3769 case GUEST_LDTR_LIMIT:
3770 case GUEST_TR_LIMIT:
3771 case GUEST_GDTR_LIMIT:
3772 case GUEST_IDTR_LIMIT:
3773 case GUEST_ES_AR_BYTES:
3774 case GUEST_DS_AR_BYTES:
3775 case GUEST_FS_AR_BYTES:
3776 case GUEST_GS_AR_BYTES:
3777 case GUEST_LDTR_AR_BYTES:
3778 case GUEST_TR_AR_BYTES:
3785 case GUEST_LDTR_BASE:
3787 case GUEST_GDTR_BASE:
3788 case GUEST_IDTR_BASE:
3789 case GUEST_PENDING_DBG_EXCEPTIONS:
3799 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
3800 struct vmcs12 *vmcs12)
3802 struct vcpu_vmx *vmx = to_vmx(vcpu);
3804 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
3805 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
3806 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
3807 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
3808 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
3809 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
3810 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
3811 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
3812 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
3813 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
3814 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
3815 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
3816 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
3817 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
3818 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
3819 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
3820 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
3821 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
3822 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
3823 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
3824 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
3825 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
3826 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
3827 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
3828 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
3829 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
3830 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
3831 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
3832 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
3833 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
3834 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
3835 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
3836 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
3837 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
3838 vmcs12->guest_pending_dbg_exceptions =
3839 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
3840 if (kvm_mpx_supported())
3841 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3843 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
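/*
 * The "rare" guest fields live in vmcs02 on the CPU, so reading them back
 * requires vmcs02 to be the loaded VMCS. Temporarily point loaded_vmcs at
 * vmcs02, VMREAD everything via sync_vmcs02_to_vmcs12_rare(), then switch
 * back to vmcs01; the vmx_vcpu_load() calls keep the per-CPU VMCS state
 * consistent across the swap.
 */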
3846 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
3847 struct vmcs12 *vmcs12)
3849 struct vcpu_vmx *vmx = to_vmx(vcpu);
3852 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
3856 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
3859 vmx->loaded_vmcs = &vmx->nested.vmcs02;
3860 vmx_vcpu_load(&vmx->vcpu, cpu);
3862 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
3864 vmx->loaded_vmcs = &vmx->vmcs01;
3865 vmx_vcpu_load(&vmx->vcpu, cpu);
3870 * Update the guest state fields of vmcs12 to reflect changes that
3871 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
3872 * VM-entry controls is also updated, since this is really a guest
3873 * state bit.)
3875 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3877 struct vcpu_vmx *vmx = to_vmx(vcpu);
3879 if (vmx->nested.hv_evmcs)
3880 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
3882 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
3884 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
3885 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
3887 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
3888 vmcs12->guest_rip = kvm_rip_read(vcpu);
3889 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
3891 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
3892 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
3894 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
3895 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
3896 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
3898 vmcs12->guest_interruptibility_info =
3899 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3901 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
3902 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
3904 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
3906 if (nested_cpu_has_preemption_timer(vmcs12) &&
3907 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
3908 vmcs12->vmx_preemption_timer_value =
3909 vmx_get_preemption_timer_value(vcpu);
3912 * In some cases (usually, nested EPT), L2 is allowed to change its
3913 * own CR3 without exiting. If it has changed it, we must keep it.
3914 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
3915 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
3917 * Additionally, restore L2's PDPTR to vmcs12.
3920 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
3921 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
3922 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
3923 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
3924 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
3925 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
3929 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
3931 if (nested_cpu_has_vid(vmcs12))
3932 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
3934 vmcs12->vm_entry_controls =
3935 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
3936 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
3938 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
3939 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
3941 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
3942 vmcs12->guest_ia32_efer = vcpu->arch.efer;
3946 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
3947 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
3948 * and this function updates it to reflect the changes to the guest state while
3949 * L2 was running (and perhaps made some exits which were handled directly by L0
3950 * without going back to L1), and to reflect the exit reason.
3951 * Note that we do not have to copy here all VMCS fields, just those that
3952 * could have changed by the L2 guest or the exit - i.e., the guest-state and
3953 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
3954 * which already writes to vmcs12 directly.
3956 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
3957 u32 exit_reason, u32 exit_intr_info,
3958 unsigned long exit_qualification)
3960 /* update exit information fields: */
3961 vmcs12->vm_exit_reason = exit_reason;
3962 vmcs12->exit_qualification = exit_qualification;
3963 vmcs12->vm_exit_intr_info = exit_intr_info;
3965 vmcs12->idt_vectoring_info_field = 0;
3966 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3967 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
3969 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
3970 vmcs12->launch_state = 1;
3972 /* vm_entry_intr_info_field is cleared on exit. Emulate this
3973 * instead of reading the real value. */
3974 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
3977 * Transfer the event that L0 or L1 may have wanted to inject into
3978 * L2 to IDT_VECTORING_INFO_FIELD.
3980 vmcs12_save_pending_event(vcpu, vmcs12);
3983 * According to spec, there's no need to store the guest's
3984 * MSRs if the exit is due to a VM-entry failure that occurs
3985 * during or after loading the guest state. Since this exit
3986 * does not fall in that category, we need to save the MSRs.
3988 if (nested_vmx_store_msr(vcpu,
3989 vmcs12->vm_exit_msr_store_addr,
3990 vmcs12->vm_exit_msr_store_count))
3991 nested_vmx_abort(vcpu,
3992 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
3996 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
3997 * preserved above and would only end up incorrectly in L1.
3999 vcpu->arch.nmi_injected = false;
4000 kvm_clear_exception_queue(vcpu);
4001 kvm_clear_interrupt_queue(vcpu);
4005 * A part of what we need to do when the nested L2 guest exits and we want to
4006 * run its L1 parent is to reset L1's guest state to the host state specified
4007 * in vmcs12.
4008 * This function is to be called not only on normal nested exit, but also on
4009 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4010 * Failures During or After Loading Guest State").
4011 * This function should be called when the active VMCS is L1's (vmcs01).
4013 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
4014 struct vmcs12 *vmcs12)
4016 struct kvm_segment seg;
4017 u32 entry_failure_code;
4019 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
4020 vcpu->arch.efer = vmcs12->host_ia32_efer;
4021 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4022 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
4024 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
4025 vmx_set_efer(vcpu, vcpu->arch.efer);
4027 kvm_rsp_write(vcpu, vmcs12->host_rsp);
4028 kvm_rip_write(vcpu, vmcs12->host_rip);
4029 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
4030 vmx_set_interrupt_shadow(vcpu, 0);
4033 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
4034 * actually changed, because vmx_set_cr0 refers to efer set above.
4036 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
4037 * (KVM doesn't change it);
4039 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
4040 vmx_set_cr0(vcpu, vmcs12->host_cr0);
4042 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
4043 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4044 vmx_set_cr4(vcpu, vmcs12->host_cr4);
4046 nested_ept_uninit_mmu_context(vcpu);
4049 * Only PDPTE load can fail as the value of cr3 was checked on entry and
4050 * couldn't have changed.
4052 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
4053 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
4056 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
4058 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
4060 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
4061 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
4062 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
4063 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
4064 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
4065 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
4066 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
4068 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
4069 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
4070 vmcs_write64(GUEST_BNDCFGS, 0);
4072 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
4073 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
4074 vcpu->arch.pat = vmcs12->host_ia32_pat;
4076 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
4077 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4078 vmcs12->host_ia32_perf_global_ctrl));
4080 /* Set L1 segment info according to Intel SDM
4081 27.5.2 Loading Host Segment and Descriptor-Table Registers */
4082 seg = (struct kvm_segment) {
4084 .limit = 0xFFFFFFFF,
4085 .selector = vmcs12->host_cs_selector,
4091 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4092 seg.l = 1;
4093 else
4094 seg.db = 1;
4095 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
4096 seg = (struct kvm_segment) {
4098 .limit = 0xFFFFFFFF,
4105 seg.selector = vmcs12->host_ds_selector;
4106 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
4107 seg.selector = vmcs12->host_es_selector;
4108 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
4109 seg.selector = vmcs12->host_ss_selector;
4110 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
4111 seg.selector = vmcs12->host_fs_selector;
4112 seg.base = vmcs12->host_fs_base;
4113 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
4114 seg.selector = vmcs12->host_gs_selector;
4115 seg.base = vmcs12->host_gs_base;
4116 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
4117 seg = (struct kvm_segment) {
4118 .base = vmcs12->host_tr_base,
4120 .selector = vmcs12->host_tr_selector,
4124 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
4126 kvm_set_dr(vcpu, 7, 0x400);
4127 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4129 if (cpu_has_vmx_msr_bitmap())
4130 vmx_update_msr_bitmap(vcpu);
4132 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
4133 vmcs12->vm_exit_msr_load_count))
4134 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
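/*
 * Recover the EFER value L1 was running with before a failed VM-entry:
 * prefer what vmcs01 would load on VM-entry, fall back to host_efer when
 * the CPU's load-IA32_EFER control exists but is unused (guest and host
 * EFER then match), and otherwise search the MSR autoload list and KVM's
 * shared-MSR slots.
 */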
4137 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
4139 struct shared_msr_entry *efer_msr;
4142 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
4143 return vmcs_read64(GUEST_IA32_EFER);
4145 if (cpu_has_load_ia32_efer())
4146 return host_efer;
4148 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
4149 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
4150 return vmx->msr_autoload.guest.val[i].value;
4153 efer_msr = find_msr_entry(vmx, MSR_EFER);
4155 return efer_msr->data;
4160 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
4162 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4163 struct vcpu_vmx *vmx = to_vmx(vcpu);
4164 struct vmx_msr_entry g, h;
4168 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
4170 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
4172 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
4173 * as vmcs01.GUEST_DR7 contains a userspace defined value
4174 * and vcpu->arch.dr7 is not squirreled away before the
4175 * nested VMENTER (not worth adding a variable in nested_vmx).
4177 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
4178 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
4180 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
4184 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
4185 * handle a variety of side effects to KVM's software model.
4187 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
4189 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
4190 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
4192 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4193 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
4195 nested_ept_uninit_mmu_context(vcpu);
4196 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
4197 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
4200 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
4201 * from vmcs01 (if necessary). The PDPTRs are not loaded on
4202 * VMFail, like everything else we just need to ensure our
4203 * software model is up-to-date.
4206 ept_save_pdptrs(vcpu);
4208 kvm_mmu_reset_context(vcpu);
4210 if (cpu_has_vmx_msr_bitmap())
4211 vmx_update_msr_bitmap(vcpu);
4214 * This nasty bit of open coding is a compromise between blindly
4215 * loading L1's MSRs using the exit load lists (incorrect emulation
4216 * of VMFail), leaving the nested VM's MSRs in the software model
4217 * (incorrect behavior) and snapshotting the modified MSRs (too
4218 * expensive since the lists are unbound by hardware). For each
4219 * MSR that was (prematurely) loaded from the nested VMEntry load
4220 * list, reload it from the exit load list if it exists and differs
4221 * from the guest value. The intent is to stuff host state as
4222 * silently as possible, not to fully process the exit load list.
4224 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
4225 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
4226 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
4227 pr_debug_ratelimited(
4228 "%s read MSR index failed (%u, 0x%08llx)\n",
4233 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
4234 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
4235 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
4236 pr_debug_ratelimited(
4237 "%s read MSR failed (%u, 0x%08llx)\n",
4241 if (h.index != g.index)
4243 if (h.value == g.value)
4246 if (nested_vmx_load_msr_check(vcpu, &h)) {
4247 pr_debug_ratelimited(
4248 "%s check failed (%u, 0x%x, 0x%x)\n",
4249 __func__, j, h.index, h.reserved);
4253 if (kvm_set_msr(vcpu, h.index, h.value)) {
4254 pr_debug_ratelimited(
4255 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
4256 __func__, j, h.index, h.value);
4265 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4269 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4270 * and modify vmcs12 to make it see what it would expect to see there if
4271 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
4273 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
4274 u32 exit_intr_info, unsigned long exit_qualification)
4276 struct vcpu_vmx *vmx = to_vmx(vcpu);
4277 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4279 /* trying to cancel vmlaunch/vmresume is a bug */
4280 WARN_ON_ONCE(vmx->nested.nested_run_pending);
4282 /* Service the TLB flush request for L2 before switching to L1. */
4283 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
4284 kvm_vcpu_flush_tlb_current(vcpu);
4286 leave_guest_mode(vcpu);
4288 if (nested_cpu_has_preemption_timer(vmcs12))
4289 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4291 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
4292 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
4294 if (likely(!vmx->fail)) {
4295 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
4297 if (exit_reason != -1)
4298 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
4299 exit_qualification);
4302 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
4303 * also be used to capture vmcs12 cache as part of
4304 * capturing nVMX state for snapshot (migration).
4306 * Otherwise, this flush will dirty guest memory at a
4307 * point it is already assumed by user-space to be
4310 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
4313 * The only expected VM-instruction error is "VM entry with
4314 * invalid control field(s)." Anything else indicates a
4315 * problem with L0. And we should never get here with a
4316 * VMFail of any type if early consistency checks are enabled.
4318 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
4319 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4320 WARN_ON_ONCE(nested_early_check);
4323 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4325 /* Update any VMCS fields that might have changed while L2 ran */
4326 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4327 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4328 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
4329 if (vmx->nested.l1_tpr_threshold != -1)
4330 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
4332 if (kvm_has_tsc_control)
4333 decache_tsc_multiplier(vmx);
4335 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4336 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4337 vmx_set_virtual_apic_mode(vcpu);
4340 /* Unpin physical memory we referred to in vmcs02 */
4341 if (vmx->nested.apic_access_page) {
4342 kvm_release_page_clean(vmx->nested.apic_access_page);
4343 vmx->nested.apic_access_page = NULL;
4345 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4346 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4347 vmx->nested.pi_desc = NULL;
4349 if (vmx->nested.reload_vmcs01_apic_access_page) {
4350 vmx->nested.reload_vmcs01_apic_access_page = false;
4351 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4354 if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
4355 vmx->nested.need_vmcs12_to_shadow_sync = true;
4357 /* in case we halted in L2 */
4358 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4360 if (likely(!vmx->fail)) {
4361 if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
4362 nested_exit_intr_ack_set(vcpu)) {
4363 int irq = kvm_cpu_get_interrupt(vcpu);
4365 vmcs12->vm_exit_intr_info = irq |
4366 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4369 if (exit_reason != -1)
4370 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4371 vmcs12->exit_qualification,
4372 vmcs12->idt_vectoring_info_field,
4373 vmcs12->vm_exit_intr_info,
4374 vmcs12->vm_exit_intr_error_code,
4377 load_vmcs12_host_state(vcpu, vmcs12);
4383 * After an early L2 VM-entry failure, we're now back
4384 * in L1 which thinks it just finished a VMLAUNCH or
4385 * VMRESUME instruction, so we need to set the failure
4386 * flag and the VM-instruction error field of the VMCS
4387 * accordingly, and skip the emulated instruction.
4389 (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4392 * Restore L1's host state to KVM's software model. We're here
4393 * because a consistency check was caught by hardware, which
4394 * means some amount of guest state has been propagated to KVM's
4395 * model and needs to be unwound to the host's state.
4397 nested_vmx_restore_host_state(vcpu);
4403 * Decode the memory-address operand of a vmx instruction, as recorded on an
4404 * exit caused by such an instruction (run by a guest hypervisor).
4405 * On success, returns 0. When the operand is invalid, returns 1 and queues
4406 * an exception (#UD, #GP(0) or #SS(0)) for the guest.
4408 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
4409 u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
4411 gva_t off;
4412 bool exn;
4413 struct kvm_segment s;
4416 * According to Vol. 3B, "Information for VM Exits Due to Instruction
4417 * Execution", on an exit, vmx_instruction_info holds most of the
4418 * addressing components of the operand. Only the displacement part
4419 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4420 * For how an actual address is calculated from all these components,
4421 * refer to Vol. 1, "Operand Addressing".
4423 int scaling = vmx_instruction_info & 3;
4424 int addr_size = (vmx_instruction_info >> 7) & 7;
4425 bool is_reg = vmx_instruction_info & (1u << 10);
4426 int seg_reg = (vmx_instruction_info >> 15) & 7;
4427 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4428 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4429 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4430 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
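/*
 * Layout of the instruction-information field decoded above (see SDM
 * Vol. 3, "VM-Exit Instruction-Information Field"): bits 1:0 scaling,
 * bits 9:7 address size, bit 10 register operand, bits 17:15 segment
 * register, bits 21:18 index register, bit 22 index invalid, bits 26:23
 * base register, bit 27 base invalid.
 */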
4433 kvm_queue_exception(vcpu, UD_VECTOR);
4437 /* Addr = segment_base + offset */
4438 /* offset = base + [index * scale] + displacement */
4439 off = exit_qualification; /* holds the displacement */
4440 if (addr_size == 1)
4441 off = (gva_t)sign_extend64(off, 31);
4442 else if (addr_size == 0)
4443 off = (gva_t)sign_extend64(off, 15);
4444 if (base_is_valid)
4445 off += kvm_register_read(vcpu, base_reg);
4446 if (index_is_valid)
4447 off += kvm_register_read(vcpu, index_reg) << scaling;
4448 vmx_get_segment(vcpu, &s, seg_reg);
4451 * The effective address, i.e. @off, of a memory operand is truncated
4452 * based on the address size of the instruction. Note that this is
4453 * the *effective address*, i.e. the address prior to accounting for
4454 * the segment's base.
4456 if (addr_size == 1) /* 32 bit */
4457 off &= 0xffffffff;
4458 else if (addr_size == 0) /* 16 bit */
4459 off &= 0xffff;
4461 /* Checks for #GP/#SS exceptions. */
4463 if (is_long_mode(vcpu)) {
4465 * The virtual/linear address is never truncated in 64-bit
4466 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4467 * address when using FS/GS with a non-zero base.
4469 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
4470 *ret = s.base + off;
4474 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4475 * non-canonical form. This is the only check on the memory
4476 * destination for long mode!
4478 exn = is_noncanonical_address(*ret, vcpu);
4481 * When not in long mode, the virtual/linear address is
4482 * unconditionally truncated to 32 bits regardless of the
4485 *ret = (s.base + off) & 0xffffffff;
4487 /* Protected mode: apply checks for segment validity in the
4489 * - segment type check (#GP(0) may be thrown)
4490 * - usability check (#GP(0)/#SS(0))
4491 * - limit check (#GP(0)/#SS(0))
4494 /* #GP(0) if the destination operand is located in a
4495 * read-only data segment or any code segment.
4497 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4499 /* #GP(0) if the source operand is located in an
4500 * execute-only code segment
4502 exn = ((s.type & 0xa) == 8);
4504 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4507 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4509 exn = (s.unusable != 0);
4512 * Protected mode: #GP(0)/#SS(0) if the memory operand is
4513 * outside the segment limit. All CPUs that support VMX ignore
4514 * limit checks for flat segments, i.e. segments with base==0,
4515 * limit==0xffffffff and of type expand-up data or code.
4517 if (!(s.base == 0 && s.limit == 0xffffffff &&
4518 ((s.type & 8) || !(s.type & 4))))
4519 exn = exn || ((u64)off + len - 1 > s.limit);
4522 kvm_queue_exception_e(vcpu,
4523 seg_reg == VCPU_SREG_SS ?
4524 SS_VECTOR : GP_VECTOR,
4532 void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
4534 struct vcpu_vmx *vmx;
4536 if (!nested_vmx_allowed(vcpu))
4537 return;
4539 vmx = to_vmx(vcpu);
4540 if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
4541 vmx->nested.msrs.entry_ctls_high |=
4542 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4543 vmx->nested.msrs.exit_ctls_high |=
4544 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4545 } else {
4546 vmx->nested.msrs.entry_ctls_high &=
4547 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4548 vmx->nested.msrs.exit_ctls_high &=
4549 ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4550 }
4553 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
4556 struct x86_exception e;
4558 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4559 vmcs_read32(VMX_INSTRUCTION_INFO), false,
4560 sizeof(*vmpointer), &gva))
4563 if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
4564 kvm_inject_emulated_page_fault(vcpu, &e);
4572 * Allocate a shadow VMCS and associate it with the currently loaded
4573 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4574 * VMCS is also VMCLEARed, so that it is ready for use.
4576 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4578 struct vcpu_vmx *vmx = to_vmx(vcpu);
4579 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4582 * We should allocate a shadow vmcs for vmcs01 only when L1
4583 * executes VMXON and free it when L1 executes VMXOFF.
4584 * As it is invalid to execute VMXON twice, we shouldn't reach
4585 * here when vmcs01 already has an allocated shadow vmcs.
4587 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
4589 if (!loaded_vmcs->shadow_vmcs) {
4590 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4591 if (loaded_vmcs->shadow_vmcs)
4592 vmcs_clear(loaded_vmcs->shadow_vmcs);
4594 return loaded_vmcs->shadow_vmcs;
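/*
 * Allocate everything needed to emulate VMX operation for this vCPU:
 * vmcs02, the cached vmcs12 and shadow-vmcs12 buffers, an optional shadow
 * VMCS, the emulated preemption timer and vpid02. Invoked from the VMXON
 * handler; the error paths below unwind in reverse order.
 */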
4597 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4599 struct vcpu_vmx *vmx = to_vmx(vcpu);
4602 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4606 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4607 if (!vmx->nested.cached_vmcs12)
4608 goto out_cached_vmcs12;
4610 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4611 if (!vmx->nested.cached_shadow_vmcs12)
4612 goto out_cached_shadow_vmcs12;
4614 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4615 goto out_shadow_vmcs;
4617 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
4618 HRTIMER_MODE_REL_PINNED);
4619 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4621 vmx->nested.vpid02 = allocate_vpid();
4623 vmx->nested.vmcs02_initialized = false;
4624 vmx->nested.vmxon = true;
4626 if (vmx_pt_mode_is_host_guest()) {
4627 vmx->pt_desc.guest.ctl = 0;
4628 pt_update_intercept_for_msr(vmx);
4634 kfree(vmx->nested.cached_shadow_vmcs12);
4636 out_cached_shadow_vmcs12:
4637 kfree(vmx->nested.cached_vmcs12);
4640 free_loaded_vmcs(&vmx->nested.vmcs02);
4647 * Emulate the VMXON instruction.
4648 * Currently, we just remember that VMX is active, and do not save or even
4649 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
4650 * do not currently need to store anything in that guest-allocated memory
4651 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
4652 * argument is different from the VMXON pointer (which the spec says they do).
4654 static int handle_vmon(struct kvm_vcpu *vcpu)
4659 struct vcpu_vmx *vmx = to_vmx(vcpu);
4660 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
4661 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
4664 * The Intel VMX Instruction Reference lists a bunch of bits that are
4665 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
4666 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
4667 * Otherwise, we should fail with #UD. But most faulting conditions
4668 * have already been checked by hardware, prior to the VM-exit for
4669 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
4670 * that bit set to 1 in non-root mode.
4672 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
4673 kvm_queue_exception(vcpu, UD_VECTOR);
4677 /* CPL=0 must be checked manually. */
4678 if (vmx_get_cpl(vcpu)) {
4679 kvm_inject_gp(vcpu, 0);
4683 if (vmx->nested.vmxon)
4684 return nested_vmx_failValid(vcpu,
4685 VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
4687 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
4688 != VMXON_NEEDED_FEATURES) {
4689 kvm_inject_gp(vcpu, 0);
4693 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4698 * The first 4 bytes of VMXON region contain the supported
4699 * VMCS revision identifier
4701 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
4702 * which replaces physical address width with 32
4704 if (!page_address_valid(vcpu, vmptr))
4705 return nested_vmx_failInvalid(vcpu);
4707 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
4708 revision != VMCS12_REVISION)
4709 return nested_vmx_failInvalid(vcpu);
4711 vmx->nested.vmxon_ptr = vmptr;
4712 ret = enter_vmx_operation(vcpu);
4716 return nested_vmx_succeed(vcpu);
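/*
 * Drop the current vmcs12: sync any state still living in vmcs02 or the
 * shadow VMCS back into the cache, flush the cached vmcs12 to guest
 * memory, and clear the current-VMCS pointer. Called when L1 VMCLEARs the
 * current VMCS or VMPTRLDs a different one.
 */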
4719 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
4721 struct vcpu_vmx *vmx = to_vmx(vcpu);
4723 if (vmx->nested.current_vmptr == -1ull)
4726 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
4728 if (enable_shadow_vmcs) {
4729 /* copy to memory all shadowed fields in case
4730 they were modified */
4731 copy_shadow_to_vmcs12(vmx);
4732 vmx_disable_shadow_vmcs(vmx);
4734 vmx->nested.posted_intr_nv = -1;
4736 /* Flush VMCS12 to guest memory */
4737 kvm_vcpu_write_guest_page(vcpu,
4738 vmx->nested.current_vmptr >> PAGE_SHIFT,
4739 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
4741 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4743 vmx->nested.current_vmptr = -1ull;
4746 /* Emulate the VMXOFF instruction */
4747 static int handle_vmoff(struct kvm_vcpu *vcpu)
4749 if (!nested_vmx_check_permission(vcpu))
4754 /* Process a latched INIT during time CPU was in VMX operation */
4755 kvm_make_request(KVM_REQ_EVENT, vcpu);
4757 return nested_vmx_succeed(vcpu);
4760 /* Emulate the VMCLEAR instruction */
4761 static int handle_vmclear(struct kvm_vcpu *vcpu)
4763 struct vcpu_vmx *vmx = to_vmx(vcpu);
4768 if (!nested_vmx_check_permission(vcpu))
4771 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4774 if (!page_address_valid(vcpu, vmptr))
4775 return nested_vmx_failValid(vcpu,
4776 VMXERR_VMCLEAR_INVALID_ADDRESS);
4778 if (vmptr == vmx->nested.vmxon_ptr)
4779 return nested_vmx_failValid(vcpu,
4780 VMXERR_VMCLEAR_VMXON_POINTER);
4783 * When Enlightened VMEntry is enabled on the calling CPU we treat
4784 * memory area pointed to by vmptr as an Enlightened VMCS (as there's no good
4785 * way to distinguish it from VMCS12) and we must not corrupt it by
4786 * writing to the non-existent 'launch_state' field. The area doesn't
4787 * have to be the currently active EVMCS on the calling CPU and there's
4788 * nothing KVM has to do to transition it from 'active' to 'non-active'
4789 * state. It is possible that the area will stay mapped as
4790 * vmx->nested.hv_evmcs but this shouldn't be a problem.
4792 if (likely(!vmx->nested.enlightened_vmcs_enabled ||
4793 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
4794 if (vmptr == vmx->nested.current_vmptr)
4795 nested_release_vmcs12(vcpu);
4797 kvm_vcpu_write_guest(vcpu,
4798 vmptr + offsetof(struct vmcs12,
4799 launch_state),
4800 &zero, sizeof(zero));
4803 return nested_vmx_succeed(vcpu);
4806 /* Emulate the VMLAUNCH instruction */
4807 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
4809 return nested_vmx_run(vcpu, true);
4812 /* Emulate the VMRESUME instruction */
4813 static int handle_vmresume(struct kvm_vcpu *vcpu)
4816 return nested_vmx_run(vcpu, false);
4819 static int handle_vmread(struct kvm_vcpu *vcpu)
4821 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
4823 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4824 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4825 struct vcpu_vmx *vmx = to_vmx(vcpu);
4826 struct x86_exception e;
4827 unsigned long field;
4833 if (!nested_vmx_check_permission(vcpu))
4837 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
4838 * any VMREAD sets the ALU flags for VMfailInvalid.
4840 if (vmx->nested.current_vmptr == -1ull ||
4841 (is_guest_mode(vcpu) &&
4842 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
4843 return nested_vmx_failInvalid(vcpu);
4845 /* Decode instruction info and find the field to read */
4846 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
4848 offset = vmcs_field_to_offset(field);
4850 return nested_vmx_failValid(vcpu,
4851 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4853 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
4854 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4856 /* Read the field, zero-extended to a u64 value */
4857 value = vmcs12_read_any(vmcs12, field, offset);
4860 * Now copy part of this value to register or memory, as requested.
4861 * Note that the number of bits actually copied is 32 or 64 depending
4862 * on the guest's mode (32 or 64 bit), not on the given field's length.
4864 if (instr_info & BIT(10)) {
4865 kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value);
4867 len = is_64_bit_mode(vcpu) ? 8 : 4;
4868 if (get_vmx_mem_address(vcpu, exit_qualification,
4869 instr_info, true, len, &gva))
4871 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
4872 if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) {
4873 kvm_inject_emulated_page_fault(vcpu, &e);
4878 return nested_vmx_succeed(vcpu);
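/*
 * Helpers generated from vmcs_shadow_fields.h: a field is "shadow rw/ro"
 * when hardware can satisfy L1's VMREAD/VMWRITE for it directly through
 * the shadow VMCS, which in turn decides whether the emulated access paths
 * below need to sync vmcs12 by hand.
 */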
4881 static bool is_shadow_field_rw(unsigned long field)
4884 #define SHADOW_FIELD_RW(x, y) case x:
4885 #include "vmcs_shadow_fields.h"
4893 static bool is_shadow_field_ro(unsigned long field)
4896 #define SHADOW_FIELD_RO(x, y) case x:
4897 #include "vmcs_shadow_fields.h"
4905 static int handle_vmwrite(struct kvm_vcpu *vcpu)
4907 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
4909 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4910 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4911 struct vcpu_vmx *vmx = to_vmx(vcpu);
4912 struct x86_exception e;
4913 unsigned long field;
4919 * The value to write might be 32 or 64 bits, depending on L1's long
4920 * mode, and eventually we need to write that into a field of several
4921 * possible lengths. The code below first zero-extends the value to 64
4922 * bit (value), and then copies only the appropriate number of
4923 * bits into the vmcs12 field.
4927 if (!nested_vmx_check_permission(vcpu))
4931 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
4932 * any VMWRITE sets the ALU flags for VMfailInvalid.
4934 if (vmx->nested.current_vmptr == -1ull ||
4935 (is_guest_mode(vcpu) &&
4936 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
4937 return nested_vmx_failInvalid(vcpu);
4939 if (instr_info & BIT(10))
4940 value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf));
4942 len = is_64_bit_mode(vcpu) ? 8 : 4;
4943 if (get_vmx_mem_address(vcpu, exit_qualification,
4944 instr_info, false, len, &gva))
4946 if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) {
4947 kvm_inject_emulated_page_fault(vcpu, &e);
4952 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
4954 offset = vmcs_field_to_offset(field);
4956 return nested_vmx_failValid(vcpu,
4957 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4960 * If the vCPU supports "VMWRITE to any supported field in the
4961 * VMCS," then the "read-only" fields are actually read/write.
4963 if (vmcs_field_readonly(field) &&
4964 !nested_cpu_has_vmwrite_any_field(vcpu))
4965 return nested_vmx_failValid(vcpu,
4966 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
4969 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
4970 * vmcs12, else we may crush a field or consume a stale value.
4972 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
4973 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4976 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
4977 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
4978 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
4979 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
4980 * from L1 will return a different value than VMREAD from L2 (L1 sees
4981 * the stripped down value, L2 sees the full value as stored by KVM).
4983 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
4984 value &= 0x1f0ff;
4986 vmcs12_write_any(vmcs12, field, offset, value);
4989 * Do not track vmcs12 dirty-state if in guest-mode as we actually
4990 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
4991 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
4992 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
4994 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
4996 * L1 can read these fields without exiting, ensure the
4997 * shadow VMCS is up-to-date.
4999 if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
5000 preempt_disable();
5001 vmcs_load(vmx->vmcs01.shadow_vmcs);
5003 __vmcs_writel(field, value);
5005 vmcs_clear(vmx->vmcs01.shadow_vmcs);
5006 vmcs_load(vmx->loaded_vmcs->vmcs);
5007 preempt_enable();
5008 }
5009 vmx->nested.dirty_vmcs12 = true;
5012 return nested_vmx_succeed(vcpu);
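/*
 * Make @vmptr the current VMCS. With shadow VMCS support, point the
 * VMCS-link pointer at vmcs01's shadow VMCS so L1's VMREAD/VMWRITE can be
 * handled in hardware, and queue a vmcs12->shadow sync for the next
 * VM-entry.
 */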
5015 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
5017 vmx->nested.current_vmptr = vmptr;
5018 if (enable_shadow_vmcs) {
5019 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
5020 vmcs_write64(VMCS_LINK_POINTER,
5021 __pa(vmx->vmcs01.shadow_vmcs));
5022 vmx->nested.need_vmcs12_to_shadow_sync = true;
5024 vmx->nested.dirty_vmcs12 = true;
5027 /* Emulate the VMPTRLD instruction */
5028 static int handle_vmptrld(struct kvm_vcpu *vcpu)
5030 struct vcpu_vmx *vmx = to_vmx(vcpu);
5033 if (!nested_vmx_check_permission(vcpu))
5036 if (nested_vmx_get_vmptr(vcpu, &vmptr))
5039 if (!page_address_valid(vcpu, vmptr))
5040 return nested_vmx_failValid(vcpu,
5041 VMXERR_VMPTRLD_INVALID_ADDRESS);
5043 if (vmptr == vmx->nested.vmxon_ptr)
5044 return nested_vmx_failValid(vcpu,
5045 VMXERR_VMPTRLD_VMXON_POINTER);
5047 /* Forbid normal VMPTRLD if Enlightened version was used */
5048 if (vmx->nested.hv_evmcs)
5051 if (vmx->nested.current_vmptr != vmptr) {
5052 struct kvm_host_map map;
5053 struct vmcs12 *new_vmcs12;
5055 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
5057 * Reads from an unbacked page return all 1s,
5058 * which means that the 32 bits located at the
5059 * given physical address won't match the required
5060 * VMCS12_REVISION identifier.
5062 return nested_vmx_failValid(vcpu,
5063 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5066 new_vmcs12 = map.hva;
5068 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
5069 (new_vmcs12->hdr.shadow_vmcs &&
5070 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
5071 kvm_vcpu_unmap(vcpu, &map, false);
5072 return nested_vmx_failValid(vcpu,
5073 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5076 nested_release_vmcs12(vcpu);
5079 * Load VMCS12 from guest memory since it is not already
5080 * cached.
5082 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
5083 kvm_vcpu_unmap(vcpu, &map, false);
5085 set_current_vmptr(vmx, vmptr);
5088 return nested_vmx_succeed(vcpu);
5091 /* Emulate the VMPTRST instruction */
5092 static int handle_vmptrst(struct kvm_vcpu *vcpu)
5094 unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
5095 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5096 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
5097 struct x86_exception e;
5100 if (!nested_vmx_check_permission(vcpu))
5103 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
5106 if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
5107 true, sizeof(gpa_t), &gva))
5109 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
5110 if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr,
5111 sizeof(gpa_t), &e)) {
5112 kvm_inject_emulated_page_fault(vcpu, &e);
5115 return nested_vmx_succeed(vcpu);
5118 /* Emulate the INVEPT instruction */
5119 static int handle_invept(struct kvm_vcpu *vcpu)
5121 struct vcpu_vmx *vmx = to_vmx(vcpu);
5122 u32 vmx_instruction_info, types;
5125 struct x86_exception e;
5130 if (!(vmx->nested.msrs.secondary_ctls_high &
5131 SECONDARY_EXEC_ENABLE_EPT) ||
5132 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
5133 kvm_queue_exception(vcpu, UD_VECTOR);
5137 if (!nested_vmx_check_permission(vcpu))
5140 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5141 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
5143 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
5145 if (type >= 32 || !(types & (1 << type)))
5146 return nested_vmx_failValid(vcpu,
5147 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5149 /* According to the Intel VMX instruction reference, the memory
5150 * operand is read even if it isn't needed (e.g., for type==global)
5152 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
5153 vmx_instruction_info, false, sizeof(operand), &gva))
5155 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
5156 kvm_inject_emulated_page_fault(vcpu, &e);
5161 case VMX_EPT_EXTENT_CONTEXT:
5162 if (!nested_vmx_check_eptp(vcpu, operand.eptp))
5163 return nested_vmx_failValid(vcpu,
5164 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5166 /* TODO: sync only the target EPTP context. */
5168 case VMX_EPT_EXTENT_GLOBAL:
5170 * Nested EPT roots are always held through guest_mmu,
5173 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu,
5181 return nested_vmx_succeed(vcpu);
5184 static int handle_invvpid(struct kvm_vcpu *vcpu)
5186 struct vcpu_vmx *vmx = to_vmx(vcpu);
5187 u32 vmx_instruction_info;
5188 unsigned long type, types;
5190 struct x86_exception e;
5197 if (!(vmx->nested.msrs.secondary_ctls_high &
5198 SECONDARY_EXEC_ENABLE_VPID) ||
5199 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
5200 kvm_queue_exception(vcpu, UD_VECTOR);
5204 if (!nested_vmx_check_permission(vcpu))
5207 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5208 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
5210 types = (vmx->nested.msrs.vpid_caps &
5211 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
5213 if (type >= 32 || !(types & (1 << type)))
5214 return nested_vmx_failValid(vcpu,
5215 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5217 /* according to the intel vmx instruction reference, the memory
5218 * operand is read even if it isn't needed (e.g., for type==global)
5220 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
5221 vmx_instruction_info, false, sizeof(operand), &gva))
5223 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
5224 kvm_inject_emulated_page_fault(vcpu, &e);
5227 if (operand.vpid >> 16)
5228 return nested_vmx_failValid(vcpu,
5229 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5231 vpid02 = nested_get_vpid02(vcpu);
5233 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
5234 if (!operand.vpid ||
5235 is_noncanonical_address(operand.gla, vcpu))
5236 return nested_vmx_failValid(vcpu,
5237 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5238 vpid_sync_vcpu_addr(vpid02, operand.gla);
5240 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
5241 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
5243 return nested_vmx_failValid(vcpu,
5244 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5245 vpid_sync_context(vpid02);
5247 case VMX_VPID_EXTENT_ALL_CONTEXT:
5248 vpid_sync_context(vpid02);
5252 return kvm_skip_emulated_instruction(vcpu);
5256 * Sync the shadow page tables if EPT is disabled, L1 is invalidating
5257 * linear mappings for L2 (tagged with L2's VPID). Free all roots as
5258 * VPIDs are not tracked in the MMU role.
5260 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
5261 * an MMU when EPT is disabled.
5263 * TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR.
5266 kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu,
5269 return nested_vmx_succeed(vcpu);
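/*
 * Emulate the EPTP-switching VM function (leaf 0): validate the requested
 * index against L1's EPTP list, read the new EPTP from guest memory and,
 * if it differs from the current one, reload the nested MMU with the new
 * root and accessed/dirty setting. A non-zero return makes the caller
 * reflect the VMFUNC to L1 as a VM-exit.
 */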
5272 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
5273 struct vmcs12 *vmcs12)
5275 u32 index = kvm_rcx_read(vcpu);
5277 bool accessed_dirty;
5278 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
5280 if (!nested_cpu_has_eptp_switching(vmcs12) ||
5281 !nested_cpu_has_ept(vmcs12))
5284 if (index >= VMFUNC_EPTP_ENTRIES)
5288 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
5289 &new_eptp, index * 8, 8))
5292 accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);
5295 * If the (L2) guest does a vmfunc to the currently
5296 * active ept pointer, we don't have to do anything else
5298 if (vmcs12->ept_pointer != new_eptp) {
5299 if (!nested_vmx_check_eptp(vcpu, new_eptp))
5302 kvm_mmu_unload(vcpu);
5303 mmu->ept_ad = accessed_dirty;
5304 mmu->mmu_role.base.ad_disabled = !accessed_dirty;
5305 vmcs12->ept_pointer = new_eptp;
5307 * TODO: Check what's the correct approach in case
5308 * mmu reload fails. Currently, we just let the next
5309 * reload potentially fail
5311 kvm_mmu_reload(vcpu);
5317 static int handle_vmfunc(struct kvm_vcpu *vcpu)
5319 struct vcpu_vmx *vmx = to_vmx(vcpu);
5320 struct vmcs12 *vmcs12;
5321 u32 function = kvm_rax_read(vcpu);
5324 * VMFUNC is only supported for nested guests, but we always enable the
5325 * secondary control for simplicity; for non-nested mode, fake that we
5326 * didn't by injecting #UD.
5328 if (!is_guest_mode(vcpu)) {
5329 kvm_queue_exception(vcpu, UD_VECTOR);
5333 vmcs12 = get_vmcs12(vcpu);
5334 if ((vmcs12->vm_function_control & (1 << function)) == 0)
5335 goto fail;
5339 if (nested_vmx_eptp_switching(vcpu, vmcs12))
5340 goto fail;
5345 return kvm_skip_emulated_instruction(vcpu);
5347 fail:
5348 nested_vmx_vmexit(vcpu, vmx->exit_reason,
5349 vmcs_read32(VM_EXIT_INTR_INFO),
5350 vmcs_readl(EXIT_QUALIFICATION));
5355 * Return true if an IO instruction with the specified port and size should cause
5356 * a VM-exit into L1.
5358 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
5361 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5362 gpa_t bitmap, last_bitmap;
5363 u8 b;
5365 last_bitmap = (gpa_t)-1;
5369 if (port < 0x8000)
5370 bitmap = vmcs12->io_bitmap_a;
5371 else if (port < 0x10000)
5372 bitmap = vmcs12->io_bitmap_b;
5373 else
5374 return true;
5375 bitmap += (port & 0x7fff) / 8;
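/*
 * For example (illustrative only): port 0x3f8 is below 0x8000, so it is
 * looked up in io_bitmap_a at byte 0x3f8 / 8 = 0x7f, bit 0x3f8 & 7 = 0.
 */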
5377 if (last_bitmap != bitmap)
5378 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5380 if (b & (1 << (port & 7)))
5385 last_bitmap = bitmap;
5391 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
5392 struct vmcs12 *vmcs12)
5394 unsigned long exit_qualification;
5395 unsigned short port;
5398 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5399 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
5401 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5403 port = exit_qualification >> 16;
5404 size = (exit_qualification & 7) + 1;
5406 return nested_vmx_check_io_bitmaps(vcpu, port, size);
5410 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
5411 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
5412 * disinterest in the current event (read or write a specific MSR) by using an
5413 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5415 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
5416 struct vmcs12 *vmcs12, u32 exit_reason)
5418 u32 msr_index = kvm_rcx_read(vcpu);
5419 gpa_t bitmap;
5421 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5425 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5426 * for the four combinations of read/write and low/high MSR numbers.
5427 * First we need to figure out which of the four to use:
5429 bitmap = vmcs12->msr_bitmap;
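/*
 * For example (illustrative only): a write to MSR 0xc0000080 (EFER) uses
 * the write bitmaps (+2048); the high-MSR half adds another 1024, so the
 * relevant bit sits at byte 3072 + 0x80 / 8 = 3088, bit 0.
 */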
5430 if (exit_reason == EXIT_REASON_MSR_WRITE)
5431 bitmap += 2048;
5432 if (msr_index >= 0xc0000000) {
5433 msr_index -= 0xc0000000;
5434 bitmap += 1024;
5435 }
5437 /* Then read the msr_index'th bit from this bitmap: */
5438 if (msr_index < 1024*8) {
5439 unsigned char b;
5440 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
5441 return true;
5442 return 1 & (b >> (msr_index & 7));
5443 } else
5444 return true; /* let L1 handle the wrong parameter */
5448 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
5449 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5450 * intercept (via guest_host_mask etc.) the current event.
5452 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5453 struct vmcs12 *vmcs12)
5455 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5456 int cr = exit_qualification & 15;
5460 switch ((exit_qualification >> 4) & 3) {
5461 case 0: /* mov to cr */
5462 reg = (exit_qualification >> 8) & 15;
5463 val = kvm_register_readl(vcpu, reg);
5466 if (vmcs12->cr0_guest_host_mask &
5467 (val ^ vmcs12->cr0_read_shadow))
5471 if ((vmcs12->cr3_target_count >= 1 &&
5472 vmcs12->cr3_target_value0 == val) ||
5473 (vmcs12->cr3_target_count >= 2 &&
5474 vmcs12->cr3_target_value1 == val) ||
5475 (vmcs12->cr3_target_count >= 3 &&
5476 vmcs12->cr3_target_value2 == val) ||
5477 (vmcs12->cr3_target_count >= 4 &&
5478 vmcs12->cr3_target_value3 == val))
5480 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5484 if (vmcs12->cr4_guest_host_mask &
5485 (vmcs12->cr4_read_shadow ^ val))
5489 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5495 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5496 (vmcs12->cr0_read_shadow & X86_CR0_TS))
5499 case 1: /* mov from cr */
5502 if (vmcs12->cpu_based_vm_exec_control &
5503 CPU_BASED_CR3_STORE_EXITING)
5507 if (vmcs12->cpu_based_vm_exec_control &
5508 CPU_BASED_CR8_STORE_EXITING)
5515 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
5516 * cr0. Other attempted changes are ignored, with no exit.
5518 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5519 if (vmcs12->cr0_guest_host_mask & 0xe &
5520 (val ^ vmcs12->cr0_read_shadow))
5522 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5523 !(vmcs12->cr0_read_shadow & 0x1) &&
5531 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
5532 struct vmcs12 *vmcs12, gpa_t bitmap)
5534 u32 vmx_instruction_info;
5535 unsigned long field;
5536 u8 b;
5538 if (!nested_cpu_has_shadow_vmcs(vmcs12))
5541 /* Decode instruction info and find the field to access */
5542 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5543 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5545 /* Out-of-range fields always cause a VM exit from L2 to L1 */
5546 if (field >> 15)
5547 return true;
5549 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
5550 return true;
5552 return 1 & (b >> (field & 7));
5555 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
5557 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
5559 if (nested_cpu_has_mtf(vmcs12))
5560 return true;
5563 * An MTF VM-exit may be injected into the guest by setting the
5564 * interruption-type to 7 (other event) and the vector field to 0. Such
5565 * is the case regardless of the 'monitor trap flag' VM-execution
5566 * control.
5568 return entry_intr_info == (INTR_INFO_VALID_MASK
5569 | INTR_TYPE_OTHER_EVENT);
5573 * Return true if we should exit from L2 to L1 to handle an exit, or false if we
5574 * should handle it ourselves in L0 (and then continue L2). Only call this
5575 * when in is_guest_mode (L2).
5577 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
5579 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
5580 struct vcpu_vmx *vmx = to_vmx(vcpu);
5581 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5583 WARN_ON_ONCE(vmx->nested.nested_run_pending);
5585 if (unlikely(vmx->fail)) {
5586 trace_kvm_nested_vmenter_failed(
5587 "hardware VM-instruction error: ",
5588 vmcs_read32(VM_INSTRUCTION_ERROR));
5589 return true;
5590 }
5592 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
5593 vmcs_readl(EXIT_QUALIFICATION),
5594 vmx->idt_vectoring_info,
5595 intr_info,
5596 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
5597 KVM_ISA_VMX);
5599 switch (exit_reason) {
5600 case EXIT_REASON_EXCEPTION_NMI:
5601 if (is_nmi(intr_info))
5602 return false;
5603 else if (is_page_fault(intr_info))
5604 return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
5605 else if (is_debug(intr_info) &&
5606 vcpu->guest_debug &
5607 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5608 return false;
5609 else if (is_breakpoint(intr_info) &&
5610 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5611 return false;
5612 return vmcs12->exception_bitmap &
5613 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
5614 case EXIT_REASON_EXTERNAL_INTERRUPT:
5616 case EXIT_REASON_TRIPLE_FAULT:
5618 case EXIT_REASON_INTERRUPT_WINDOW:
5619 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
5620 case EXIT_REASON_NMI_WINDOW:
5621 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
5622 case EXIT_REASON_TASK_SWITCH:
5624 case EXIT_REASON_CPUID:
5626 case EXIT_REASON_HLT:
5627 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
5628 case EXIT_REASON_INVD:
5630 case EXIT_REASON_INVLPG:
5631 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5632 case EXIT_REASON_RDPMC:
5633 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
5634 case EXIT_REASON_RDRAND:
5635 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
5636 case EXIT_REASON_RDSEED:
5637 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
5638 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
5639 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
5640 case EXIT_REASON_VMREAD:
5641 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5642 vmcs12->vmread_bitmap);
5643 case EXIT_REASON_VMWRITE:
5644 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5645 vmcs12->vmwrite_bitmap);
5646 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
5647 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
5648 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
5649 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
5650 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
5651 /*
5652 * VMX instructions trap unconditionally. This allows L1 to
5653 * emulate them for its L2 guest, i.e., allows 3-level nesting!
5654 */
5655 return true;
5656 case EXIT_REASON_CR_ACCESS:
5657 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
5658 case EXIT_REASON_DR_ACCESS:
5659 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
5660 case EXIT_REASON_IO_INSTRUCTION:
5661 return nested_vmx_exit_handled_io(vcpu, vmcs12);
5662 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
5663 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
5664 case EXIT_REASON_MSR_READ:
5665 case EXIT_REASON_MSR_WRITE:
5666 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
5667 case EXIT_REASON_INVALID_STATE:
5668 return true;
5669 case EXIT_REASON_MWAIT_INSTRUCTION:
5670 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
5671 case EXIT_REASON_MONITOR_TRAP_FLAG:
5672 return nested_vmx_exit_handled_mtf(vmcs12);
5673 case EXIT_REASON_MONITOR_INSTRUCTION:
5674 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
5675 case EXIT_REASON_PAUSE_INSTRUCTION:
5676 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
5677 nested_cpu_has2(vmcs12,
5678 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
5679 case EXIT_REASON_MCE_DURING_VMENTRY:
5680 return false;
5681 case EXIT_REASON_TPR_BELOW_THRESHOLD:
5682 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
5683 case EXIT_REASON_APIC_ACCESS:
5684 case EXIT_REASON_APIC_WRITE:
5685 case EXIT_REASON_EOI_INDUCED:
5686 /*
5687 * The controls for "virtualize APIC accesses," "APIC-
5688 * register virtualization," and "virtual-interrupt
5689 * delivery" only come from vmcs12.
5690 */
5691 return true;
5692 case EXIT_REASON_EPT_VIOLATION:
5693 /*
5694 * L0 always deals with the EPT violation. If nested EPT is
5695 * used, and the nested mmu code discovers that the address is
5696 * missing in the guest EPT table (EPT12), the EPT violation
5697 * will be injected with nested_ept_inject_page_fault().
5698 */
5699 return false;
5700 case EXIT_REASON_EPT_MISCONFIG:
5701 /*
5702 * L2 never uses L1's EPT directly, but rather L0's own EPT
5703 * table (shadow on EPT) or a merged EPT table that L0 built
5704 * (EPT on EPT). So any problems with the structure of the
5705 * table are L0's fault.
5706 */
5707 return false;
5708 case EXIT_REASON_INVPCID:
5709 return
5710 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
5711 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5712 case EXIT_REASON_WBINVD:
5713 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
5714 case EXIT_REASON_XSETBV:
5715 return false;
5716 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
5717 /*
5718 * This should never happen, since it is not possible to
5719 * set XSS to a non-zero value---neither in L1 nor in L2.
5720 * If it were, XSS would have to be checked against
5721 * the XSS exit bitmap in vmcs12.
5722 */
5723 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
5724 case EXIT_REASON_PREEMPTION_TIMER:
5725 return false;
5726 case EXIT_REASON_PML_FULL:
5727 /* We emulate PML support to L1. */
5728 return false;
5729 case EXIT_REASON_VMFUNC:
5730 /* VM functions are emulated through L2->L0 vmexits. */
5731 return false;
5732 case EXIT_REASON_ENCLS:
5733 /* SGX is never exposed to L1 */
5734 return false;
5735 case EXIT_REASON_UMWAIT:
5736 case EXIT_REASON_TPAUSE:
5737 return nested_cpu_has2(vmcs12,
5738 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
5739 default:
5740 return true;
5741 }
5742 }
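/*
 * vmx_get_nested_state() serializes the vCPU's nested VMX state (VMXON
 * pointer, current vmcs12 pointer, SMM/guest-mode flags and, when valid,
 * the vmcs12 and shadow vmcs12 contents) into the userspace buffer used
 * by KVM_GET_NESTED_STATE.
 */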
5745 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5746 struct kvm_nested_state __user *user_kvm_nested_state,
5747 u32 user_data_size)
5748 {
5749 struct vcpu_vmx *vmx;
5750 struct vmcs12 *vmcs12;
5751 struct kvm_nested_state kvm_state = {
5753 .format = KVM_STATE_NESTED_FORMAT_VMX,
5754 .size = sizeof(kvm_state),
5755 .hdr.vmx.vmxon_pa = -1ull,
5756 .hdr.vmx.vmcs12_pa = -1ull,
5757 };
5758 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
5759 &user_kvm_nested_state->data.vmx[0];
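/*
 * With no vCPU supplied, only the worst-case state size is reported so
 * that userspace can size its kvm_nested_state buffer up front.
 */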
5761 if (!vcpu)
5762 return kvm_state.size + sizeof(*user_vmx_nested_state);
5764 vmx = to_vmx(vcpu);
5765 vmcs12 = get_vmcs12(vcpu);
5767 if (nested_vmx_allowed(vcpu) &&
5768 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
5769 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
5770 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
5772 if (vmx_has_valid_vmcs12(vcpu)) {
5773 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
5775 if (vmx->nested.hv_evmcs)
5776 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
5778 if (is_guest_mode(vcpu) &&
5779 nested_cpu_has_shadow_vmcs(vmcs12) &&
5780 vmcs12->vmcs_link_pointer != -1ull)
5781 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
5782 }
5784 if (vmx->nested.smm.vmxon)
5785 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
5787 if (vmx->nested.smm.guest_mode)
5788 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
5790 if (is_guest_mode(vcpu)) {
5791 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
5793 if (vmx->nested.nested_run_pending)
5794 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
5796 if (vmx->nested.mtf_pending)
5797 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
5798 }
5799 }
5801 if (user_data_size < kvm_state.size)
5802 goto out;
5804 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
5805 return -EFAULT;
5807 if (!vmx_has_valid_vmcs12(vcpu))
5808 goto out;
5810 /*
5811 * When running L2, the authoritative vmcs12 state is in the
5812 * vmcs02. When running L1, the authoritative vmcs12 state is
5813 * in the shadow or enlightened vmcs linked to vmcs01, unless
5814 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
5815 * vmcs12 state is in the vmcs12 already.
5816 */
5817 if (is_guest_mode(vcpu)) {
5818 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
5819 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5820 } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
5821 if (vmx->nested.hv_evmcs)
5822 copy_enlightened_to_vmcs12(vmx);
5823 else if (enable_shadow_vmcs)
5824 copy_shadow_to_vmcs12(vmx);
5825 }
5827 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
5828 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
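/*
 * The BUILD_BUG_ONs above guarantee that the userspace-visible fields are
 * at least VMCS12_SIZE bytes, so the fixed-size copies below cannot
 * overrun their destinations.
 */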
5830 /*
5831 * Copy over the full allocated size of vmcs12 rather than just the size
5832 * of the fields.
5833 */
5834 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
5835 return -EFAULT;
5837 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5838 vmcs12->vmcs_link_pointer != -1ull) {
5839 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
5840 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
5841 return -EFAULT;
5842 }
5844 out:
5845 return kvm_state.size;
5846 }
5848 /*
5849 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
5850 */
5851 void vmx_leave_nested(struct kvm_vcpu *vcpu)
5852 {
5853 if (is_guest_mode(vcpu)) {
5854 to_vmx(vcpu)->nested.nested_run_pending = 0;
5855 nested_vmx_vmexit(vcpu, -1, 0, 0);
5856 }
5857 free_nested(vcpu);
5858 }
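/*
 * vmx_set_nested_state() is the inverse of vmx_get_nested_state(): it
 * validates the header, re-enters VMX operation, restores the current
 * vmcs12 (and shadow vmcs12, if any) and, when requested, re-enters guest
 * mode via nested_vmx_enter_non_root_mode().
 */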
5860 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
5861 struct kvm_nested_state __user *user_kvm_nested_state,
5862 struct kvm_nested_state *kvm_state)
5863 {
5864 struct vcpu_vmx *vmx = to_vmx(vcpu);
5865 struct vmcs12 *vmcs12;
5866 u32 exit_qual;
5867 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
5868 &user_kvm_nested_state->data.vmx[0];
5869 int ret;
5871 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
5872 return -EINVAL;
5874 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
5875 if (kvm_state->hdr.vmx.smm.flags)
5876 return -EINVAL;
5878 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
5879 return -EINVAL;
5881 /*
5882 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
5883 * enable eVMCS capability on vCPU. However, since then
5884 * the code was changed such that the flag signals that vmcs12
5885 * should be copied into the eVMCS in guest memory.
5886 *
5887 * To preserve backwards compatibility, allow userspace
5888 * to set this flag even when there is no VMXON region.
5889 */
5890 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
5891 return -EINVAL;
5892 } else {
5893 if (!nested_vmx_allowed(vcpu))
5894 return -EINVAL;
5896 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
5897 return -EINVAL;
5898 }
5900 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5901 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5902 return -EINVAL;
5904 if (kvm_state->hdr.vmx.smm.flags &
5905 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
5906 return -EINVAL;
5908 /*
5909 * SMM temporarily disables VMX, so we cannot be in guest mode,
5910 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
5911 * must be zero.
5912 */
5913 if (is_smm(vcpu) ?
5914 (kvm_state->flags &
5915 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
5916 : kvm_state->hdr.vmx.smm.flags)
5917 return -EINVAL;
5919 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5920 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
5921 return -EINVAL;
5923 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
5924 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
5925 return -EINVAL;
5927 vmx_leave_nested(vcpu);
5929 if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
5930 return 0;
5932 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
5933 ret = enter_vmx_operation(vcpu);
5934 if (ret)
5935 return ret;
5937 /* Empty 'VMXON' state is permitted */
5938 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
5939 return 0;
5941 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
5942 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
5943 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
5944 return -EINVAL;
5946 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
5947 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
5948 /*
5949 * nested_vmx_handle_enlightened_vmptrld() cannot be called
5950 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
5951 * restored yet. EVMCS will be mapped from
5952 * nested_get_vmcs12_pages().
5953 */
5954 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
5955 } else {
5956 return 0;
5957 }
5959 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
5960 vmx->nested.smm.vmxon = true;
5961 vmx->nested.vmxon = false;
5963 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
5964 vmx->nested.smm.guest_mode = true;
5965 }
5967 vmcs12 = get_vmcs12(vcpu);
5968 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
5969 return -EFAULT;
5971 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
5972 return -EINVAL;
5974 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5975 return 0;
5977 vmx->nested.nested_run_pending =
5978 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
5980 vmx->nested.mtf_pending =
5981 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);
5984 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5985 vmcs12->vmcs_link_pointer != -1ull) {
5986 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
5988 if (kvm_state->size <
5989 sizeof(*kvm_state) +
5990 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
5991 goto error_guest_mode;
5993 if (copy_from_user(shadow_vmcs12,
5994 user_vmx_nested_state->shadow_vmcs12,
5995 sizeof(*shadow_vmcs12))) {
5996 ret = -EFAULT;
5997 goto error_guest_mode;
5998 }
6000 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
6001 !shadow_vmcs12->hdr.shadow_vmcs)
6002 goto error_guest_mode;
6003 }
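/*
 * Restored vmcs12 contents are untrusted; run the same consistency
 * checks that a nested VM-entry from L1 would perform before re-entering
 * L2 below.
 */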
6005 if (nested_vmx_check_controls(vcpu, vmcs12) ||
6006 nested_vmx_check_host_state(vcpu, vmcs12) ||
6007 nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
6008 goto error_guest_mode;
6010 vmx->nested.dirty_vmcs12 = true;
6011 ret = nested_vmx_enter_non_root_mode(vcpu, false);
6012 if (ret)
6013 goto error_guest_mode;
6015 return 0;
6017 error_guest_mode:
6018 vmx->nested.nested_run_pending = 0;
6019 return ret;
6020 }
6022 void nested_vmx_set_vmcs_shadowing_bitmap(void)
6023 {
6024 if (enable_shadow_vmcs) {
6025 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
6026 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
6027 }
6028 }
6030 /*
6031 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
6032 * returned for the various VMX controls MSRs when nested VMX is enabled.
6033 * The same values should also be used to verify that vmcs12 control fields are
6034 * valid during nested entry from L1 to L2.
6035 * Each of these control msrs has a low and high 32-bit half: A low bit is on
6036 * if the corresponding bit in the (32-bit) control field *must* be on, and a
6037 * bit in the high half is on if the corresponding bit in the control field
6038 * may be on. See also vmx_control_verify().
6039 */
6040 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
6041 {
6042 /*
6043 * Note that as a general rule, the high half of the MSRs (bits in
6044 * the control fields which may be 1) should be initialized by the
6045 * intersection of the underlying hardware's MSR (i.e., features which
6046 * can be supported) and the list of features we want to expose -
6047 * because they are known to be properly supported in our code.
6048 * Also, usually, the low half of the MSRs (bits which must be 1) can
6049 * be set to 0, meaning that L1 may turn off any of these bits. The
6050 * reason is that if one of these bits is necessary, it will appear
6051 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
6052 * fields of vmcs01 and vmcs02, will turn these bits off - and
6053 * nested_vmx_exit_reflected() will not pass related exits to L1.
6054 * These rules have exceptions below.
6055 */
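/*
 * As a rough sketch of how these values are consumed, a vmcs12 control
 * dword passes vmx_control_verify(ctl, low, high) when all must-be-one
 * bits are set and no unsupported bit is set, i.e. roughly:
 *   (ctl & low) == low && (ctl & ~high) == 0
 */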
6057 /* pin-based controls */
6058 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
6059 msrs->pinbased_ctls_low,
6060 msrs->pinbased_ctls_high);
6061 msrs->pinbased_ctls_low |=
6062 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6063 msrs->pinbased_ctls_high &=
6064 PIN_BASED_EXT_INTR_MASK |
6065 PIN_BASED_NMI_EXITING |
6066 PIN_BASED_VIRTUAL_NMIS |
6067 (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
6068 msrs->pinbased_ctls_high |=
6069 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6070 PIN_BASED_VMX_PREEMPTION_TIMER;
6072 /* exit controls */
6073 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
6074 msrs->exit_ctls_low,
6075 msrs->exit_ctls_high);
6076 msrs->exit_ctls_low =
6077 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
6079 msrs->exit_ctls_high &=
6080 #ifdef CONFIG_X86_64
6081 VM_EXIT_HOST_ADDR_SPACE_SIZE |
6082 #endif
6083 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
6084 msrs->exit_ctls_high |=
6085 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
6086 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
6087 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
6089 /* We support free control of debug control saving. */
6090 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
6092 /* entry controls */
6093 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
6094 msrs->entry_ctls_low,
6095 msrs->entry_ctls_high);
6096 msrs->entry_ctls_low =
6097 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
6098 msrs->entry_ctls_high &=
6099 #ifdef CONFIG_X86_64
6100 VM_ENTRY_IA32E_MODE |
6101 #endif
6102 VM_ENTRY_LOAD_IA32_PAT;
6103 msrs->entry_ctls_high |=
6104 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
6106 /* We support free control of debug control loading. */
6107 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
6109 /* cpu-based controls */
6110 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
6111 msrs->procbased_ctls_low,
6112 msrs->procbased_ctls_high);
6113 msrs->procbased_ctls_low =
6114 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6115 msrs->procbased_ctls_high &=
6116 CPU_BASED_INTR_WINDOW_EXITING |
6117 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
6118 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
6119 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
6120 CPU_BASED_CR3_STORE_EXITING |
6121 #ifdef CONFIG_X86_64
6122 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
6123 #endif
6124 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
6125 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
6126 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
6127 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
6128 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
6129 /*
6130 * We can allow some features even when not supported by the
6131 * hardware. For example, L1 can specify an MSR bitmap - and we
6132 * can use it to avoid exits to L1 - even when L0 runs L2
6133 * without MSR bitmaps.
6134 */
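/*
 * (When L0 does use MSR bitmaps, the L0 and L1 bitmaps are merged at
 * nested VM-entry so an MSR access by L2 bypasses the exit only when both
 * hypervisors allow it.)
 */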
6135 msrs->procbased_ctls_high |=
6136 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6137 CPU_BASED_USE_MSR_BITMAPS;
6139 /* We support free control of CR3 access interception. */
6140 msrs->procbased_ctls_low &=
6141 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
6143 /*
6144 * secondary cpu-based controls. Do not include those that
6145 * depend on CPUID bits; they are added later by vmx_cpuid_update.
6146 */
6147 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
6148 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
6149 msrs->secondary_ctls_low,
6150 msrs->secondary_ctls_high);
6152 msrs->secondary_ctls_low = 0;
6153 msrs->secondary_ctls_high &=
6154 SECONDARY_EXEC_DESC |
6155 SECONDARY_EXEC_RDTSCP |
6156 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
6157 SECONDARY_EXEC_WBINVD_EXITING |
6158 SECONDARY_EXEC_APIC_REGISTER_VIRT |
6159 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
6160 SECONDARY_EXEC_RDRAND_EXITING |
6161 SECONDARY_EXEC_ENABLE_INVPCID |
6162 SECONDARY_EXEC_RDSEED_EXITING |
6163 SECONDARY_EXEC_XSAVES;
6165 /*
6166 * We can emulate "VMCS shadowing," even if the hardware
6167 * doesn't support it.
6168 */
6169 msrs->secondary_ctls_high |=
6170 SECONDARY_EXEC_SHADOW_VMCS;
6172 if (enable_ept) {
6173 /* nested EPT: emulate EPT also to L1 */
6174 msrs->secondary_ctls_high |=
6175 SECONDARY_EXEC_ENABLE_EPT;
6176 msrs->ept_caps =
6177 VMX_EPT_PAGE_WALK_4_BIT |
6178 VMX_EPT_PAGE_WALK_5_BIT |
6179 VMX_EPTP_WB_BIT |
6180 VMX_EPT_INVEPT_BIT |
6181 VMX_EPT_EXECUTE_ONLY_BIT;
6183 msrs->ept_caps &= ept_caps;
6184 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
6185 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
6186 VMX_EPT_1GB_PAGE_BIT;
6187 if (enable_ept_ad_bits) {
6188 msrs->secondary_ctls_high |=
6189 SECONDARY_EXEC_ENABLE_PML;
6190 msrs->ept_caps |= VMX_EPT_AD_BIT;
6191 }
6192 }
6194 if (cpu_has_vmx_vmfunc()) {
6195 msrs->secondary_ctls_high |=
6196 SECONDARY_EXEC_ENABLE_VMFUNC;
6197 /*
6198 * Advertise EPTP switching unconditionally
6199 * since we emulate it
6200 */
6201 if (enable_ept)
6202 msrs->vmfunc_controls =
6203 VMX_VMFUNC_EPTP_SWITCHING;
6204 }
6206 /*
6207 * Old versions of KVM use the single-context version without
6208 * checking for support, so declare that it is supported even
6209 * though it is treated as global context. The alternative is
6210 * not failing the single-context invvpid, and it is worse.
6211 */
6212 if (enable_vpid) {
6213 msrs->secondary_ctls_high |=
6214 SECONDARY_EXEC_ENABLE_VPID;
6215 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
6216 VMX_VPID_EXTENT_SUPPORTED_MASK;
6217 }
6219 if (enable_unrestricted_guest)
6220 msrs->secondary_ctls_high |=
6221 SECONDARY_EXEC_UNRESTRICTED_GUEST;
6223 if (flexpriority_enabled)
6224 msrs->secondary_ctls_high |=
6225 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6227 /* miscellaneous data */
6228 rdmsr(MSR_IA32_VMX_MISC,
6229 msrs->misc_low,
6230 msrs->misc_high);
6231 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
6232 msrs->misc_low |=
6233 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
6234 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
6235 VMX_MISC_ACTIVITY_HLT;
6236 msrs->misc_high = 0;
6238 /*
6239 * This MSR reports some information about VMX support. We
6240 * should return information about the VMX we emulate for the
6241 * guest, and the VMCS structure we give it - not about the
6242 * VMX support of the underlying hardware.
6243 */
6245 msrs->basic =
6246 VMX_BASIC_TRUE_CTLS |
6247 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
6248 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
6250 if (cpu_has_vmx_basic_inout())
6251 msrs->basic |= VMX_BASIC_INOUT;
6253 /*
6254 * These MSRs specify bits which the guest must keep fixed on
6255 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
6256 * We picked the standard core2 setting.
6257 */
6258 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
6259 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
6260 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
6261 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
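/*
 * Convention: bits set in cr{0,4}_fixed0 must be 1 in the guest's CR0/CR4
 * while VMX is on; bits clear in cr{0,4}_fixed1 must stay 0.
 */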
6263 /* These MSRs specify bits which the guest must keep fixed off. */
6264 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
6265 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
6267 /* highest index: VMX_PREEMPTION_TIMER_VALUE */
6268 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
6269 }
6271 void nested_vmx_hardware_unsetup(void)
6272 {
6273 int i;
6275 if (enable_shadow_vmcs) {
6276 for (i = 0; i < VMX_BITMAP_NR; i++)
6277 free_page((unsigned long)vmx_bitmap[i]);
6278 }
6279 }
6281 __init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
6282 int (*exit_handlers[])(struct kvm_vcpu *))
6283 {
6284 int i;
6286 if (!cpu_has_vmx_shadow_vmcs())
6287 enable_shadow_vmcs = 0;
6288 if (enable_shadow_vmcs) {
6289 for (i = 0; i < VMX_BITMAP_NR; i++) {
6290 /*
6291 * The vmx_bitmap is not tied to a VM and so should
6292 * not be charged to a memcg.
6293 */
6294 vmx_bitmap[i] = (unsigned long *)
6295 __get_free_page(GFP_KERNEL);
6296 if (!vmx_bitmap[i]) {
6297 nested_vmx_hardware_unsetup();
6298 return -ENOMEM;
6299 }
6300 }
6302 init_vmcs_shadow_fields();
6303 }
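/*
 * Wire up the exit handlers that emulate the VMX instruction set on
 * behalf of L1, then expose the nested state save/restore hooks.
 */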
6305 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
6306 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
6307 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
6308 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
6309 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
6310 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
6311 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
6312 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
6313 exit_handlers[EXIT_REASON_VMON] = handle_vmon;
6314 exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
6315 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
6316 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
6318 ops->check_nested_events = vmx_check_nested_events;
6319 ops->get_nested_state = vmx_get_nested_state;
6320 ops->set_nested_state = vmx_set_nested_state;
6321 ops->get_vmcs12_pages = nested_get_vmcs12_pages;
6322 ops->nested_enable_evmcs = nested_enable_evmcs;
6323 ops->nested_get_evmcs_version = nested_get_evmcs_version;