/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"
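/*
 * Map a xAPIC MMIO register offset to its x2APIC MSR index, e.g.
 * X2APIC_MSR(APIC_TASKPRI) = 0x800 + (0x80 >> 4) = 0x808 (IA32_X2APIC_TPR).
 */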
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS 7
#else
#define MAX_NR_USER_RETURN_MSRS 4
#endif

#define MAX_NR_LOADSTORE_MSRS 8
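/*
 * MSR entries for the VM-entry/VM-exit MSR-load and MSR-store areas that KVM
 * manages itself (see msr_autoload/msr_autostore below); KVM never needs more
 * than MAX_NR_LOADSTORE_MSRS entries in these lists.
 */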
struct vmx_msrs {
        unsigned int nr;
        struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};
struct vmx_uret_msr {
        bool load_into_hardware;
        u64 data;
        u64 mask;
};
enum segment_cache_field {
        SEG_FIELD_SEL = 0,
        SEG_FIELD_BASE = 1,
        SEG_FIELD_LIMIT = 2,
        SEG_FIELD_AR = 3,

        SEG_FIELD_NR = 4
};
#define RTIT_ADDR_RANGE 4
struct pt_ctx {
        u64 ctl;
        u64 status;
        u64 output_base;
        u64 output_mask;
        u64 cr3_match;
        u64 addr_a[RTIT_ADDR_RANGE];
        u64 addr_b[RTIT_ADDR_RANGE];
};
struct pt_desc {
        u64 ctl_bitmask;
        u32 num_address_ranges;
        u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
        struct pt_ctx host;
        struct pt_ctx guest;
};
union vmx_exit_reason {
        struct {
                u32 basic               : 16;
                u32 reserved16_25       : 10;
                u32 bus_lock_detected   : 1;
                u32 enclave_mode        : 1;
                u32 smi_pending_mtf     : 1;
                u32 smi_from_vmx_root   : 1;
                u32 reserved30          : 1;
                u32 failed_vmentry      : 1;
        };
        u32 full;
};
static inline bool intel_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
        /*
         * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
         * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
         * greater than zero.  However, KVM only exposes and emulates the MSR
         * to/for the guest if the guest PMU supports at least "Architectural
         * Performance Monitoring Version 2".
         */
        return pmu->version > 1;
}
#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
struct lbr_desc {
        /* Basic info about guest LBR records. */
        struct x86_pmu_lbr records;

        /*
         * Emulate LBR feature via passthrough LBR registers when the
         * per-vcpu guest LBR event is scheduled on the current pcpu.
         *
         * The records may be inaccurate if the host reclaims the LBR.
         */
        struct perf_event *event;

        /* True if LBRs are marked as not intercepted in the MSR bitmap */
        bool msr_passthrough;
};
/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
        /* Has the level 1 guest done vmxon? */
        bool vmxon;
        /* The guest-physical address of the current VMCS L1 keeps for L2 */
        gpa_t current_vmptr;
        /*
         * Cache of the guest's VMCS, existing outside of guest memory.
         * Loaded from guest memory during VMPTRLD. Flushed to guest
         * memory during VMCLEAR and VMPTRLD.
         */
        struct vmcs12 *cached_vmcs12;
        /*
         * Cache of the guest's shadow VMCS, existing outside of guest
         * memory. Loaded from guest memory during VM entry. Flushed
         * to guest memory during VM exit.
         */
        struct vmcs12 *cached_shadow_vmcs12;

        /*
         * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
         */
        struct gfn_to_hva_cache shadow_vmcs12_cache;

        /*
         * GPA to HVA cache for VMCS12
         */
        struct gfn_to_hva_cache vmcs12_cache;
        /*
         * Indicates if the shadow vmcs or enlightened vmcs must be updated
         * with the data held by struct vmcs12.
         */
        bool need_vmcs12_to_shadow_sync;

        /*
         * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
         * changes in MSR bitmap for L1 or switching to a different L2. Note,
         * this flag can only be used reliably in conjunction with a paravirt L1
         * which informs L0 whether any changes to MSR bitmap for L2 were done
         * on its side.
         */
        bool force_msr_bitmap_recalc;

        /*
         * Indicates lazily loaded guest state has not yet been decached from
         * vmcs02.
         */
        bool need_sync_vmcs02_to_vmcs12_rare;

        /*
         * vmcs02 has been initialized, i.e. state that is constant for
         * vmcs02 has been written to the backing VMCS.  Initialization
         * is delayed until L1 actually attempts to run a nested VM.
         */
        bool vmcs02_initialized;
        bool change_vmcs01_virtual_apic_mode;
        bool reload_vmcs01_apic_access_page;
        bool update_vmcs01_cpu_dirty_logging;
        bool update_vmcs01_apicv_status;

        /*
         * Enlightened VMCS has been enabled. It does not mean that L1 has to
         * use it. However, VMX features available to L1 will be limited based
         * on what the enlightened VMCS supports.
         */
        bool enlightened_vmcs_enabled;
        /* L2 must run next, and mustn't decide to exit to L1. */
        bool nested_run_pending;

        /* Pending MTF VM-exit into L1. */
        bool mtf_pending;

        struct loaded_vmcs vmcs02;

        /*
         * Guest pages referred to in the vmcs02 with host-physical
         * pointers, so we must keep them pinned while L2 runs.
         */
        struct kvm_host_map apic_access_page_map;
        struct kvm_host_map virtual_apic_map;
        struct kvm_host_map pi_desc_map;

        struct kvm_host_map msr_bitmap_map;

        struct pi_desc *pi_desc;
        struct hrtimer preemption_timer;
        u64 preemption_timer_deadline;
        bool has_preemption_timer_deadline;
        bool preemption_timer_expired;
        /*
         * Used to snapshot MSRs that are conditionally loaded on VM-Enter in
         * order to propagate the guest's pre-VM-Enter value into vmcs02.  For
         * emulation of VMLAUNCH/VMRESUME, the snapshot will be of L1's value.
         * For KVM_SET_NESTED_STATE, the snapshot is of L2's value, _if_
         * userspace restores MSRs before nested state.  If userspace restores
         * MSRs after nested state, the snapshot holds garbage, but KVM can't
         * detect that, and the garbage value in vmcs02 will be overwritten by
         * MSR restoration in any case.
         */
        u64 pre_vmenter_debugctl;
        u64 pre_vmenter_bndcfgs;

        /* to migrate it to L1 if L2 writes to L1's CR8 directly */
        int l1_tpr_threshold;
        struct nested_vmx_msrs msrs;

        /* SMM related state */
        struct {
                /* in VMX operation on SMM entry? */
                bool vmxon;
                /* in guest mode on SMM entry? */
                bool guest_mode;
        } smm;

        gpa_t hv_evmcs_vmptr;
        struct kvm_host_map hv_evmcs_map;
        struct hv_enlightened_vmcs *hv_evmcs;
};
struct vcpu_vmx {
        struct kvm_vcpu vcpu;
        u8 x2apic_msr_bitmap_mode;
        /*
         * If true, host state has been stored in vmx->loaded_vmcs for
         * the CPU registers that only need to be switched when transitioning
         * to/from the kernel, and the registers have been loaded with guest
         * values.  If false, host state is loaded in the CPU registers
         * and vmx->loaded_vmcs->host_state is invalid.
         */
        bool guest_state_loaded;
        unsigned long exit_qualification;
        u32 exit_intr_info;
        u32 idt_vectoring_info;
        /*
         * User return MSRs are always emulated when enabled in the guest, but
         * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
         * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
         * be loaded into hardware if those conditions aren't met.
         */
        struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
        bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
        u64 msr_host_kernel_gs_base;
        u64 msr_guest_kernel_gs_base;
#endif

        u32 msr_ia32_umwait_control;
        /*
         * loaded_vmcs points to the VMCS currently used in this vcpu. For a
         * non-nested (L1) guest, it always points to vmcs01. For a nested
         * guest (L2), it points to a different VMCS.
         */
        struct loaded_vmcs vmcs01;
        struct loaded_vmcs *loaded_vmcs;
        struct msr_autoload {
                struct vmx_msrs guest;
                struct vmx_msrs host;
        } msr_autoload;

        struct msr_autostore {
                struct vmx_msrs guest;
        } msr_autostore;
        struct {
                int vm86_active;
                ulong save_rflags;
                struct kvm_segment segs[8];
        } rmode;
        struct {
                u32 bitmask; /* 4 bits per segment (1 bit per field) */
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } seg[8];
        } segment_cache;
        bool emulation_required;

        union vmx_exit_reason exit_reason;

        /* Posted interrupt descriptor */
        struct pi_desc pi_desc;

        /* Used if this vCPU is waiting for PI notification wakeup. */
        struct list_head pi_wakeup_list;

        /* Support for a guest hypervisor (nested VMX) */
        struct nested_vmx nested;

        /* Dynamic PLE window. */
        unsigned int ple_window;
        bool ple_window_dirty;

        bool req_immediate_exit;
        /* Support for PML */
#define PML_ENTITY_NUM 512
        struct page *pml_pg;

        /* apic deadline value in host tsc */
        u64 hv_deadline_tsc;

        unsigned long host_debugctlmsr;
        /*
         * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
         * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
         * in msr_ia32_feature_control_valid_bits.
         */
        u64 msr_ia32_feature_control;
        u64 msr_ia32_feature_control_valid_bits;
        /* SGX Launch Control public key hash */
        u64 msr_ia32_sgxlepubkeyhash[4];

        struct pt_desc pt_desc;
        struct lbr_desc lbr_desc;
        /* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS 15
        struct {
                DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
                DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
        } shadow_msr_intercept;
};
struct kvm_vmx {
        struct kvm kvm;

        unsigned int tss_addr;
        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;
        /* Posted Interrupt Descriptor (PID) table for IPI virtualization */
        u64 *pid_table;
};
bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
                        struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
                        unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
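/*
 * Convenience wrapper around the enable/disable helpers above: value == true
 * means intercept the MSR, value == false means pass it through to the guest.
 */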
static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
                                             int type, bool value)
{
        if (value)
                vmx_enable_intercept_for_msr(vcpu, msr, type);
        else
                vmx_disable_intercept_for_msr(vcpu, msr, type);
}
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round.  The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff.  The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes.  The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes.  MSRs not covered by either of the ranges always
 * cause a VM-Exit when accessed by the guest.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base) \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
                                                        u32 msr) \
{ \
        int f = sizeof(unsigned long); \
 \
        if (msr <= 0x1fff) \
                return bitop##_bit(msr, bitmap + base / f); \
        else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) \
                return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
        return (rtype)true; \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop) \
        __BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read, 0x0) \
        __BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)
BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
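/*
 * The invocations above generate vmx_test_msr_bitmap_{read,write}(),
 * vmx_clear_msr_bitmap_{read,write}() and vmx_set_msr_bitmap_{read,write}();
 * a set bit means the corresponding MSR access is intercepted.
 */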
/* Requested Virtual Interrupt (RVI), the low byte of the guest interrupt status. */
static inline u8 vmx_get_rvi(void)
{
        return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}
#define BUILD_CONTROLS_SHADOW(lname, uname, bits) \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \
{ \
        if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
                vmcs_write##bits(uname, val); \
                vmx->loaded_vmcs->controls_shadow.lname = val; \
        } \
} \
static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs) \
{ \
        return vmcs->controls_shadow.lname; \
} \
static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx) \
{ \
        return __##lname##_controls_get(vmx->loaded_vmcs); \
} \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val) \
{ \
        lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \
} \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \
{ \
        lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
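/*
 * Each invocation above generates <name>_controls_{set,get,setbit,clearbit}()
 * accessors, e.g. exec_controls_setbit(), which go through the shadow value
 * in loaded_vmcs so that redundant VMWRITEs to the control fields are skipped.
 */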
/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand.  Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET ((1 << VCPU_REGS_RIP) | \
                                (1 << VCPU_REGS_RSP) | \
                                (1 << VCPU_EXREG_RFLAGS) | \
                                (1 << VCPU_EXREG_PDPTR) | \
                                (1 << VCPU_EXREG_SEGMENTS) | \
                                (1 << VCPU_EXREG_CR0) | \
                                (1 << VCPU_EXREG_CR3) | \
                                (1 << VCPU_EXREG_CR4) | \
                                (1 << VCPU_EXREG_EXIT_INFO_1) | \
                                (1 << VCPU_EXREG_EXIT_INFO_2))
static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_vmx, vcpu);
}
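/*
 * Exit qualification and interrupt info are read from the VMCS only on first
 * use after a VM-Exit and then cached; both are part of VMX_REGS_LAZY_LOAD_SET
 * above.
 */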
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
                kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
                vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        }
        return vmx->exit_qualification;
}
static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
                kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
                vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
        }
        return vmx->exit_intr_info;
}
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
static inline struct vmcs *alloc_vmcs(bool shadow)
{
        return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
                              GFP_KERNEL_ACCOUNT);
}
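/*
 * True if the "enable user wait and pause" secondary exec control is set for
 * the current VMCS, i.e. the guest can execute UMONITOR/UMWAIT/TPAUSE without
 * faulting.
 */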
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
        return secondary_exec_controls_get(vmx) &
                SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
        if (!enable_ept)
                return true;

        return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}
static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
        return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
            (secondary_exec_controls_get(to_vmx(vcpu)) &
            SECONDARY_EXEC_UNRESTRICTED_GUEST));
}
bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
        return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}
void dump_vmcs(struct kvm_vcpu *vcpu);
/* Bits 31:28 of the VMX instruction info field encode the second register operand. */
static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
        return (vmx_instr_info >> 28) & 0xf;
}
static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
{
        return lapic_in_kernel(vcpu) && enable_ipiv;
}

#endif /* __KVM_X86_VMX_H */