/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"

extern const u32 vmx_msr_index[];

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
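
/*
 * Worked example (illustration only): xAPIC MMIO registers are 16 bytes
 * apart, while x2APIC packs one register per MSR starting at APIC_BASE_MSR
 * (0x800), hence the >> 4:
 *
 *	X2APIC_MSR(APIC_TASKPRI) == 0x800 + (0x80 >> 4) == 0x808
 */
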
#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[MAX_NR_LOADSTORE_MSRS];
};
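
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * load/store lists are walked by hardware on VM-entry/VM-exit, so queueing
 * an MSR is just filling the next vmx_msr_entry and bumping ->nr:
 *
 *	static void example_add_msr(struct vmx_msrs *m, u32 index, u64 data)
 *	{
 *		if (m->nr < MAX_NR_LOADSTORE_MSRS) {
 *			m->val[m->nr].index = index;
 *			m->val[m->nr].value = data;
 *			m->nr++;
 *		}
 *	}
 *
 * The real code in vmx.c must also keep the VMCS count fields (e.g.
 * VM_ENTRY_MSR_LOAD_COUNT) in sync with ->nr.
 */
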
struct vmx_uret_msr {
	unsigned int slot; /* The MSR's slot in kvm_user_return_msrs. */
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE		4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};
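
/*
 * Usage example (illustration only): 'full' is the raw 32-bit exit reason
 * for reporting, while the bitfields decode it in place;
 * report_failed_entry() is a hypothetical consumer:
 *
 *	union vmx_exit_reason reason = vmx->exit_reason;
 *
 *	if (reason.failed_vmentry)
 *		report_failed_entry(reason.full);
 *	else if (reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
 *		...;
 */
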
#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level 1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;

	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
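
	/*
	 * Flow sketch (assumed, simplified from nested.c): VMPTRLD fills the
	 * cache from guest memory and VMCLEAR/VMPTRLD flush it back, so in
	 * between the cache is authoritative:
	 *
	 *	kvm_read_guest(vcpu->kvm, vmptr, cached_vmcs12, VMCS12_SIZE);
	 *	...		// emulate L2 using cached_vmcs12
	 *	kvm_write_guest(vcpu->kvm, vmptr, cached_vmcs12, VMCS12_SIZE);
	 */
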
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates lazily loaded guest state has not yet been decached
	 * from vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	u8                    fail;
	u8                    msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool                  guest_state_loaded;

	unsigned long         exit_qualification;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;

	struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	int                   nr_uret_msrs;
	int                   nr_active_uret_msrs;
	bool                  guest_uret_msrs_loaded;

#ifdef CONFIG_X86_64
	u64                   msr_host_kernel_gs_base;
	u64                   msr_guest_kernel_gs_base;
#endif

	u32                   msr_ia32_umwait_control;

	u32                   secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	13
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
		   int root_level);

void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
			       u32 msr, int type, bool value);

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname) \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \
{ \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
		vmcs_write32(uname, val); \
		vmx->loaded_vmcs->controls_shadow.lname = val; \
	} \
} \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx) \
{ \
	return vmx->loaded_vmcs->controls_shadow.lname; \
} \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \
{ \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \
} \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{ \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
}

BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
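
/*
 * Usage example (illustration only): each invocation above generates
 * <lname>_controls_{set,get,setbit,clearbit}() helpers, so toggling the
 * TPR shadow becomes:
 *
 *	exec_controls_setbit(vmx, CPU_BASED_TPR_SHADOW);
 *	exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
 *
 * Both funnel through exec_controls_set(), which skips the VMWRITE to
 * CPU_BASED_VM_EXEC_CONTROL whenever the cached shadow already matches.
 */
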
static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR0)
				  | (1 << VCPU_EXREG_CR3)
				  | (1 << VCPU_EXREG_CR4)
				  | (1 << VCPU_EXREG_EXIT_INFO_1)
				  | (1 << VCPU_EXREG_EXIT_INFO_2));
	vcpu->arch.regs_dirty = 0;
}
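
/*
 * Example of the lazy-caching contract (illustration only): clearing a
 * bit in regs_avail forces the next accessor to VMREAD, after which the
 * value is served from the cache:
 *
 *	unsigned long rip = kvm_rip_read(vcpu);	// VMREADs GUEST_RIP
 *
 *	rip = kvm_rip_read(vcpu);		// cached, no VMREAD
 */
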
static inline u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
	if (vmx_pt_mode_is_system())
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
	if (vmx_pt_mode_is_system())
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);
u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}
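
/*
 * Example (illustration only): because 'vcpu' is embedded in struct
 * vcpu_vmx, to_vmx() is pure pointer arithmetic via container_of() and
 * round-trips exactly:
 *
 *	struct vcpu_vmx *vmx = to_vmx(vcpu);
 *
 *	WARN_ON_ONCE(&vmx->vcpu != vcpu);	// never fires
 */
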
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}
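
/*
 * Usage example (illustration only): exit handlers read the cached value
 * rather than VMREADing VM_EXIT_INTR_INFO repeatedly:
 *
 *	u32 intr_info = vmx_get_intr_info(vcpu);
 *
 *	if (is_machine_check(intr_info) || is_nmi(intr_info))
 *		return;	// e.g. handled elsewhere, before IRQs are enabled
 */
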
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}
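
/*
 * Lifecycle sketch (illustration only): a bare VMCS from alloc_vmcs() is
 * paired with free_vmcs(), while per-vCPU users go through
 * alloc_loaded_vmcs()/free_loaded_vmcs() instead:
 *
 *	struct vmcs *vmcs = alloc_vmcs(false);
 *
 *	if (vmcs)
 *		free_vmcs(vmcs);
 */
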
static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return vmx->secondary_exec_control &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}
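
/*
 * Worked example (illustration only): with allow_smaller_maxphyaddr, a
 * guest advertising MAXPHYADDR=36 on a host with x86_phys_bits=46 expects
 * a reserved-bit #PF when a PTE sets any of bits 36-45, yet the hardware
 * page walk would succeed; intercepting #PF lets KVM inject the
 * architecturally correct fault.
 */
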
static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */