/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "ops.h"
#include "vmcs.h"

extern const u32 vmx_msr_index[];
#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

/*
 * Each 16-byte xAPIC MMIO register maps to a single x2APIC MSR: the MMIO
 * offset is scaled down by 16, e.g. TPR at offset 0x80 becomes MSR 0x808
 * (APIC_BASE_MSR + 0x8).
 */
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
#ifdef CONFIG_X86_64
#define NR_SHARED_MSRS	7
#else
#define NR_SHARED_MSRS	4
#endif
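/*
 * The three extra 64-bit-only slots above cover the MSRs that are switched
 * via the shared-MSR mechanism only on x86-64, notably the SYSCALL MSRs
 * (e.g. MSR_LSTAR); see the vmx_msr_index[] table for the exact list.
 */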
#define NR_LOADSTORE_MSRS 8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[NR_LOADSTORE_MSRS];
};
struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};
enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};
/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];     /* Posted interrupt requested */
	union {
		struct {
				/* bit 256 - Outstanding Notification */
			u16	on	: 1,
				/* bit 257 - Suppress Notification */
				sn	: 1,
				/* bit 271:258 - Reserved */
				rsvd_1	: 14;
				/* bit 279:272 - Notification Vector */
			u8	nv;
				/* bit 287:280 - Reserved */
			u8	rsvd_2;
				/* bit 319:288 - Notification Destination */
			u32	ndst;
		};
		u64 control;
	};
	u32 rsvd[6];
} __aligned(64);
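/*
 * pir[] is a 256-bit bitmap with one bit per interrupt vector.  Hardware
 * and software both write the descriptor concurrently, which is why the
 * accessors further down use atomic bitops on the 64-bit control word and
 * why the structure is 64-byte aligned.
 */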
#define RTIT_ADDR_RANGE		4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};
struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};
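/*
 * When Intel PT operates in host/guest mode, the host and guest RTIT
 * contexts above are swapped around VM-entry and VM-exit; in system mode
 * tracing stays with the host context and the guest fields are unused.
 */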
/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates lazily loaded guest state has not yet been decached
	 * from vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;

	/*
	 * Enlightened VMCS has been enabled.  It does not mean that L1 has to
	 * use it.  However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};
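/*
 * The hv_evmcs fields above track the Hyper-V enlightened VMCS that an
 * eVMCS-aware L1 supplies in place of an ordinary VMCS: its guest-physical
 * address, the mapping used to access it, and the resulting host pointer.
 */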
struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	u8                    fail;
	u8                    msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values.  If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool                  guest_state_loaded;

	unsigned long         exit_qualification;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;

	struct shared_msr_entry guest_msrs[NR_SHARED_MSRS];
	int                   nmsrs;
	int                   save_nmsrs;
	bool                  guest_msrs_ready;
#ifdef CONFIG_X86_64
	u64                   msr_host_kernel_gs_base;
	u64                   msr_guest_kernel_gs_base;
#endif

	u64                   spec_ctrl;
	u32                   msr_ia32_umwait_control;

	u32 secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;

	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;

	int vpid;
	bool emulation_required;

	u32 exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control.  FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;

	struct pt_desc pt_desc;
};

enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};

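/*
 * ept_pointer_lock serializes updates to the vCPUs' EPT pointers and to
 * ept_pointers_match; the cached match state lets the Hyper-V remote TLB
 * flush path issue a single flush when every vCPU shares one EPT pointer.
 */
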
bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long cr3);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
				(unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
				  (unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
{
	return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_set_on(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_ON,
		  (unsigned long *)&pi_desc->control);
}

static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_SN,
		  (unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
}

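/*
 * Illustrative sketch (not the in-tree delivery code): posting an
 * interrupt combines the helpers above roughly as follows.  The vector's
 * bit is set in the PIR first; only the 0->1 transition of ON requires a
 * notification IPI to the target CPU.
 *
 *	if (pi_test_and_set_pir(vector, pi_desc))
 *		return;				// already pending
 *	if (pi_test_and_set_on(pi_desc))
 *		return;				// notification outstanding
 *	send_notification_ipi(cpu);		// hypothetical helper
 */
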
/*
 * RVI is the low byte of the 16-bit guest interrupt status field; the high
 * byte holds SVI, the highest-priority vector currently in service.
 */
static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname)				    \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	    \
{									    \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		    \
		vmcs_write32(uname, val);				    \
		vmx->loaded_vmcs->controls_shadow.lname = val;		    \
	}								    \
}									    \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		    \
{									    \
	return vmx->loaded_vmcs->controls_shadow.lname;			    \
}									    \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)  \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	    \
}									    \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	    \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
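/*
 * For reference, BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
 * expands to pin_controls_set()/pin_controls_get()/pin_controls_setbit()/
 * pin_controls_clearbit().  The last value written is cached in
 * loaded_vmcs->controls_shadow.pin, so rewriting an unchanged value skips
 * the VMWRITE entirely.
 */
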
static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				| (1 << VCPU_EXREG_RFLAGS)
				| (1 << VCPU_EXREG_PDPTR)
				| (1 << VCPU_EXREG_SEGMENTS)
				| (1 << VCPU_EXREG_CR3)
				| (1 << VCPU_EXREG_EXIT_INFO_1)
				| (1 << VCPU_EXREG_EXIT_INFO_2));
	vcpu->arch.regs_dirty = 0;
}
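/*
 * regs_avail is set to all-ones except for the registers above, i.e. the
 * ones VMX reads lazily from the VMCS; clearing an avail bit forces the
 * next accessor to do a fresh VMREAD after a VM-exit.
 */
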
static inline u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
	if (vmx_pt_mode_is_system())
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
	if (vmx_pt_mode_is_system())
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}
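/*
 * "Toggled dynamically" above means the EFER/PERF_GLOBAL_CTRL load bits
 * are flipped at runtime via the controls-shadow helpers, e.g.
 * vm_entry_controls_setbit(vmx, VM_ENTRY_LOAD_IA32_EFER), rather than
 * being part of the base configuration returned here.
 */
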
u32 vmx_exec_control(struct vcpu_vmx *vmx);
u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}
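/*
 * These container_of() wrappers recover the VMX wrapper structs from the
 * generic KVM objects embedded within them, letting arch-neutral code pass
 * around struct kvm / struct kvm_vcpu pointers while VMX code gets back to
 * its private state without any extra lookup.
 */
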
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}
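/*
 * Both accessors above use the same lazy-read pattern: the first call
 * after a VM-exit performs the VMREAD and marks the exit-info register
 * available; vmx_register_cache_reset() clears the avail bits so the next
 * exit reads fresh values.
 */
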
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return vmx->secondary_exec_control &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */