/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"

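/* MSR intercept types for the MSR permission bitmaps: read, write, or both. */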
#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

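/*
 * Translate a xAPIC MMIO register offset to its x2APIC MSR index, e.g.
 * X2APIC_MSR(APIC_TASKPRI) is 0x808 (IA32_X2APIC_TPR): the x2APIC MSRs
 * start at APIC_BASE_MSR (0x800) and each covers 16 bytes of MMIO space.
 */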
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

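/*
 * Maximum number of user-return MSRs that may be loaded on behalf of the
 * guest; 64-bit builds need extra slots, e.g. for the SYSCALL MSRs
 * (MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK).
 */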
#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

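/* MSR list used for the VM-Entry/VM-Exit MSR-load and MSR-store areas. */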
struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[MAX_NR_LOADSTORE_MSRS];
};

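/*
 * A user-return MSR slot: 'data' is the guest value, 'mask' selects the bits
 * of it that are actually loaded (the rest keep the host value), and
 * 'load_into_hardware' says whether the MSR needs to be written to hardware
 * for this vCPU at all.
 */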
struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

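/* Fields of a segment register that may be valid in the per-vCPU segment cache. */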
enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE		4

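/* Intel Processor Trace (PT) MSR state, kept for both the host and the guest. */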
struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 num_address_ranges;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

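/*
 * Layout of the 32-bit VM-Exit reason: the basic exit reason lives in the low
 * 16 bits, the remaining bits are modifier flags (e.g. failed VM-Entry).
 */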
union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};

#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate LBR feature via passthrough LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;

	/* True if LBRs are marked as not intercepted in the MSR bitmap */
	bool msr_passthrough;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level 1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
	 */
	struct gfn_to_hva_cache shadow_vmcs12_cache;

	/*
	 * GPA to HVA cache for VMCS12
	 */
	struct gfn_to_hva_cache vmcs12_cache;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;
	bool update_vmcs01_apicv_status;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1.  */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	u8                    fail;
	u8                    x2apic_msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values.  If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool                  guest_state_loaded;

	unsigned long         exit_qualification;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=0, thus the SYSCALL MSRs don't need to
	 * be loaded into hardware unless the guest is in 64-bit mode with
	 * SYSCALL enabled.
	 */
	struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool                  guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64                   msr_host_kernel_gs_base;
	u64                   msr_guest_kernel_gs_base;
#endif

	u64                   spec_ctrl;
	u32                   msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Used if this vCPU is waiting for PI notification wakeup. */
	struct list_head pi_wakeup_list;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	15
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
	/* Posted Interrupt Descriptor (PID) table for IPI virtualization */
	u64 *pid_table;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round.  The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff.  The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes.  The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes.  MSRs not covered by either of the ranges always
 * VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap,  \
						       u32 msr)                \
{                                                                              \
	int f = sizeof(unsigned long);                                         \
									       \
	if (msr <= 0x1fff)                                                     \
		return bitop##_bit(msr, bitmap + base / f);                    \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))                   \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true;                                                    \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)                  \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)     \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

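/*
 * These expand to vmx_test_msr_bitmap_{read,write}(),
 * vmx_clear_msr_bitmap_{read,write}() and vmx_set_msr_bitmap_{read,write}().
 */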
BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)

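/* Requested Virtual Interrupt: bits 7:0 of the guest interrupt status field. */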
static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

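/*
 * Generate {get,set,setbit,clearbit} accessors for a VMCS control field that
 * go through a software shadow, avoiding VMREADs and redundant VMWRITEs.
 */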
#define BUILD_CONTROLS_SHADOW(lname, uname, bits)                               \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val)      \
{                                                                               \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {                   \
		vmcs_write##bits(uname, val);                                   \
		vmx->loaded_vmcs->controls_shadow.lname = val;                  \
	}                                                                       \
}                                                                               \
static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs)        \
{                                                                               \
	return vmcs->controls_shadow.lname;                                     \
}                                                                               \
static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx)                \
{                                                                               \
	return __##lname##_controls_get(vmx->loaded_vmcs);                      \
}                                                                               \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val)   \
{                                                                               \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);             \
}                                                                               \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \
{                                                                               \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);            \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)

/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand.  Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |         \
				(1 << VCPU_REGS_RSP) |          \
				(1 << VCPU_EXREG_RFLAGS) |      \
				(1 << VCPU_EXREG_PDPTR) |       \
				(1 << VCPU_EXREG_SEGMENTS) |    \
				(1 << VCPU_EXREG_CR0) |         \
				(1 << VCPU_EXREG_CR3) |         \
				(1 << VCPU_EXREG_CR4) |         \
				(1 << VCPU_EXREG_EXIT_INFO_1) | \
				(1 << VCPU_EXREG_EXIT_INFO_2))

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

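/*
 * Exit qualification and interrupt info are read from the VMCS on first use
 * after a VM-Exit and then served from the register cache.
 */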
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

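/*
 * Without EPT, #PF must always be intercepted for shadow paging.  With EPT,
 * #PF is only intercepted when KVM emulates a smaller guest MAXPHYADDR and
 * must inject reserved-bit page faults itself.
 */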
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}

static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
{
	return lapic_in_kernel(vcpu) && enable_ipiv;
}

#endif /* __KVM_X86_VMX_H */