KVM: x86: Move vendor CR4 validity check to dedicated kvm_x86_ops hook
arch/x86/kvm/vmx/vmx.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"

extern const u32 vmx_msr_index[];

#define MSR_TYPE_R      1
#define MSR_TYPE_W      2
#define MSR_TYPE_RW     3

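/*
 * Map an xAPIC MMIO register offset to its x2APIC MSR index, e.g. the
 * TPR at offset 0x80 maps to MSR 0x808 (APIC_BASE_MSR is 0x800).
 */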
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS 7
#else
#define MAX_NR_USER_RETURN_MSRS 4
#endif

#define MAX_NR_LOADSTORE_MSRS   8

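/*
 * Backing storage for the VMCS MSR autoload/autostore lists; each list
 * holds at most MAX_NR_LOADSTORE_MSRS entries.
 */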
struct vmx_msrs {
        unsigned int            nr;
        struct vmx_msr_entry    val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
        unsigned int slot; /* The MSR's slot in kvm_user_return_msrs. */
        u64 data;
        u64 mask;
};

enum segment_cache_field {
        SEG_FIELD_SEL = 0,
        SEG_FIELD_BASE = 1,
        SEG_FIELD_LIMIT = 2,
        SEG_FIELD_AR = 3,

        SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE         4

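/*
 * Snapshot of the RTIT_* (Intel Processor Trace) MSRs, kept separately
 * for host and guest so PT state can be switched across VM-entry/exit.
 */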
struct pt_ctx {
        u64 ctl;
        u64 status;
        u64 output_base;
        u64 output_mask;
        u64 cr3_match;
        u64 addr_a[RTIT_ADDR_RANGE];
        u64 addr_b[RTIT_ADDR_RANGE];
};

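/*
 * Per-vCPU Processor Trace bookkeeping: the raw CPUID 0x14 capability
 * leaves, the number of supported address ranges, and the host and
 * guest register contexts.
 */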
struct pt_desc {
        u64 ctl_bitmask;
        u32 addr_range;
        u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
        struct pt_ctx host;
        struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
        /* Has the level1 guest done vmxon? */
        bool vmxon;
        gpa_t vmxon_ptr;
        bool pml_full;

        /* The guest-physical address of the current VMCS L1 keeps for L2 */
        gpa_t current_vmptr;
        /*
         * Cache of the guest's VMCS, existing outside of guest memory.
         * Loaded from guest memory during VMPTRLD. Flushed to guest
         * memory during VMCLEAR and VMPTRLD.
         */
        struct vmcs12 *cached_vmcs12;
        /*
         * Cache of the guest's shadow VMCS, existing outside of guest
         * memory. Loaded from guest memory during VM entry. Flushed
         * to guest memory during VM exit.
         */
        struct vmcs12 *cached_shadow_vmcs12;

        /*
         * Indicates if the shadow vmcs or enlightened vmcs must be updated
         * with the data held by struct vmcs12.
         */
        bool need_vmcs12_to_shadow_sync;
        bool dirty_vmcs12;

        /*
         * Indicates lazily loaded guest state has not yet been decached from
         * vmcs02.
         */
        bool need_sync_vmcs02_to_vmcs12_rare;

        /*
         * vmcs02 has been initialized, i.e. state that is constant for
         * vmcs02 has been written to the backing VMCS.  Initialization
         * is delayed until L1 actually attempts to run a nested VM.
         */
        bool vmcs02_initialized;

        bool change_vmcs01_virtual_apic_mode;
        bool reload_vmcs01_apic_access_page;

        /*
         * Enlightened VMCS has been enabled. It does not mean that L1 has to
         * use it. However, VMX features available to L1 will be limited based
         * on what the enlightened VMCS supports.
         */
        bool enlightened_vmcs_enabled;

        /* L2 must run next, and mustn't decide to exit to L1. */
        bool nested_run_pending;

        /* Pending MTF VM-exit into L1.  */
        bool mtf_pending;

        struct loaded_vmcs vmcs02;

        /*
         * Guest pages referred to in the vmcs02 with host-physical
         * pointers, so we must keep them pinned while L2 runs.
         */
        struct page *apic_access_page;
        struct kvm_host_map virtual_apic_map;
        struct kvm_host_map pi_desc_map;

        struct kvm_host_map msr_bitmap_map;

        struct pi_desc *pi_desc;
        bool pi_pending;
        u16 posted_intr_nv;

        struct hrtimer preemption_timer;
        u64 preemption_timer_deadline;
        bool has_preemption_timer_deadline;
        bool preemption_timer_expired;

        /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
        u64 vmcs01_debugctl;
        u64 vmcs01_guest_bndcfgs;

        /* to migrate it to L1 if L2 writes to L1's CR8 directly */
        int l1_tpr_threshold;

        u16 vpid02;
        u16 last_vpid;

        struct nested_vmx_msrs msrs;

        /* SMM related state */
        struct {
                /* in VMX operation on SMM entry? */
                bool vmxon;
                /* in guest mode on SMM entry? */
                bool guest_mode;
        } smm;

        gpa_t hv_evmcs_vmptr;
        struct kvm_host_map hv_evmcs_map;
        struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        u8                    fail;
        u8                    msr_bitmap_mode;

        /*
         * If true, host state has been stored in vmx->loaded_vmcs for
         * the CPU registers that only need to be switched when transitioning
         * to/from the kernel, and the registers have been loaded with guest
         * values.  If false, host state is loaded in the CPU registers
         * and vmx->loaded_vmcs->host_state is invalid.
         */
        bool                  guest_state_loaded;

        unsigned long         exit_qualification;
        u32                   exit_intr_info;
        u32                   idt_vectoring_info;
        ulong                 rflags;

        struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
        int                   nr_uret_msrs;
        int                   nr_active_uret_msrs;
        bool                  guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
        u64                   msr_host_kernel_gs_base;
        u64                   msr_guest_kernel_gs_base;
#endif

        u64                   spec_ctrl;
        u32                   msr_ia32_umwait_control;

        u32 secondary_exec_control;

        /*
         * loaded_vmcs points to the VMCS currently used in this vcpu. For a
         * non-nested (L1) guest, it always points to vmcs01. For a nested
         * guest (L2), it points to a different VMCS.
         */
        struct loaded_vmcs    vmcs01;
        struct loaded_vmcs   *loaded_vmcs;

        struct msr_autoload {
                struct vmx_msrs guest;
                struct vmx_msrs host;
        } msr_autoload;

        struct msr_autostore {
                struct vmx_msrs guest;
        } msr_autostore;

        struct {
                int vm86_active;
                ulong save_rflags;
                struct kvm_segment segs[8];
        } rmode;
        struct {
                u32 bitmask; /* 4 bits per segment (1 bit per field) */
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } seg[8];
        } segment_cache;
        int vpid;
        bool emulation_required;

        u32 exit_reason;

        /* Posted interrupt descriptor */
        struct pi_desc pi_desc;

        /* Support for a guest hypervisor (nested VMX) */
        struct nested_vmx nested;

        /* Dynamic PLE window. */
        unsigned int ple_window;
        bool ple_window_dirty;

        bool req_immediate_exit;

        /* Support for PML */
#define PML_ENTITY_NUM          512
        struct page *pml_pg;

        /* apic deadline value in host tsc */
        u64 hv_deadline_tsc;

        u64 current_tsc_ratio;

        unsigned long host_debugctlmsr;

        /*
         * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
         * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
         * in msr_ia32_feature_control_valid_bits.
         */
        u64 msr_ia32_feature_control;
        u64 msr_ia32_feature_control_valid_bits;
        u64 ept_pointer;

        struct pt_desc pt_desc;

        /* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS   13
        struct {
                DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
                DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
        } shadow_msr_intercept;
};

enum ept_pointers_status {
        EPT_POINTERS_CHECK = 0,
        EPT_POINTERS_MATCH = 1,
        EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
        struct kvm kvm;

        unsigned int tss_addr;
        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;

        enum ept_pointers_status ept_pointers_match;
        spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
                        struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
                        unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
                   int root_level);

void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

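/*
 * RVI (Requesting Virtual Interrupt) lives in the low byte of the guest
 * interrupt status VMCS field; the high byte holds SVI.
 */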
static inline u8 vmx_get_rvi(void)
{
        return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

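/*
 * Generate accessors for a VMCS control field that go through a software
 * shadow in the loaded_vmcs, avoiding a VMWRITE (and a VMREAD on the get
 * side) when the value is unchanged.
 */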
#define BUILD_CONTROLS_SHADOW(lname, uname)                                 \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)      \
{                                                                           \
        if (vmx->loaded_vmcs->controls_shadow.lname != val) {               \
                vmcs_write32(uname, val);                                   \
                vmx->loaded_vmcs->controls_shadow.lname = val;              \
        }                                                                   \
}                                                                           \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)                \
{                                                                           \
        return vmx->loaded_vmcs->controls_shadow.lname;                     \
}                                                                           \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
{                                                                           \
        lname##_controls_set(vmx, lname##_controls_get(vmx) | val);         \
}                                                                           \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{                                                                           \
        lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);        \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)

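/*
 * Illustrative use of the generated helpers (not part of the original
 * header): toggling a pin-based control through the shadow, e.g.
 *
 *	pin_controls_setbit(vmx, PIN_BASED_EXT_INTR_MASK);
 *	pin_controls_clearbit(vmx, PIN_BASED_EXT_INTR_MASK);
 *
 * Only a call that actually changes the cached value hits the VMCS.
 */

/*
 * Invalidate the register cache after VM-exit: registers that live in the
 * VMCS (RIP/RSP, RFLAGS, PDPTRs, segments, CR0/CR3/CR4, exit info) are
 * marked stale so they are lazily re-read via VMREAD; GPRs are switched by
 * software and stay available.
 */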
static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
        vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
                                  | (1 << VCPU_EXREG_RFLAGS)
                                  | (1 << VCPU_EXREG_PDPTR)
                                  | (1 << VCPU_EXREG_SEGMENTS)
                                  | (1 << VCPU_EXREG_CR0)
                                  | (1 << VCPU_EXREG_CR3)
                                  | (1 << VCPU_EXREG_CR4)
                                  | (1 << VCPU_EXREG_EXIT_INFO_1)
                                  | (1 << VCPU_EXREG_EXIT_INFO_2));
        vcpu->arch.regs_dirty = 0;
}

static inline u32 vmx_vmentry_ctrl(void)
{
        u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
        if (vmx_pt_mode_is_system())
                vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
                                  VM_ENTRY_LOAD_IA32_RTIT_CTL);
        /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
        return vmentry_ctrl &
                ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
        u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
        if (vmx_pt_mode_is_system())
                vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
                                 VM_EXIT_CLEAR_IA32_RTIT_CTL);
        /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
        return vmexit_ctrl &
                ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);
u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);

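/*
 * Convert the generic KVM structures embedded in the VMX-specific ones
 * back to their containers, e.g. to_vmx(vcpu)->nested.vmxon.
 */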
static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_vmx, vcpu);
}

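/*
 * Exit qualification and interrupt info are read from the VMCS on first
 * use after a VM-exit and then served from the cache; the cache itself is
 * invalidated by vmx_register_cache_reset() above.
 */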
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
                kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
                vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        }
        return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
                kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
                vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
        }
        return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

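/* Allocate a (shadow) VMCS on the current CPU, charged to the VM's cgroup. */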
static inline struct vmcs *alloc_vmcs(bool shadow)
{
        return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
                              GFP_KERNEL_ACCOUNT);
}

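/*
 * Propagate the current TSC scaling ratio into the VMCS and remember it
 * so redundant TSC_MULTIPLIER writes can be skipped.
 */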
static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
        vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
        vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
        return vmx->secondary_exec_control &
                SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

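/*
 * #PF must be intercepted when shadow paging is in use (no EPT), or when
 * KVM emulates a guest MAXPHYADDR smaller than the host's and must inspect
 * faults for reserved-bit violations.
 */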
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
        if (!enable_ept)
                return true;

        return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

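/*
 * Unrestricted guest is always in effect for L1 when enabled; for L2 the
 * control must also be set in the current (vmcs02) secondary execution
 * controls, i.e. L1 must expose it as well.
 */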
static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
        return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
            (secondary_exec_controls_get(to_vmx(vcpu)) &
            SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
        return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */