/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"

extern const u32 vmx_msr_index[];

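/* Which direction(s) of an MSR access to intercept (or pass through). */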
#define MSR_TYPE_R      1
#define MSR_TYPE_W      2
#define MSR_TYPE_RW     3

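/* Map an xAPIC MMIO register offset to the corresponding x2APIC MSR index. */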
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

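/*
 * Maximum number of "user return" MSRs, i.e. MSRs whose host values are
 * restored lazily on return to userspace (e.g. the SYSCALL MSRs on 64-bit).
 */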
#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS 7
#else
#define MAX_NR_USER_RETURN_MSRS 4
#endif

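/* MSR entries for the VMCS VM-entry/VM-exit MSR-load and MSR-store areas. */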
#define MAX_NR_LOADSTORE_MSRS   8

struct vmx_msrs {
        unsigned int            nr;
        struct vmx_msr_entry    val[MAX_NR_LOADSTORE_MSRS];
};

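/* Per-vCPU view of an MSR that is restored to its host value on return to userspace. */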
struct vmx_uret_msr {
        unsigned int slot; /* The MSR's slot in kvm_user_return_msrs. */
        u64 data;
        u64 mask;
};

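/* Segment register fields cached in vcpu_vmx.segment_cache to avoid redundant VMREADs. */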
enum segment_cache_field {
        SEG_FIELD_SEL = 0,
        SEG_FIELD_BASE = 1,
        SEG_FIELD_LIMIT = 2,
        SEG_FIELD_AR = 3,

        SEG_FIELD_NR = 4
};

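/*
 * Intel Processor Trace (PT) state.  pt_ctx holds the RTIT MSR values for one
 * context (host or guest); RTIT_ADDR_RANGE is the maximum number of address
 * filter range pairs (ADDRn_A/B) that KVM supports.
 */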
#define RTIT_ADDR_RANGE         4

struct pt_ctx {
        u64 ctl;
        u64 status;
        u64 output_base;
        u64 output_mask;
        u64 cr3_match;
        u64 addr_a[RTIT_ADDR_RANGE];
        u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
        u64 ctl_bitmask;
        u32 addr_range;
        u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
        struct pt_ctx host;
        struct pt_ctx guest;
};

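/* Decoded view of the 32-bit VM-exit reason field. */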
union vmx_exit_reason {
        struct {
                u32     basic                   : 16;
                u32     reserved16              : 1;
                u32     reserved17              : 1;
                u32     reserved18              : 1;
                u32     reserved19              : 1;
                u32     reserved20              : 1;
                u32     reserved21              : 1;
                u32     reserved22              : 1;
                u32     reserved23              : 1;
                u32     reserved24              : 1;
                u32     reserved25              : 1;
                u32     bus_lock_detected       : 1;
                u32     enclave_mode            : 1;
                u32     smi_pending_mtf         : 1;
                u32     smi_from_vmx_root       : 1;
                u32     reserved30              : 1;
                u32     failed_vmentry          : 1;
        };
        u32 full;
};

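/* Accessors for the guest LBR state embedded in vcpu_vmx. */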
#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct lbr_desc {
        /* Basic info about guest LBR records. */
        struct x86_pmu_lbr records;

        /*
         * Emulate LBR feature via passthrough LBR registers when the
         * per-vcpu guest LBR event is scheduled on the current pcpu.
         *
         * The records may be inaccurate if the host reclaims the LBR.
         */
        struct perf_event *event;

        /* True if LBRs are marked as not intercepted in the MSR bitmap */
        bool msr_passthrough;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
        /* Has the level1 guest done vmxon? */
        bool vmxon;
        gpa_t vmxon_ptr;
        bool pml_full;

        /* The guest-physical address of the current VMCS L1 keeps for L2 */
        gpa_t current_vmptr;
        /*
         * Cache of the guest's VMCS, existing outside of guest memory.
         * Loaded from guest memory during VMPTRLD. Flushed to guest
         * memory during VMCLEAR and VMPTRLD.
         */
        struct vmcs12 *cached_vmcs12;
        /*
         * Cache of the guest's shadow VMCS, existing outside of guest
         * memory. Loaded from guest memory during VM entry. Flushed
         * to guest memory during VM exit.
         */
        struct vmcs12 *cached_shadow_vmcs12;

        /*
         * Indicates if the shadow vmcs or enlightened vmcs must be updated
         * with the data held by struct vmcs12.
         */
        bool need_vmcs12_to_shadow_sync;
        bool dirty_vmcs12;

        /*
         * Indicates lazily loaded guest state has not yet been decached from
         * vmcs02.
         */
        bool need_sync_vmcs02_to_vmcs12_rare;

        /*
         * vmcs02 has been initialized, i.e. state that is constant for
         * vmcs02 has been written to the backing VMCS.  Initialization
         * is delayed until L1 actually attempts to run a nested VM.
         */
        bool vmcs02_initialized;

        bool change_vmcs01_virtual_apic_mode;
        bool reload_vmcs01_apic_access_page;
        bool update_vmcs01_cpu_dirty_logging;

        /*
         * Enlightened VMCS has been enabled. It does not mean that L1 has to
         * use it. However, VMX features available to L1 will be limited based
         * on what the enlightened VMCS supports.
         */
        bool enlightened_vmcs_enabled;

        /* L2 must run next, and mustn't decide to exit to L1. */
        bool nested_run_pending;

        /* Pending MTF VM-exit into L1.  */
        bool mtf_pending;

        struct loaded_vmcs vmcs02;

        /*
         * Guest pages referred to in the vmcs02 with host-physical
         * pointers, so we must keep them pinned while L2 runs.
         */
        struct page *apic_access_page;
        struct kvm_host_map virtual_apic_map;
        struct kvm_host_map pi_desc_map;

        struct kvm_host_map msr_bitmap_map;

        struct pi_desc *pi_desc;
        bool pi_pending;
        u16 posted_intr_nv;

        struct hrtimer preemption_timer;
        u64 preemption_timer_deadline;
        bool has_preemption_timer_deadline;
        bool preemption_timer_expired;

        /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
        u64 vmcs01_debugctl;
        u64 vmcs01_guest_bndcfgs;

        /* to migrate it to L1 if L2 writes to L1's CR8 directly */
        int l1_tpr_threshold;

        u16 vpid02;
        u16 last_vpid;

        struct nested_vmx_msrs msrs;

        /* SMM related state */
        struct {
                /* in VMX operation on SMM entry? */
                bool vmxon;
                /* in guest mode on SMM entry? */
                bool guest_mode;
        } smm;

        gpa_t hv_evmcs_vmptr;
        struct kvm_host_map hv_evmcs_map;
        struct hv_enlightened_vmcs *hv_evmcs;
};

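/* VMX-specific per-vCPU state; retrieved from a kvm_vcpu via to_vmx(). */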
struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        u8                    fail;
        u8                    msr_bitmap_mode;

        /*
         * If true, host state has been stored in vmx->loaded_vmcs for
         * the CPU registers that only need to be switched when transitioning
         * to/from the kernel, and the registers have been loaded with guest
         * values.  If false, host state is loaded in the CPU registers
         * and vmx->loaded_vmcs->host_state is invalid.
         */
        bool                  guest_state_loaded;

        unsigned long         exit_qualification;
        u32                   exit_intr_info;
        u32                   idt_vectoring_info;
        ulong                 rflags;

        struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
        int                   nr_uret_msrs;
        int                   nr_active_uret_msrs;
        bool                  guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
        u64                   msr_host_kernel_gs_base;
        u64                   msr_guest_kernel_gs_base;
#endif

        u64                   spec_ctrl;
        u32                   msr_ia32_umwait_control;

        u32 secondary_exec_control;

        /*
         * loaded_vmcs points to the VMCS currently used in this vcpu. For a
         * non-nested (L1) guest, it always points to vmcs01. For a nested
         * guest (L2), it points to a different VMCS.
         */
        struct loaded_vmcs    vmcs01;
        struct loaded_vmcs   *loaded_vmcs;

        struct msr_autoload {
                struct vmx_msrs guest;
                struct vmx_msrs host;
        } msr_autoload;

        struct msr_autostore {
                struct vmx_msrs guest;
        } msr_autostore;

        struct {
                int vm86_active;
                ulong save_rflags;
                struct kvm_segment segs[8];
        } rmode;
        struct {
                u32 bitmask; /* 4 bits per segment (1 bit per field) */
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } seg[8];
        } segment_cache;
        int vpid;
        bool emulation_required;

        union vmx_exit_reason exit_reason;

        /* Posted interrupt descriptor */
        struct pi_desc pi_desc;

        /* Support for a guest hypervisor (nested VMX) */
        struct nested_vmx nested;

        /* Dynamic PLE window. */
        unsigned int ple_window;
        bool ple_window_dirty;

        bool req_immediate_exit;

        /* Support for PML */
#define PML_ENTITY_NUM          512
        struct page *pml_pg;

        /* apic deadline value in host tsc */
        u64 hv_deadline_tsc;

        u64 current_tsc_ratio;

        unsigned long host_debugctlmsr;

        /*
         * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
         * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
         * in msr_ia32_feature_control_valid_bits.
         */
        u64 msr_ia32_feature_control;
        u64 msr_ia32_feature_control_valid_bits;
        /* SGX Launch Control public key hash */
        u64 msr_ia32_sgxlepubkeyhash[4];

#if IS_ENABLED(CONFIG_HYPERV)
        u64 hv_root_ept;
#endif

        struct pt_desc pt_desc;
        struct lbr_desc lbr_desc;

        /* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS   13
        struct {
                DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
                DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
        } shadow_msr_intercept;
};

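/* VMX-specific per-VM state; retrieved from a struct kvm via to_kvm_vmx(). */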
struct kvm_vmx {
        struct kvm kvm;

        unsigned int tss_addr;
        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;

#if IS_ENABLED(CONFIG_HYPERV)
        hpa_t hv_root_ept;
        spinlock_t hv_root_ept_lock;
#endif
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
                        struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
                        unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
                                             int type, bool value)
{
        if (value)
                vmx_enable_intercept_for_msr(vcpu, msr, type);
        else
                vmx_disable_intercept_for_msr(vcpu, msr, type);
}

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

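/* Read the Requested Virtual Interrupt (RVI), the low byte of GUEST_INTR_STATUS. */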
static inline u8 vmx_get_rvi(void)
{
        return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

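/*
 * Generate <lname>_controls_{set,get,setbit,clearbit}() helpers for a VMCS
 * control field.  The current value is shadowed in the loaded_vmcs so that
 * redundant VMWRITEs can be skipped, e.g. pin_controls_setbit(vmx, bit) only
 * writes PIN_BASED_VM_EXEC_CONTROL if 'bit' was not already set.
 */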
#define BUILD_CONTROLS_SHADOW(lname, uname)                                 \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)      \
{                                                                           \
        if (vmx->loaded_vmcs->controls_shadow.lname != val) {               \
                vmcs_write32(uname, val);                                   \
                vmx->loaded_vmcs->controls_shadow.lname = val;              \
        }                                                                   \
}                                                                           \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)                \
{                                                                           \
        return vmx->loaded_vmcs->controls_shadow.lname;                     \
}                                                                           \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
{                                                                           \
        lname##_controls_set(vmx, lname##_controls_get(vmx) | val);         \
}                                                                           \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{                                                                           \
        lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);        \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)

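/*
 * Invalidate state that is cached from the VMCS (RIP, RSP, RFLAGS, PDPTRs,
 * segments, CR0/CR3/CR4 and exit info) so it is re-read on next access;
 * ordinary GPRs remain available since they live in vcpu->arch.regs.
 */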
static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
        vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
                                  | (1 << VCPU_EXREG_RFLAGS)
                                  | (1 << VCPU_EXREG_PDPTR)
                                  | (1 << VCPU_EXREG_SEGMENTS)
                                  | (1 << VCPU_EXREG_CR0)
                                  | (1 << VCPU_EXREG_CR3)
                                  | (1 << VCPU_EXREG_CR4)
                                  | (1 << VCPU_EXREG_EXIT_INFO_1)
                                  | (1 << VCPU_EXREG_EXIT_INFO_2));
        vcpu->arch.regs_dirty = 0;
}

static inline u32 vmx_vmentry_ctrl(void)
{
        u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
        if (vmx_pt_mode_is_system())
                vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
                                  VM_ENTRY_LOAD_IA32_RTIT_CTL);
        /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
        return vmentry_ctrl &
                ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
        u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
        if (vmx_pt_mode_is_system())
                vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
                                 VM_EXIT_CLEAR_IA32_RTIT_CTL);
        /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
        return vmexit_ctrl &
                ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);
u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_vmx, vcpu);
}

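/*
 * Exit qualification and interrupt info are read from the VMCS lazily and
 * cached until the next vmx_register_cache_reset().
 */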
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
                kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
                vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        }
        return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
                kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
                vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
        }
        return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
        return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
                              GFP_KERNEL_ACCOUNT);
}

static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
        vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
        vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
        return vmx->secondary_exec_control &
                SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

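/*
 * Page faults must be intercepted when shadow paging is in use (!enable_ept),
 * or when emulating a guest MAXPHYADDR that is smaller than the host's.
 */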
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
        if (!enable_ept)
                return true;

        return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
        return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
            (secondary_exec_controls_get(to_vmx(vcpu)) &
            SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
        return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_H */