arch/x86/kvm/vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include <linux/nospec.h>
38 #include "kvm_cache_regs.h"
39 #include "x86.h"
40
41 #include <asm/asm.h>
42 #include <asm/cpu.h>
43 #include <asm/io.h>
44 #include <asm/desc.h>
45 #include <asm/vmx.h>
46 #include <asm/virtext.h>
47 #include <asm/mce.h>
48 #include <asm/fpu/internal.h>
49 #include <asm/perf_event.h>
50 #include <asm/debugreg.h>
51 #include <asm/kexec.h>
52 #include <asm/apic.h>
53 #include <asm/irq_remapping.h>
54 #include <asm/mmu_context.h>
55 #include <asm/spec-ctrl.h>
56 #include <asm/mshyperv.h>
57
58 #include "trace.h"
59 #include "pmu.h"
60 #include "vmx_evmcs.h"
61
62 #define __ex(x) __kvm_handle_fault_on_reboot(x)
63 #define __ex_clear(x, reg) \
64         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
65
66 MODULE_AUTHOR("Qumranet");
67 MODULE_LICENSE("GPL");
68
69 static const struct x86_cpu_id vmx_cpu_id[] = {
70         X86_FEATURE_MATCH(X86_FEATURE_VMX),
71         {}
72 };
73 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
74
75 static bool __read_mostly enable_vpid = 1;
76 module_param_named(vpid, enable_vpid, bool, 0444);
77
78 static bool __read_mostly enable_vnmi = 1;
79 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
80
81 static bool __read_mostly flexpriority_enabled = 1;
82 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
83
84 static bool __read_mostly enable_ept = 1;
85 module_param_named(ept, enable_ept, bool, S_IRUGO);
86
87 static bool __read_mostly enable_unrestricted_guest = 1;
88 module_param_named(unrestricted_guest,
89                         enable_unrestricted_guest, bool, S_IRUGO);
90
91 static bool __read_mostly enable_ept_ad_bits = 1;
92 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
93
94 static bool __read_mostly emulate_invalid_guest_state = true;
95 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
96
97 static bool __read_mostly fasteoi = 1;
98 module_param(fasteoi, bool, S_IRUGO);
99
100 static bool __read_mostly enable_apicv = 1;
101 module_param(enable_apicv, bool, S_IRUGO);
102
103 static bool __read_mostly enable_shadow_vmcs = 1;
104 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
105 /*
106  * If nested=1, nested virtualization is supported, i.e., a guest may use
107  * VMX and act as a hypervisor for its own nested guests. If nested=0,
108  * guests may not use VMX instructions.
109  */
110 static bool __read_mostly nested = 0;
111 module_param(nested, bool, S_IRUGO);
112
113 static u64 __read_mostly host_xss;
114
115 static bool __read_mostly enable_pml = 1;
116 module_param_named(pml, enable_pml, bool, S_IRUGO);
117
118 #define MSR_TYPE_R      1
119 #define MSR_TYPE_W      2
120 #define MSR_TYPE_RW     3
121
122 #define MSR_BITMAP_MODE_X2APIC          1
123 #define MSR_BITMAP_MODE_X2APIC_APICV    2
124
125 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
126
127 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
128 static int __read_mostly cpu_preemption_timer_multi;
129 static bool __read_mostly enable_preemption_timer = 1;
130 #ifdef CONFIG_X86_64
131 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
132 #endif
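/*
 * Editor's sketch (not part of the original source): the VMX preemption
 * timer counts down at the TSC rate divided by 2^X, where X is reported
 * in MSR_IA32_VMX_MISC[4:0] and is expected to be cached in
 * cpu_preemption_timer_multi during hardware setup later in this file.
 * Converting a TSC-deadline delta into preemption-timer ticks is then
 * roughly a shift:
 *
 *	static inline u64 tsc_delta_to_timer_ticks(u64 delta_tsc)
 *	{
 *		return delta_tsc >> cpu_preemption_timer_multi;
 *	}
 *
 * The in-tree conversion additionally has to guard against results that
 * do not fit in the 32-bit VMX_PREEMPTION_TIMER_VALUE field.
 */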
133
134 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
135 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
136 #define KVM_VM_CR0_ALWAYS_ON                            \
137         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |      \
138          X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
139 #define KVM_CR4_GUEST_OWNED_BITS                                      \
140         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
141          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
142
143 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
144 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
145 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
146
147 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
148
149 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
150
151 /*
152  * Hyper-V requires all of these, so mark them as supported even though
153  * they are just treated the same as all-context.
154  */
155 #define VMX_VPID_EXTENT_SUPPORTED_MASK          \
156         (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
157         VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
158         VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
159         VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
160
161 /*
162  * These two parameters are used to configure the controls for Pause-Loop Exiting:
163  * ple_gap:    upper bound on the amount of time between two successive
164  *             executions of PAUSE in a loop. Also indicates whether PLE is enabled.
165  *             According to testing, this time is usually smaller than 128 cycles.
166  * ple_window: upper bound on the amount of time a guest is allowed to execute
167  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
168  *             less than 2^12 cycles.
169  * Time is measured on a counter that runs at the same rate as the TSC;
170  * refer to SDM volume 3B, sections 21.6.13 & 22.1.3.
171  */
172 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
173
174 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
175 module_param(ple_window, uint, 0444);
176
177 /* Default doubles per-vcpu window every exit. */
178 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
179 module_param(ple_window_grow, uint, 0444);
180
181 /* Default resets per-vcpu window every exit to ple_window. */
182 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
183 module_param(ple_window_shrink, uint, 0444);
184
185 /* Default is to compute the maximum so we can never overflow. */
186 static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
187 module_param(ple_window_max, uint, 0444);
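/*
 * Editor's sketch (not the in-tree helpers, which appear later in this
 * file): the per-vCPU window described above grows multiplicatively by
 * ple_window_grow on PLE exits and is clamped to ple_window_max, so the
 * update is roughly:
 *
 *	static unsigned int ple_grow_window(unsigned int old)
 *	{
 *		u64 grown = (u64)old * ple_window_grow;
 *
 *		return (unsigned int)min_t(u64, grown, ple_window_max);
 *	}
 *
 * ple_window_shrink controls the opposite adjustment; with its default
 * value the window is simply reset back to ple_window, per the comment
 * above.
 */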
188
189 extern const ulong vmx_return;
190
191 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
192 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
193 static DEFINE_MUTEX(vmx_l1d_flush_mutex);
194
195 /* Storage for pre-module-init parameter parsing */
196 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
197
198 static const struct {
199         const char *option;
200         bool for_parse;
201 } vmentry_l1d_param[] = {
202         [VMENTER_L1D_FLUSH_AUTO]         = {"auto", true},
203         [VMENTER_L1D_FLUSH_NEVER]        = {"never", true},
204         [VMENTER_L1D_FLUSH_COND]         = {"cond", true},
205         [VMENTER_L1D_FLUSH_ALWAYS]       = {"always", true},
206         [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
207         [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
208 };
209
210 #define L1D_CACHE_ORDER 4
211 static void *vmx_l1d_flush_pages;
212
213 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
214 {
215         struct page *page;
216         unsigned int i;
217
218         if (!enable_ept) {
219                 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
220                 return 0;
221         }
222
223         if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
224                 u64 msr;
225
226                 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
227                 if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
228                         l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
229                         return 0;
230                 }
231         }
232
233         /* If set to auto, use the default L1TF mitigation method */
234         if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
235                 switch (l1tf_mitigation) {
236                 case L1TF_MITIGATION_OFF:
237                         l1tf = VMENTER_L1D_FLUSH_NEVER;
238                         break;
239                 case L1TF_MITIGATION_FLUSH_NOWARN:
240                 case L1TF_MITIGATION_FLUSH:
241                 case L1TF_MITIGATION_FLUSH_NOSMT:
242                         l1tf = VMENTER_L1D_FLUSH_COND;
243                         break;
244                 case L1TF_MITIGATION_FULL:
245                 case L1TF_MITIGATION_FULL_FORCE:
246                         l1tf = VMENTER_L1D_FLUSH_ALWAYS;
247                         break;
248                 }
249         } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
250                 l1tf = VMENTER_L1D_FLUSH_ALWAYS;
251         }
252
253         if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
254             !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
255                 page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
256                 if (!page)
257                         return -ENOMEM;
258                 vmx_l1d_flush_pages = page_address(page);
259
260                 /*
261                  * Initialize each page with a different pattern in
262                  * order to protect against KSM in the nested
263                  * virtualization case.
264                  */
265                 for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
266                         memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
267                                PAGE_SIZE);
268                 }
269         }
270
271         l1tf_vmx_mitigation = l1tf;
272
273         if (l1tf != VMENTER_L1D_FLUSH_NEVER)
274                 static_branch_enable(&vmx_l1d_should_flush);
275         else
276                 static_branch_disable(&vmx_l1d_should_flush);
277
278         if (l1tf == VMENTER_L1D_FLUSH_COND)
279                 static_branch_enable(&vmx_l1d_flush_cond);
280         else
281                 static_branch_disable(&vmx_l1d_flush_cond);
282         return 0;
283 }
284
285 static int vmentry_l1d_flush_parse(const char *s)
286 {
287         unsigned int i;
288
289         if (s) {
290                 for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
291                         if (vmentry_l1d_param[i].for_parse &&
292                             sysfs_streq(s, vmentry_l1d_param[i].option))
293                                 return i;
294                 }
295         }
296         return -EINVAL;
297 }
298
299 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
300 {
301         int l1tf, ret;
302
303         l1tf = vmentry_l1d_flush_parse(s);
304         if (l1tf < 0)
305                 return l1tf;
306
307         if (!boot_cpu_has(X86_BUG_L1TF))
308                 return 0;
309
310         /*
311          * Has vmx_init() run already? If not then this is the pre init
312          * parameter parsing. In that case just store the value and let
313          * vmx_init() do the proper setup after enable_ept has been
314          * established.
315          */
316         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
317                 vmentry_l1d_flush_param = l1tf;
318                 return 0;
319         }
320
321         mutex_lock(&vmx_l1d_flush_mutex);
322         ret = vmx_setup_l1d_flush(l1tf);
323         mutex_unlock(&vmx_l1d_flush_mutex);
324         return ret;
325 }
326
327 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
328 {
329         if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
330                 return sprintf(s, "???\n");
331
332         return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
333 }
334
335 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
336         .set = vmentry_l1d_flush_set,
337         .get = vmentry_l1d_flush_get,
338 };
339 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
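/*
 * Usage note (editor's addition): because the parameter is registered
 * with custom ops and mode 0644, the L1D flush mode can be selected at
 * module load time or changed at runtime, e.g.:
 *
 *	modprobe kvm_intel vmentry_l1d_flush=cond
 *	echo always > /sys/module/kvm_intel/parameters/vmentry_l1d_flush
 *	cat /sys/module/kvm_intel/parameters/vmentry_l1d_flush
 *
 * Writes funnel through vmentry_l1d_flush_set() above, which defers to
 * vmx_setup_l1d_flush() once module init has established enable_ept.
 */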
340
341 enum ept_pointers_status {
342         EPT_POINTERS_CHECK = 0,
343         EPT_POINTERS_MATCH = 1,
344         EPT_POINTERS_MISMATCH = 2
345 };
346
347 struct kvm_vmx {
348         struct kvm kvm;
349
350         unsigned int tss_addr;
351         bool ept_identity_pagetable_done;
352         gpa_t ept_identity_map_addr;
353
354         enum ept_pointers_status ept_pointers_match;
355         spinlock_t ept_pointer_lock;
356 };
357
358 #define NR_AUTOLOAD_MSRS 8
359
360 struct vmcs_hdr {
361         u32 revision_id:31;
362         u32 shadow_vmcs:1;
363 };
364
365 struct vmcs {
366         struct vmcs_hdr hdr;
367         u32 abort;
368         char data[0];
369 };
370
371 /*
372  * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
373  * and whose values change infrequently, but are not constant.  I.e. this is
374  * used as a write-through cache of the corresponding VMCS fields.
375  */
376 struct vmcs_host_state {
377         unsigned long cr3;      /* May not match real cr3 */
378         unsigned long cr4;      /* May not match real cr4 */
379         unsigned long gs_base;
380         unsigned long fs_base;
381
382         u16           fs_sel, gs_sel, ldt_sel;
383 #ifdef CONFIG_X86_64
384         u16           ds_sel, es_sel;
385 #endif
386 };
387
388 /*
389  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
390  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
391  * loaded on this CPU (so we can clear them if the CPU goes down).
392  */
393 struct loaded_vmcs {
394         struct vmcs *vmcs;
395         struct vmcs *shadow_vmcs;
396         int cpu;
397         bool launched;
398         bool nmi_known_unmasked;
399         bool hv_timer_armed;
400         /* Support for vnmi-less CPUs */
401         int soft_vnmi_blocked;
402         ktime_t entry_time;
403         s64 vnmi_blocked_time;
404         unsigned long *msr_bitmap;
405         struct list_head loaded_vmcss_on_cpu_link;
406         struct vmcs_host_state host_state;
407 };
408
409 struct shared_msr_entry {
410         unsigned index;
411         u64 data;
412         u64 mask;
413 };
414
415 /*
416  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
417  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
418  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
419  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
420  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
421  * More than one of these structures may exist, if L1 runs multiple L2 guests.
422  * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
423  * underlying hardware which will be used to run L2.
424  * This structure is packed to ensure that its layout is identical across
425  * machines (necessary for live migration).
426  *
427  * IMPORTANT: Changing the layout of existing fields in this structure
428  * will break save/restore compatibility with older kvm releases. When
429  * adding new fields, either use space in the reserved padding* arrays
430  * or add the new fields to the end of the structure.
431  */
432 typedef u64 natural_width;
433 struct __packed vmcs12 {
434         /* According to the Intel spec, a VMCS region must start with the
435          * following two fields. Then follow implementation-specific data.
436          */
437         struct vmcs_hdr hdr;
438         u32 abort;
439
440         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
441         u32 padding[7]; /* room for future expansion */
442
443         u64 io_bitmap_a;
444         u64 io_bitmap_b;
445         u64 msr_bitmap;
446         u64 vm_exit_msr_store_addr;
447         u64 vm_exit_msr_load_addr;
448         u64 vm_entry_msr_load_addr;
449         u64 tsc_offset;
450         u64 virtual_apic_page_addr;
451         u64 apic_access_addr;
452         u64 posted_intr_desc_addr;
453         u64 ept_pointer;
454         u64 eoi_exit_bitmap0;
455         u64 eoi_exit_bitmap1;
456         u64 eoi_exit_bitmap2;
457         u64 eoi_exit_bitmap3;
458         u64 xss_exit_bitmap;
459         u64 guest_physical_address;
460         u64 vmcs_link_pointer;
461         u64 guest_ia32_debugctl;
462         u64 guest_ia32_pat;
463         u64 guest_ia32_efer;
464         u64 guest_ia32_perf_global_ctrl;
465         u64 guest_pdptr0;
466         u64 guest_pdptr1;
467         u64 guest_pdptr2;
468         u64 guest_pdptr3;
469         u64 guest_bndcfgs;
470         u64 host_ia32_pat;
471         u64 host_ia32_efer;
472         u64 host_ia32_perf_global_ctrl;
473         u64 vmread_bitmap;
474         u64 vmwrite_bitmap;
475         u64 vm_function_control;
476         u64 eptp_list_address;
477         u64 pml_address;
478         u64 padding64[3]; /* room for future expansion */
479         /*
480          * To allow migration of L1 (complete with its L2 guests) between
481          * machines of different natural widths (32 or 64 bit), we cannot have
482          * unsigned long fields with no explicit size. We use u64 (aliased
483          * natural_width) instead. Luckily, x86 is little-endian.
484          */
485         natural_width cr0_guest_host_mask;
486         natural_width cr4_guest_host_mask;
487         natural_width cr0_read_shadow;
488         natural_width cr4_read_shadow;
489         natural_width cr3_target_value0;
490         natural_width cr3_target_value1;
491         natural_width cr3_target_value2;
492         natural_width cr3_target_value3;
493         natural_width exit_qualification;
494         natural_width guest_linear_address;
495         natural_width guest_cr0;
496         natural_width guest_cr3;
497         natural_width guest_cr4;
498         natural_width guest_es_base;
499         natural_width guest_cs_base;
500         natural_width guest_ss_base;
501         natural_width guest_ds_base;
502         natural_width guest_fs_base;
503         natural_width guest_gs_base;
504         natural_width guest_ldtr_base;
505         natural_width guest_tr_base;
506         natural_width guest_gdtr_base;
507         natural_width guest_idtr_base;
508         natural_width guest_dr7;
509         natural_width guest_rsp;
510         natural_width guest_rip;
511         natural_width guest_rflags;
512         natural_width guest_pending_dbg_exceptions;
513         natural_width guest_sysenter_esp;
514         natural_width guest_sysenter_eip;
515         natural_width host_cr0;
516         natural_width host_cr3;
517         natural_width host_cr4;
518         natural_width host_fs_base;
519         natural_width host_gs_base;
520         natural_width host_tr_base;
521         natural_width host_gdtr_base;
522         natural_width host_idtr_base;
523         natural_width host_ia32_sysenter_esp;
524         natural_width host_ia32_sysenter_eip;
525         natural_width host_rsp;
526         natural_width host_rip;
527         natural_width paddingl[8]; /* room for future expansion */
528         u32 pin_based_vm_exec_control;
529         u32 cpu_based_vm_exec_control;
530         u32 exception_bitmap;
531         u32 page_fault_error_code_mask;
532         u32 page_fault_error_code_match;
533         u32 cr3_target_count;
534         u32 vm_exit_controls;
535         u32 vm_exit_msr_store_count;
536         u32 vm_exit_msr_load_count;
537         u32 vm_entry_controls;
538         u32 vm_entry_msr_load_count;
539         u32 vm_entry_intr_info_field;
540         u32 vm_entry_exception_error_code;
541         u32 vm_entry_instruction_len;
542         u32 tpr_threshold;
543         u32 secondary_vm_exec_control;
544         u32 vm_instruction_error;
545         u32 vm_exit_reason;
546         u32 vm_exit_intr_info;
547         u32 vm_exit_intr_error_code;
548         u32 idt_vectoring_info_field;
549         u32 idt_vectoring_error_code;
550         u32 vm_exit_instruction_len;
551         u32 vmx_instruction_info;
552         u32 guest_es_limit;
553         u32 guest_cs_limit;
554         u32 guest_ss_limit;
555         u32 guest_ds_limit;
556         u32 guest_fs_limit;
557         u32 guest_gs_limit;
558         u32 guest_ldtr_limit;
559         u32 guest_tr_limit;
560         u32 guest_gdtr_limit;
561         u32 guest_idtr_limit;
562         u32 guest_es_ar_bytes;
563         u32 guest_cs_ar_bytes;
564         u32 guest_ss_ar_bytes;
565         u32 guest_ds_ar_bytes;
566         u32 guest_fs_ar_bytes;
567         u32 guest_gs_ar_bytes;
568         u32 guest_ldtr_ar_bytes;
569         u32 guest_tr_ar_bytes;
570         u32 guest_interruptibility_info;
571         u32 guest_activity_state;
572         u32 guest_sysenter_cs;
573         u32 host_ia32_sysenter_cs;
574         u32 vmx_preemption_timer_value;
575         u32 padding32[7]; /* room for future expansion */
576         u16 virtual_processor_id;
577         u16 posted_intr_nv;
578         u16 guest_es_selector;
579         u16 guest_cs_selector;
580         u16 guest_ss_selector;
581         u16 guest_ds_selector;
582         u16 guest_fs_selector;
583         u16 guest_gs_selector;
584         u16 guest_ldtr_selector;
585         u16 guest_tr_selector;
586         u16 guest_intr_status;
587         u16 host_es_selector;
588         u16 host_cs_selector;
589         u16 host_ss_selector;
590         u16 host_ds_selector;
591         u16 host_fs_selector;
592         u16 host_gs_selector;
593         u16 host_tr_selector;
594         u16 guest_pml_index;
595 };
596
597 /*
598  * For save/restore compatibility, the vmcs12 field offsets must not change.
599  */
600 #define CHECK_OFFSET(field, loc)                                \
601         BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc),       \
602                 "Offset of " #field " in struct vmcs12 has changed.")
603
604 static inline void vmx_check_vmcs12_offsets(void) {
605         CHECK_OFFSET(hdr, 0);
606         CHECK_OFFSET(abort, 4);
607         CHECK_OFFSET(launch_state, 8);
608         CHECK_OFFSET(io_bitmap_a, 40);
609         CHECK_OFFSET(io_bitmap_b, 48);
610         CHECK_OFFSET(msr_bitmap, 56);
611         CHECK_OFFSET(vm_exit_msr_store_addr, 64);
612         CHECK_OFFSET(vm_exit_msr_load_addr, 72);
613         CHECK_OFFSET(vm_entry_msr_load_addr, 80);
614         CHECK_OFFSET(tsc_offset, 88);
615         CHECK_OFFSET(virtual_apic_page_addr, 96);
616         CHECK_OFFSET(apic_access_addr, 104);
617         CHECK_OFFSET(posted_intr_desc_addr, 112);
618         CHECK_OFFSET(ept_pointer, 120);
619         CHECK_OFFSET(eoi_exit_bitmap0, 128);
620         CHECK_OFFSET(eoi_exit_bitmap1, 136);
621         CHECK_OFFSET(eoi_exit_bitmap2, 144);
622         CHECK_OFFSET(eoi_exit_bitmap3, 152);
623         CHECK_OFFSET(xss_exit_bitmap, 160);
624         CHECK_OFFSET(guest_physical_address, 168);
625         CHECK_OFFSET(vmcs_link_pointer, 176);
626         CHECK_OFFSET(guest_ia32_debugctl, 184);
627         CHECK_OFFSET(guest_ia32_pat, 192);
628         CHECK_OFFSET(guest_ia32_efer, 200);
629         CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208);
630         CHECK_OFFSET(guest_pdptr0, 216);
631         CHECK_OFFSET(guest_pdptr1, 224);
632         CHECK_OFFSET(guest_pdptr2, 232);
633         CHECK_OFFSET(guest_pdptr3, 240);
634         CHECK_OFFSET(guest_bndcfgs, 248);
635         CHECK_OFFSET(host_ia32_pat, 256);
636         CHECK_OFFSET(host_ia32_efer, 264);
637         CHECK_OFFSET(host_ia32_perf_global_ctrl, 272);
638         CHECK_OFFSET(vmread_bitmap, 280);
639         CHECK_OFFSET(vmwrite_bitmap, 288);
640         CHECK_OFFSET(vm_function_control, 296);
641         CHECK_OFFSET(eptp_list_address, 304);
642         CHECK_OFFSET(pml_address, 312);
643         CHECK_OFFSET(cr0_guest_host_mask, 344);
644         CHECK_OFFSET(cr4_guest_host_mask, 352);
645         CHECK_OFFSET(cr0_read_shadow, 360);
646         CHECK_OFFSET(cr4_read_shadow, 368);
647         CHECK_OFFSET(cr3_target_value0, 376);
648         CHECK_OFFSET(cr3_target_value1, 384);
649         CHECK_OFFSET(cr3_target_value2, 392);
650         CHECK_OFFSET(cr3_target_value3, 400);
651         CHECK_OFFSET(exit_qualification, 408);
652         CHECK_OFFSET(guest_linear_address, 416);
653         CHECK_OFFSET(guest_cr0, 424);
654         CHECK_OFFSET(guest_cr3, 432);
655         CHECK_OFFSET(guest_cr4, 440);
656         CHECK_OFFSET(guest_es_base, 448);
657         CHECK_OFFSET(guest_cs_base, 456);
658         CHECK_OFFSET(guest_ss_base, 464);
659         CHECK_OFFSET(guest_ds_base, 472);
660         CHECK_OFFSET(guest_fs_base, 480);
661         CHECK_OFFSET(guest_gs_base, 488);
662         CHECK_OFFSET(guest_ldtr_base, 496);
663         CHECK_OFFSET(guest_tr_base, 504);
664         CHECK_OFFSET(guest_gdtr_base, 512);
665         CHECK_OFFSET(guest_idtr_base, 520);
666         CHECK_OFFSET(guest_dr7, 528);
667         CHECK_OFFSET(guest_rsp, 536);
668         CHECK_OFFSET(guest_rip, 544);
669         CHECK_OFFSET(guest_rflags, 552);
670         CHECK_OFFSET(guest_pending_dbg_exceptions, 560);
671         CHECK_OFFSET(guest_sysenter_esp, 568);
672         CHECK_OFFSET(guest_sysenter_eip, 576);
673         CHECK_OFFSET(host_cr0, 584);
674         CHECK_OFFSET(host_cr3, 592);
675         CHECK_OFFSET(host_cr4, 600);
676         CHECK_OFFSET(host_fs_base, 608);
677         CHECK_OFFSET(host_gs_base, 616);
678         CHECK_OFFSET(host_tr_base, 624);
679         CHECK_OFFSET(host_gdtr_base, 632);
680         CHECK_OFFSET(host_idtr_base, 640);
681         CHECK_OFFSET(host_ia32_sysenter_esp, 648);
682         CHECK_OFFSET(host_ia32_sysenter_eip, 656);
683         CHECK_OFFSET(host_rsp, 664);
684         CHECK_OFFSET(host_rip, 672);
685         CHECK_OFFSET(pin_based_vm_exec_control, 744);
686         CHECK_OFFSET(cpu_based_vm_exec_control, 748);
687         CHECK_OFFSET(exception_bitmap, 752);
688         CHECK_OFFSET(page_fault_error_code_mask, 756);
689         CHECK_OFFSET(page_fault_error_code_match, 760);
690         CHECK_OFFSET(cr3_target_count, 764);
691         CHECK_OFFSET(vm_exit_controls, 768);
692         CHECK_OFFSET(vm_exit_msr_store_count, 772);
693         CHECK_OFFSET(vm_exit_msr_load_count, 776);
694         CHECK_OFFSET(vm_entry_controls, 780);
695         CHECK_OFFSET(vm_entry_msr_load_count, 784);
696         CHECK_OFFSET(vm_entry_intr_info_field, 788);
697         CHECK_OFFSET(vm_entry_exception_error_code, 792);
698         CHECK_OFFSET(vm_entry_instruction_len, 796);
699         CHECK_OFFSET(tpr_threshold, 800);
700         CHECK_OFFSET(secondary_vm_exec_control, 804);
701         CHECK_OFFSET(vm_instruction_error, 808);
702         CHECK_OFFSET(vm_exit_reason, 812);
703         CHECK_OFFSET(vm_exit_intr_info, 816);
704         CHECK_OFFSET(vm_exit_intr_error_code, 820);
705         CHECK_OFFSET(idt_vectoring_info_field, 824);
706         CHECK_OFFSET(idt_vectoring_error_code, 828);
707         CHECK_OFFSET(vm_exit_instruction_len, 832);
708         CHECK_OFFSET(vmx_instruction_info, 836);
709         CHECK_OFFSET(guest_es_limit, 840);
710         CHECK_OFFSET(guest_cs_limit, 844);
711         CHECK_OFFSET(guest_ss_limit, 848);
712         CHECK_OFFSET(guest_ds_limit, 852);
713         CHECK_OFFSET(guest_fs_limit, 856);
714         CHECK_OFFSET(guest_gs_limit, 860);
715         CHECK_OFFSET(guest_ldtr_limit, 864);
716         CHECK_OFFSET(guest_tr_limit, 868);
717         CHECK_OFFSET(guest_gdtr_limit, 872);
718         CHECK_OFFSET(guest_idtr_limit, 876);
719         CHECK_OFFSET(guest_es_ar_bytes, 880);
720         CHECK_OFFSET(guest_cs_ar_bytes, 884);
721         CHECK_OFFSET(guest_ss_ar_bytes, 888);
722         CHECK_OFFSET(guest_ds_ar_bytes, 892);
723         CHECK_OFFSET(guest_fs_ar_bytes, 896);
724         CHECK_OFFSET(guest_gs_ar_bytes, 900);
725         CHECK_OFFSET(guest_ldtr_ar_bytes, 904);
726         CHECK_OFFSET(guest_tr_ar_bytes, 908);
727         CHECK_OFFSET(guest_interruptibility_info, 912);
728         CHECK_OFFSET(guest_activity_state, 916);
729         CHECK_OFFSET(guest_sysenter_cs, 920);
730         CHECK_OFFSET(host_ia32_sysenter_cs, 924);
731         CHECK_OFFSET(vmx_preemption_timer_value, 928);
732         CHECK_OFFSET(virtual_processor_id, 960);
733         CHECK_OFFSET(posted_intr_nv, 962);
734         CHECK_OFFSET(guest_es_selector, 964);
735         CHECK_OFFSET(guest_cs_selector, 966);
736         CHECK_OFFSET(guest_ss_selector, 968);
737         CHECK_OFFSET(guest_ds_selector, 970);
738         CHECK_OFFSET(guest_fs_selector, 972);
739         CHECK_OFFSET(guest_gs_selector, 974);
740         CHECK_OFFSET(guest_ldtr_selector, 976);
741         CHECK_OFFSET(guest_tr_selector, 978);
742         CHECK_OFFSET(guest_intr_status, 980);
743         CHECK_OFFSET(host_es_selector, 982);
744         CHECK_OFFSET(host_cs_selector, 984);
745         CHECK_OFFSET(host_ss_selector, 986);
746         CHECK_OFFSET(host_ds_selector, 988);
747         CHECK_OFFSET(host_fs_selector, 990);
748         CHECK_OFFSET(host_gs_selector, 992);
749         CHECK_OFFSET(host_tr_selector, 994);
750         CHECK_OFFSET(guest_pml_index, 996);
751 }
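/*
 * Editor's sketch: the CHECK_OFFSET() scheme above is the kernel's
 * BUILD_BUG_ON_MSG() flavour of a compile-time layout pin.  The same
 * idea in plain C11, using a hypothetical two-field struct:
 *
 *	#include <stddef.h>
 *
 *	struct example {
 *		unsigned int hdr;
 *		unsigned int abort;
 *	};
 *
 *	_Static_assert(offsetof(struct example, hdr) == 0,
 *		       "hdr moved; ABI break");
 *	_Static_assert(offsetof(struct example, abort) == 4,
 *		       "abort moved; ABI break");
 *
 * Any reordering of struct vmcs12 fields trips the equivalent check at
 * build time instead of silently breaking save/restore of nested state.
 */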
752
753 /*
754  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
755  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
756  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
757  *
758  * IMPORTANT: Changing this value will break save/restore compatibility with
759  * older kvm releases.
760  */
761 #define VMCS12_REVISION 0x11e57ed0
762
763 /*
764  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
765  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
766  * the current implementation, 4K is reserved to avoid future complications.
767  */
768 #define VMCS12_SIZE 0x1000
769
770 /*
771  * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
772  * supported VMCS12 field encoding.
773  */
774 #define VMCS12_MAX_FIELD_INDEX 0x17
775
776 struct nested_vmx_msrs {
777         /*
778          * We only store the "true" versions of the VMX capability MSRs. We
779          * generate the "non-true" versions by setting the must-be-1 bits
780          * according to the SDM.
781          */
782         u32 procbased_ctls_low;
783         u32 procbased_ctls_high;
784         u32 secondary_ctls_low;
785         u32 secondary_ctls_high;
786         u32 pinbased_ctls_low;
787         u32 pinbased_ctls_high;
788         u32 exit_ctls_low;
789         u32 exit_ctls_high;
790         u32 entry_ctls_low;
791         u32 entry_ctls_high;
792         u32 misc_low;
793         u32 misc_high;
794         u32 ept_caps;
795         u32 vpid_caps;
796         u64 basic;
797         u64 cr0_fixed0;
798         u64 cr0_fixed1;
799         u64 cr4_fixed0;
800         u64 cr4_fixed1;
801         u64 vmcs_enum;
802         u64 vmfunc_controls;
803 };
804
805 /*
806  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
807  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
808  */
809 struct nested_vmx {
810         /* Has the level1 guest done vmxon? */
811         bool vmxon;
812         gpa_t vmxon_ptr;
813         bool pml_full;
814
815         /* The guest-physical address of the current VMCS L1 keeps for L2 */
816         gpa_t current_vmptr;
817         /*
818          * Cache of the guest's VMCS, existing outside of guest memory.
819          * Loaded from guest memory during VMPTRLD. Flushed to guest
820          * memory during VMCLEAR and VMPTRLD.
821          */
822         struct vmcs12 *cached_vmcs12;
823         /*
824          * Cache of the guest's shadow VMCS, existing outside of guest
825          * memory. Loaded from guest memory during VM entry. Flushed
826          * to guest memory during VM exit.
827          */
828         struct vmcs12 *cached_shadow_vmcs12;
829         /*
830          * Indicates if the shadow vmcs must be updated with the
831          * data held by vmcs12
832          */
833         bool sync_shadow_vmcs;
834         bool dirty_vmcs12;
835
836         /*
837          * vmcs02 has been initialized, i.e. state that is constant for
838          * vmcs02 has been written to the backing VMCS.  Initialization
839          * is delayed until L1 actually attempts to run a nested VM.
840          */
841         bool vmcs02_initialized;
842
843         bool change_vmcs01_virtual_apic_mode;
844
845         /* L2 must run next, and mustn't decide to exit to L1. */
846         bool nested_run_pending;
847
848         struct loaded_vmcs vmcs02;
849
850         /*
851          * Guest pages referred to in the vmcs02 with host-physical
852          * pointers, so we must keep them pinned while L2 runs.
853          */
854         struct page *apic_access_page;
855         struct page *virtual_apic_page;
856         struct page *pi_desc_page;
857         struct pi_desc *pi_desc;
858         bool pi_pending;
859         u16 posted_intr_nv;
860
861         struct hrtimer preemption_timer;
862         bool preemption_timer_expired;
863
864         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
865         u64 vmcs01_debugctl;
866         u64 vmcs01_guest_bndcfgs;
867
868         u16 vpid02;
869         u16 last_vpid;
870
871         struct nested_vmx_msrs msrs;
872
873         /* SMM related state */
874         struct {
875                 /* in VMX operation on SMM entry? */
876                 bool vmxon;
877                 /* in guest mode on SMM entry? */
878                 bool guest_mode;
879         } smm;
880 };
881
882 #define POSTED_INTR_ON  0
883 #define POSTED_INTR_SN  1
884
885 /* Posted-Interrupt Descriptor */
886 struct pi_desc {
887         u32 pir[8];     /* Posted interrupt requested */
888         union {
889                 struct {
890                                 /* bit 256 - Outstanding Notification */
891                         u16     on      : 1,
892                                 /* bit 257 - Suppress Notification */
893                                 sn      : 1,
894                                 /* bit 271:258 - Reserved */
895                                 rsvd_1  : 14;
896                                 /* bit 279:272 - Notification Vector */
897                         u8      nv;
898                                 /* bit 287:280 - Reserved */
899                         u8      rsvd_2;
900                                 /* bit 319:288 - Notification Destination */
901                         u32     ndst;
902                 };
903                 u64 control;
904         };
905         u32 rsvd[6];
906 } __aligned(64);
907
908 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
909 {
910         return test_and_set_bit(POSTED_INTR_ON,
911                         (unsigned long *)&pi_desc->control);
912 }
913
914 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
915 {
916         return test_and_clear_bit(POSTED_INTR_ON,
917                         (unsigned long *)&pi_desc->control);
918 }
919
920 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
921 {
922         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
923 }
924
925 static inline void pi_clear_sn(struct pi_desc *pi_desc)
926 {
927         return clear_bit(POSTED_INTR_SN,
928                         (unsigned long *)&pi_desc->control);
929 }
930
931 static inline void pi_set_sn(struct pi_desc *pi_desc)
932 {
933         return set_bit(POSTED_INTR_SN,
934                         (unsigned long *)&pi_desc->control);
935 }
936
937 static inline void pi_clear_on(struct pi_desc *pi_desc)
938 {
939         clear_bit(POSTED_INTR_ON,
940                   (unsigned long *)&pi_desc->control);
941 }
942
943 static inline int pi_test_on(struct pi_desc *pi_desc)
944 {
945         return test_bit(POSTED_INTR_ON,
946                         (unsigned long *)&pi_desc->control);
947 }
948
949 static inline int pi_test_sn(struct pi_desc *pi_desc)
950 {
951         return test_bit(POSTED_INTR_SN,
952                         (unsigned long *)&pi_desc->control);
953 }
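/*
 * Editor's sketch (not the in-tree delivery path, which appears later in
 * this file): to post a vector, the sender first marks it pending in the
 * PIR and then raises Outstanding Notification; only the 0 -> 1
 * transition of ON warrants sending a notification event to the target.
 *
 *	static void post_interrupt_sketch(struct pi_desc *pi_desc, int vector)
 *	{
 *		if (pi_test_and_set_pir(vector, pi_desc))
 *			return;		// vector was already pending
 *
 *		if (pi_test_and_set_on(pi_desc))
 *			return;		// notification already outstanding
 *
 *		// a notification IPI or vCPU kick would be sent here
 *	}
 */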
954
955 struct vmx_msrs {
956         unsigned int            nr;
957         struct vmx_msr_entry    val[NR_AUTOLOAD_MSRS];
958 };
959
960 struct vcpu_vmx {
961         struct kvm_vcpu       vcpu;
962         unsigned long         host_rsp;
963         u8                    fail;
964         u8                    msr_bitmap_mode;
965         u32                   exit_intr_info;
966         u32                   idt_vectoring_info;
967         ulong                 rflags;
968         struct shared_msr_entry *guest_msrs;
969         int                   nmsrs;
970         int                   save_nmsrs;
971         unsigned long         host_idt_base;
972 #ifdef CONFIG_X86_64
973         u64                   msr_host_kernel_gs_base;
974         u64                   msr_guest_kernel_gs_base;
975 #endif
976
977         u64                   arch_capabilities;
978         u64                   spec_ctrl;
979
980         u32 vm_entry_controls_shadow;
981         u32 vm_exit_controls_shadow;
982         u32 secondary_exec_control;
983
984         /*
985          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
986          * non-nested (L1) guest, it always points to vmcs01. For a nested
987          * guest (L2), it points to a different VMCS.  loaded_cpu_state points
988          * to the VMCS whose state is loaded into the CPU registers that only
989          * need to be switched when transitioning to/from the kernel; a NULL
990          * value indicates that host state is loaded.
991          */
992         struct loaded_vmcs    vmcs01;
993         struct loaded_vmcs   *loaded_vmcs;
994         struct loaded_vmcs   *loaded_cpu_state;
995         bool                  __launched; /* temporary, used in vmx_vcpu_run */
996         struct msr_autoload {
997                 struct vmx_msrs guest;
998                 struct vmx_msrs host;
999         } msr_autoload;
1000
1001         struct {
1002                 int vm86_active;
1003                 ulong save_rflags;
1004                 struct kvm_segment segs[8];
1005         } rmode;
1006         struct {
1007                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
1008                 struct kvm_save_segment {
1009                         u16 selector;
1010                         unsigned long base;
1011                         u32 limit;
1012                         u32 ar;
1013                 } seg[8];
1014         } segment_cache;
1015         int vpid;
1016         bool emulation_required;
1017
1018         u32 exit_reason;
1019
1020         /* Posted interrupt descriptor */
1021         struct pi_desc pi_desc;
1022
1023         /* Support for a guest hypervisor (nested VMX) */
1024         struct nested_vmx nested;
1025
1026         /* Dynamic PLE window. */
1027         int ple_window;
1028         bool ple_window_dirty;
1029
1030         bool req_immediate_exit;
1031
1032         /* Support for PML */
1033 #define PML_ENTITY_NUM          512
1034         struct page *pml_pg;
1035
1036         /* apic deadline value in host tsc */
1037         u64 hv_deadline_tsc;
1038
1039         u64 current_tsc_ratio;
1040
1041         u32 host_pkru;
1042
1043         unsigned long host_debugctlmsr;
1044
1045         /*
1046          * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
1047          * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
1048          * in msr_ia32_feature_control_valid_bits.
1049          */
1050         u64 msr_ia32_feature_control;
1051         u64 msr_ia32_feature_control_valid_bits;
1052         u64 ept_pointer;
1053 };
1054
1055 enum segment_cache_field {
1056         SEG_FIELD_SEL = 0,
1057         SEG_FIELD_BASE = 1,
1058         SEG_FIELD_LIMIT = 2,
1059         SEG_FIELD_AR = 3,
1060
1061         SEG_FIELD_NR = 4
1062 };
1063
1064 static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
1065 {
1066         return container_of(kvm, struct kvm_vmx, kvm);
1067 }
1068
1069 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
1070 {
1071         return container_of(vcpu, struct vcpu_vmx, vcpu);
1072 }
1073
1074 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
1075 {
1076         return &(to_vmx(vcpu)->pi_desc);
1077 }
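/*
 * Editor's note: to_kvm_vmx() and to_vmx() use container_of(), which
 * recovers the enclosing structure from a pointer to one of its members
 * by subtracting the member's offset.  A minimal, self-contained model
 * (container_of_sketch and struct outer are hypothetical names):
 *
 *	#include <stddef.h>
 *
 *	#define container_of_sketch(ptr, type, member) \
 *		((type *)((char *)(ptr) - offsetof(type, member)))
 *
 *	struct outer { int pad; struct inner { int x; } in; };
 *
 *	// given struct inner *ip pointing at some_outer.in:
 *	//   container_of_sketch(ip, struct outer, in) == &some_outer
 */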
1078
1079 #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
1080 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
1081 #define FIELD(number, name)     [ROL16(number, 6)] = VMCS12_OFFSET(name)
1082 #define FIELD64(number, name)                                           \
1083         FIELD(number, name),                                            \
1084         [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
1085
1086
1087 static u16 shadow_read_only_fields[] = {
1088 #define SHADOW_FIELD_RO(x) x,
1089 #include "vmx_shadow_fields.h"
1090 };
1091 static int max_shadow_read_only_fields =
1092         ARRAY_SIZE(shadow_read_only_fields);
1093
1094 static u16 shadow_read_write_fields[] = {
1095 #define SHADOW_FIELD_RW(x) x,
1096 #include "vmx_shadow_fields.h"
1097 };
1098 static int max_shadow_read_write_fields =
1099         ARRAY_SIZE(shadow_read_write_fields);
1100
1101 static const unsigned short vmcs_field_to_offset_table[] = {
1102         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
1103         FIELD(POSTED_INTR_NV, posted_intr_nv),
1104         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
1105         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
1106         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
1107         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
1108         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
1109         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
1110         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
1111         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
1112         FIELD(GUEST_INTR_STATUS, guest_intr_status),
1113         FIELD(GUEST_PML_INDEX, guest_pml_index),
1114         FIELD(HOST_ES_SELECTOR, host_es_selector),
1115         FIELD(HOST_CS_SELECTOR, host_cs_selector),
1116         FIELD(HOST_SS_SELECTOR, host_ss_selector),
1117         FIELD(HOST_DS_SELECTOR, host_ds_selector),
1118         FIELD(HOST_FS_SELECTOR, host_fs_selector),
1119         FIELD(HOST_GS_SELECTOR, host_gs_selector),
1120         FIELD(HOST_TR_SELECTOR, host_tr_selector),
1121         FIELD64(IO_BITMAP_A, io_bitmap_a),
1122         FIELD64(IO_BITMAP_B, io_bitmap_b),
1123         FIELD64(MSR_BITMAP, msr_bitmap),
1124         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
1125         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
1126         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
1127         FIELD64(PML_ADDRESS, pml_address),
1128         FIELD64(TSC_OFFSET, tsc_offset),
1129         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
1130         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
1131         FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
1132         FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
1133         FIELD64(EPT_POINTER, ept_pointer),
1134         FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
1135         FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
1136         FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
1137         FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
1138         FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
1139         FIELD64(VMREAD_BITMAP, vmread_bitmap),
1140         FIELD64(VMWRITE_BITMAP, vmwrite_bitmap),
1141         FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
1142         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
1143         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
1144         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
1145         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
1146         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
1147         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
1148         FIELD64(GUEST_PDPTR0, guest_pdptr0),
1149         FIELD64(GUEST_PDPTR1, guest_pdptr1),
1150         FIELD64(GUEST_PDPTR2, guest_pdptr2),
1151         FIELD64(GUEST_PDPTR3, guest_pdptr3),
1152         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
1153         FIELD64(HOST_IA32_PAT, host_ia32_pat),
1154         FIELD64(HOST_IA32_EFER, host_ia32_efer),
1155         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
1156         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
1157         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
1158         FIELD(EXCEPTION_BITMAP, exception_bitmap),
1159         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
1160         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
1161         FIELD(CR3_TARGET_COUNT, cr3_target_count),
1162         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
1163         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
1164         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
1165         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
1166         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
1167         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
1168         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
1169         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
1170         FIELD(TPR_THRESHOLD, tpr_threshold),
1171         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
1172         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
1173         FIELD(VM_EXIT_REASON, vm_exit_reason),
1174         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
1175         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
1176         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
1177         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
1178         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
1179         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
1180         FIELD(GUEST_ES_LIMIT, guest_es_limit),
1181         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
1182         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
1183         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
1184         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
1185         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
1186         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
1187         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
1188         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
1189         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
1190         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
1191         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
1192         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
1193         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
1194         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
1195         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
1196         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
1197         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
1198         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
1199         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
1200         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
1201         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
1202         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
1203         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
1204         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
1205         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
1206         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
1207         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
1208         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
1209         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
1210         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
1211         FIELD(EXIT_QUALIFICATION, exit_qualification),
1212         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
1213         FIELD(GUEST_CR0, guest_cr0),
1214         FIELD(GUEST_CR3, guest_cr3),
1215         FIELD(GUEST_CR4, guest_cr4),
1216         FIELD(GUEST_ES_BASE, guest_es_base),
1217         FIELD(GUEST_CS_BASE, guest_cs_base),
1218         FIELD(GUEST_SS_BASE, guest_ss_base),
1219         FIELD(GUEST_DS_BASE, guest_ds_base),
1220         FIELD(GUEST_FS_BASE, guest_fs_base),
1221         FIELD(GUEST_GS_BASE, guest_gs_base),
1222         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
1223         FIELD(GUEST_TR_BASE, guest_tr_base),
1224         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
1225         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
1226         FIELD(GUEST_DR7, guest_dr7),
1227         FIELD(GUEST_RSP, guest_rsp),
1228         FIELD(GUEST_RIP, guest_rip),
1229         FIELD(GUEST_RFLAGS, guest_rflags),
1230         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
1231         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
1232         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
1233         FIELD(HOST_CR0, host_cr0),
1234         FIELD(HOST_CR3, host_cr3),
1235         FIELD(HOST_CR4, host_cr4),
1236         FIELD(HOST_FS_BASE, host_fs_base),
1237         FIELD(HOST_GS_BASE, host_gs_base),
1238         FIELD(HOST_TR_BASE, host_tr_base),
1239         FIELD(HOST_GDTR_BASE, host_gdtr_base),
1240         FIELD(HOST_IDTR_BASE, host_idtr_base),
1241         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
1242         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
1243         FIELD(HOST_RSP, host_rsp),
1244         FIELD(HOST_RIP, host_rip),
1245 };
1246
1247 static inline short vmcs_field_to_offset(unsigned long field)
1248 {
1249         const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
1250         unsigned short offset;
1251         unsigned index;
1252
1253         if (field >> 15)
1254                 return -ENOENT;
1255
1256         index = ROL16(field, 6);
1257         if (index >= size)
1258                 return -ENOENT;
1259
1260         index = array_index_nospec(index, size);
1261         offset = vmcs_field_to_offset_table[index];
1262         if (offset == 0)
1263                 return -ENOENT;
1264         return offset;
1265 }
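/*
 * Editor's sketch: ROL16(field, 6) is a plain 16-bit rotate-left used to
 * turn a sparse VMCS field encoding into a compact table index for
 * vmcs_field_to_offset_table[].  For a hypothetical 16-bit encoding
 * 0x0800, for example:
 *
 *	0x0800 << 6        = 0x20000 -> truncated to u16: 0x0000
 *	0x0800 >> (16 - 6) = 0x0002
 *	ROL16(0x0800, 6)   = 0x0000 | 0x0002 = 0x0002
 *
 * The FIELD64() macro adds a second table entry for the "_HIGH" encoding
 * of a 64-bit field, pointing sizeof(u32) past the low half, which is
 * why vmcs_field_to_offset() can treat both halves uniformly.
 */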
1266
1267 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
1268 {
1269         return to_vmx(vcpu)->nested.cached_vmcs12;
1270 }
1271
1272 static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
1273 {
1274         return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
1275 }
1276
1277 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
1278 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
1279 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
1280 static bool vmx_xsaves_supported(void);
1281 static void vmx_set_segment(struct kvm_vcpu *vcpu,
1282                             struct kvm_segment *var, int seg);
1283 static void vmx_get_segment(struct kvm_vcpu *vcpu,
1284                             struct kvm_segment *var, int seg);
1285 static bool guest_state_valid(struct kvm_vcpu *vcpu);
1286 static u32 vmx_segment_access_rights(struct kvm_segment *var);
1287 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
1288 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
1289 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
1290 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
1291                                             u16 error_code);
1292 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
1293 static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
1294                                                           u32 msr, int type);
1295
1296 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
1297 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
1298 /*
1299  * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
1300  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
1301  */
1302 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
1303
1304 /*
1305  * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
1306  * can find which vCPU should be woken up.
1307  */
1308 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
1309 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
1310
1311 enum {
1312         VMX_VMREAD_BITMAP,
1313         VMX_VMWRITE_BITMAP,
1314         VMX_BITMAP_NR
1315 };
1316
1317 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
1318
1319 #define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
1320 #define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
1321
1322 static bool cpu_has_load_ia32_efer;
1323 static bool cpu_has_load_perf_global_ctrl;
1324
1325 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
1326 static DEFINE_SPINLOCK(vmx_vpid_lock);
1327
1328 static struct vmcs_config {
1329         int size;
1330         int order;
1331         u32 basic_cap;
1332         u32 revision_id;
1333         u32 pin_based_exec_ctrl;
1334         u32 cpu_based_exec_ctrl;
1335         u32 cpu_based_2nd_exec_ctrl;
1336         u32 vmexit_ctrl;
1337         u32 vmentry_ctrl;
1338         struct nested_vmx_msrs nested;
1339 } vmcs_config;
1340
1341 static struct vmx_capability {
1342         u32 ept;
1343         u32 vpid;
1344 } vmx_capability;
1345
1346 #define VMX_SEGMENT_FIELD(seg)                                  \
1347         [VCPU_SREG_##seg] = {                                   \
1348                 .selector = GUEST_##seg##_SELECTOR,             \
1349                 .base = GUEST_##seg##_BASE,                     \
1350                 .limit = GUEST_##seg##_LIMIT,                   \
1351                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
1352         }
1353
1354 static const struct kvm_vmx_segment_field {
1355         unsigned selector;
1356         unsigned base;
1357         unsigned limit;
1358         unsigned ar_bytes;
1359 } kvm_vmx_segment_fields[] = {
1360         VMX_SEGMENT_FIELD(CS),
1361         VMX_SEGMENT_FIELD(DS),
1362         VMX_SEGMENT_FIELD(ES),
1363         VMX_SEGMENT_FIELD(FS),
1364         VMX_SEGMENT_FIELD(GS),
1365         VMX_SEGMENT_FIELD(SS),
1366         VMX_SEGMENT_FIELD(TR),
1367         VMX_SEGMENT_FIELD(LDTR),
1368 };
1369
1370 static u64 host_efer;
1371
1372 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1373
1374 /*
1375  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
1376  * away by decrementing the array size.
1377  */
1378 static const u32 vmx_msr_index[] = {
1379 #ifdef CONFIG_X86_64
1380         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
1381 #endif
1382         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1383 };
1384
1385 DEFINE_STATIC_KEY_FALSE(enable_evmcs);
1386
1387 #define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
1388
1389 #define KVM_EVMCS_VERSION 1
1390
1391 #if IS_ENABLED(CONFIG_HYPERV)
1392 static bool __read_mostly enlightened_vmcs = true;
1393 module_param(enlightened_vmcs, bool, 0444);
1394
1395 static inline void evmcs_write64(unsigned long field, u64 value)
1396 {
1397         u16 clean_field;
1398         int offset = get_evmcs_offset(field, &clean_field);
1399
1400         if (offset < 0)
1401                 return;
1402
1403         *(u64 *)((char *)current_evmcs + offset) = value;
1404
1405         current_evmcs->hv_clean_fields &= ~clean_field;
1406 }
1407
1408 static inline void evmcs_write32(unsigned long field, u32 value)
1409 {
1410         u16 clean_field;
1411         int offset = get_evmcs_offset(field, &clean_field);
1412
1413         if (offset < 0)
1414                 return;
1415
1416         *(u32 *)((char *)current_evmcs + offset) = value;
1417         current_evmcs->hv_clean_fields &= ~clean_field;
1418 }
1419
1420 static inline void evmcs_write16(unsigned long field, u16 value)
1421 {
1422         u16 clean_field;
1423         int offset = get_evmcs_offset(field, &clean_field);
1424
1425         if (offset < 0)
1426                 return;
1427
1428         *(u16 *)((char *)current_evmcs + offset) = value;
1429         current_evmcs->hv_clean_fields &= ~clean_field;
1430 }
1431
1432 static inline u64 evmcs_read64(unsigned long field)
1433 {
1434         int offset = get_evmcs_offset(field, NULL);
1435
1436         if (offset < 0)
1437                 return 0;
1438
1439         return *(u64 *)((char *)current_evmcs + offset);
1440 }
1441
1442 static inline u32 evmcs_read32(unsigned long field)
1443 {
1444         int offset = get_evmcs_offset(field, NULL);
1445
1446         if (offset < 0)
1447                 return 0;
1448
1449         return *(u32 *)((char *)current_evmcs + offset);
1450 }
1451
1452 static inline u16 evmcs_read16(unsigned long field)
1453 {
1454         int offset = get_evmcs_offset(field, NULL);
1455
1456         if (offset < 0)
1457                 return 0;
1458
1459         return *(u16 *)((char *)current_evmcs + offset);
1460 }
1461
1462 static inline void evmcs_touch_msr_bitmap(void)
1463 {
1464         if (unlikely(!current_evmcs))
1465                 return;
1466
1467         if (current_evmcs->hv_enlightenments_control.msr_bitmap)
1468                 current_evmcs->hv_clean_fields &=
1469                         ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
1470 }
1471
1472 static void evmcs_load(u64 phys_addr)
1473 {
1474         struct hv_vp_assist_page *vp_ap =
1475                 hv_get_vp_assist_page(smp_processor_id());
1476
1477         vp_ap->current_nested_vmcs = phys_addr;
1478         vp_ap->enlighten_vmentry = 1;
1479 }
1480
1481 static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
1482 {
1483         /*
1484          * Enlightened VMCSv1 doesn't support these:
1485          *
1486          *      POSTED_INTR_NV                  = 0x00000002,
1487          *      GUEST_INTR_STATUS               = 0x00000810,
1488          *      APIC_ACCESS_ADDR                = 0x00002014,
1489          *      POSTED_INTR_DESC_ADDR           = 0x00002016,
1490          *      EOI_EXIT_BITMAP0                = 0x0000201c,
1491          *      EOI_EXIT_BITMAP1                = 0x0000201e,
1492          *      EOI_EXIT_BITMAP2                = 0x00002020,
1493          *      EOI_EXIT_BITMAP3                = 0x00002022,
1494          */
1495         vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
1496         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1497                 ~SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1498         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1499                 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1500         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1501                 ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
1502
1503         /*
1504          *      GUEST_PML_INDEX                 = 0x00000812,
1505          *      PML_ADDRESS                     = 0x0000200e,
1506          */
1507         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_PML;
1508
1509         /*      VM_FUNCTION_CONTROL             = 0x00002018, */
1510         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
1511
1512         /*
1513          *      EPTP_LIST_ADDRESS               = 0x00002024,
1514          *      VMREAD_BITMAP                   = 0x00002026,
1515          *      VMWRITE_BITMAP                  = 0x00002028,
1516          */
1517         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_SHADOW_VMCS;
1518
1519         /*
1520          *      TSC_MULTIPLIER                  = 0x00002032,
1521          */
1522         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_TSC_SCALING;
1523
1524         /*
1525          *      PLE_GAP                         = 0x00004020,
1526          *      PLE_WINDOW                      = 0x00004022,
1527          */
1528         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1529
1530         /*
1531          *      VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
1532          */
1533         vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
1534
1535         /*
1536          *      GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
1537          *      HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
1538          */
1539         vmcs_conf->vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
1540         vmcs_conf->vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
1541
1542         /*
1543          * Currently unsupported in KVM:
1544          *      GUEST_IA32_RTIT_CTL             = 0x00002814,
1545          */
1546 }
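
/*
 * Illustrative note (editorial addition, not in the original source):
 * the sanitized bits matter because the cpu_has_vmx_*() helpers below
 * consult vmcs_config.  Assuming the config probe path later in this
 * file applies the sanitizer when enlightened VMCS is active, roughly:
 *
 *	if (static_branch_unlikely(&enable_evmcs))
 *		evmcs_sanitize_exec_ctrls(&vmcs_config);
 *
 * then e.g. cpu_has_vmx_posted_intr() and cpu_has_vmx_tsc_scaling()
 * report false on an eVMCSv1 host.
 */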
1547
1548 /* check_ept_pointer_match() should be called with ept_pointer_lock held. */
1549 static void check_ept_pointer_match(struct kvm *kvm)
1550 {
1551         struct kvm_vcpu *vcpu;
1552         u64 tmp_eptp = INVALID_PAGE;
1553         int i;
1554
1555         kvm_for_each_vcpu(i, vcpu, kvm) {
1556                 if (!VALID_PAGE(tmp_eptp)) {
1557                         tmp_eptp = to_vmx(vcpu)->ept_pointer;
1558                 } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
1559                         to_kvm_vmx(kvm)->ept_pointers_match
1560                                 = EPT_POINTERS_MISMATCH;
1561                         return;
1562                 }
1563         }
1564
1565         to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
1566 }
1567
1568 static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
1569 {
1570         int ret;
1571
1572         spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
1573
1574         if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
1575                 check_ept_pointer_match(kvm);
1576
1577         if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
1578                 ret = -ENOTSUPP;
1579                 goto out;
1580         }
1581
1582         ret = hyperv_flush_guest_mapping(
1583                         to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
1584
1585 out:
1586         spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
1587         return ret;
1588 }
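
/*
 * Editorial sketch (not in the original source): this helper is only
 * useful once it is installed as the remote TLB flush callback.  One
 * plausible wiring, assuming the Hyper-V nested feature bit and EPT
 * are both available, would be:
 *
 *	if (enable_ept &&
 *	    (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH))
 *		kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
 *
 * The -ENOTSUPP return above presumably lets the caller fall back to
 * the ordinary per-vCPU flush when the EPT pointers do not all match.
 */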
1589 #else /* !IS_ENABLED(CONFIG_HYPERV) */
1590 static inline void evmcs_write64(unsigned long field, u64 value) {}
1591 static inline void evmcs_write32(unsigned long field, u32 value) {}
1592 static inline void evmcs_write16(unsigned long field, u16 value) {}
1593 static inline u64 evmcs_read64(unsigned long field) { return 0; }
1594 static inline u32 evmcs_read32(unsigned long field) { return 0; }
1595 static inline u16 evmcs_read16(unsigned long field) { return 0; }
1596 static inline void evmcs_load(u64 phys_addr) {}
1597 static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
1598 static inline void evmcs_touch_msr_bitmap(void) {}
1599 #endif /* IS_ENABLED(CONFIG_HYPERV) */
1600
1601 static inline bool is_exception_n(u32 intr_info, u8 vector)
1602 {
1603         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1604                              INTR_INFO_VALID_MASK)) ==
1605                 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1606 }
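
/*
 * Worked example (editorial addition): a hardware #PF that pushes an
 * error code typically shows up in VM_EXIT_INTR_INFO as 0x80000b0e,
 * i.e. INTR_INFO_VALID_MASK (bit 31) | error-code-valid (bit 11) |
 * INTR_TYPE_HARD_EXCEPTION (3 << 8) | PF_VECTOR (14).  Bit 11 is not
 * part of the mask above, so is_exception_n(0x80000b0e, PF_VECTOR)
 * returns true.
 */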
1607
1608 static inline bool is_debug(u32 intr_info)
1609 {
1610         return is_exception_n(intr_info, DB_VECTOR);
1611 }
1612
1613 static inline bool is_breakpoint(u32 intr_info)
1614 {
1615         return is_exception_n(intr_info, BP_VECTOR);
1616 }
1617
1618 static inline bool is_page_fault(u32 intr_info)
1619 {
1620         return is_exception_n(intr_info, PF_VECTOR);
1621 }
1622
1623 static inline bool is_invalid_opcode(u32 intr_info)
1624 {
1625         return is_exception_n(intr_info, UD_VECTOR);
1626 }
1627
1628 static inline bool is_gp_fault(u32 intr_info)
1629 {
1630         return is_exception_n(intr_info, GP_VECTOR);
1631 }
1632
1633 static inline bool is_machine_check(u32 intr_info)
1634 {
1635         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1636                              INTR_INFO_VALID_MASK)) ==
1637                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1638 }
1639
1640 /* Undocumented: icebp/int1 */
1641 static inline bool is_icebp(u32 intr_info)
1642 {
1643         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1644                 == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
1645 }
1646
1647 static inline bool cpu_has_vmx_msr_bitmap(void)
1648 {
1649         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1650 }
1651
1652 static inline bool cpu_has_vmx_tpr_shadow(void)
1653 {
1654         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1655 }
1656
1657 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1658 {
1659         return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1660 }
1661
1662 static inline bool cpu_has_secondary_exec_ctrls(void)
1663 {
1664         return vmcs_config.cpu_based_exec_ctrl &
1665                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1666 }
1667
1668 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1669 {
1670         return vmcs_config.cpu_based_2nd_exec_ctrl &
1671                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1672 }
1673
1674 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1675 {
1676         return vmcs_config.cpu_based_2nd_exec_ctrl &
1677                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1678 }
1679
1680 static inline bool cpu_has_vmx_apic_register_virt(void)
1681 {
1682         return vmcs_config.cpu_based_2nd_exec_ctrl &
1683                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1684 }
1685
1686 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1687 {
1688         return vmcs_config.cpu_based_2nd_exec_ctrl &
1689                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1690 }
1691
1692 static inline bool cpu_has_vmx_encls_vmexit(void)
1693 {
1694         return vmcs_config.cpu_based_2nd_exec_ctrl &
1695                 SECONDARY_EXEC_ENCLS_EXITING;
1696 }
1697
1698 /*
1699  * Comment format: document - errata name - stepping - processor name.
1700  * Taken from
1701  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1702  */
1703 static u32 vmx_preemption_cpu_tfms[] = {
1704 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
1705 0x000206E6,
1706 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
1707 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1708 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1709 0x00020652,
1710 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1711 0x00020655,
1712 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
1713 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
1714 /*
1715  * 320767.pdf - AAP86  - B1 -
1716  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1717  */
1718 0x000106E5,
1719 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1720 0x000106A0,
1721 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1722 0x000106A1,
1723 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1724 0x000106A4,
1725 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1726 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1727 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1728 0x000106A5,
1729 };
1730
1731 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1732 {
1733         u32 eax = cpuid_eax(0x00000001), i;
1734
1735         /* Clear the reserved bits */
1736         eax &= ~(0x3U << 14 | 0xfU << 28);
1737         for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1738                 if (eax == vmx_preemption_cpu_tfms[i])
1739                         return true;
1740
1741         return false;
1742 }
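
/*
 * Editorial example: CPUID.1 EAX packs stepping (bits 3:0), model
 * (7:4), family (11:8), extended model (19:16) and extended family
 * (27:20); bits 15:14 and 31:28 are reserved and masked off above.
 * The first table entry, 0x000206E6, therefore decodes to family 6,
 * model 0x2E, stepping 6.
 */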
1743
1744 static inline bool cpu_has_vmx_preemption_timer(void)
1745 {
1746         return vmcs_config.pin_based_exec_ctrl &
1747                 PIN_BASED_VMX_PREEMPTION_TIMER;
1748 }
1749
1750 static inline bool cpu_has_vmx_posted_intr(void)
1751 {
1752         return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1753                 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1754 }
1755
1756 static inline bool cpu_has_vmx_apicv(void)
1757 {
1758         return cpu_has_vmx_apic_register_virt() &&
1759                 cpu_has_vmx_virtual_intr_delivery() &&
1760                 cpu_has_vmx_posted_intr();
1761 }
1762
1763 static inline bool cpu_has_vmx_flexpriority(void)
1764 {
1765         return cpu_has_vmx_tpr_shadow() &&
1766                 cpu_has_vmx_virtualize_apic_accesses();
1767 }
1768
1769 static inline bool cpu_has_vmx_ept_execute_only(void)
1770 {
1771         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1772 }
1773
1774 static inline bool cpu_has_vmx_ept_2m_page(void)
1775 {
1776         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1777 }
1778
1779 static inline bool cpu_has_vmx_ept_1g_page(void)
1780 {
1781         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1782 }
1783
1784 static inline bool cpu_has_vmx_ept_4levels(void)
1785 {
1786         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1787 }
1788
1789 static inline bool cpu_has_vmx_ept_mt_wb(void)
1790 {
1791         return vmx_capability.ept & VMX_EPTP_WB_BIT;
1792 }
1793
1794 static inline bool cpu_has_vmx_ept_5levels(void)
1795 {
1796         return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1797 }
1798
1799 static inline bool cpu_has_vmx_ept_ad_bits(void)
1800 {
1801         return vmx_capability.ept & VMX_EPT_AD_BIT;
1802 }
1803
1804 static inline bool cpu_has_vmx_invept_context(void)
1805 {
1806         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1807 }
1808
1809 static inline bool cpu_has_vmx_invept_global(void)
1810 {
1811         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1812 }
1813
1814 static inline bool cpu_has_vmx_invvpid_individual_addr(void)
1815 {
1816         return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
1817 }
1818
1819 static inline bool cpu_has_vmx_invvpid_single(void)
1820 {
1821         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1822 }
1823
1824 static inline bool cpu_has_vmx_invvpid_global(void)
1825 {
1826         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1827 }
1828
1829 static inline bool cpu_has_vmx_invvpid(void)
1830 {
1831         return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1832 }
1833
1834 static inline bool cpu_has_vmx_ept(void)
1835 {
1836         return vmcs_config.cpu_based_2nd_exec_ctrl &
1837                 SECONDARY_EXEC_ENABLE_EPT;
1838 }
1839
1840 static inline bool cpu_has_vmx_unrestricted_guest(void)
1841 {
1842         return vmcs_config.cpu_based_2nd_exec_ctrl &
1843                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1844 }
1845
1846 static inline bool cpu_has_vmx_ple(void)
1847 {
1848         return vmcs_config.cpu_based_2nd_exec_ctrl &
1849                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1850 }
1851
1852 static inline bool cpu_has_vmx_basic_inout(void)
1853 {
1854         return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1855 }
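
/*
 * Editorial note: vmcs_config.basic_cap caches the upper 32 bits of
 * MSR_IA32_VMX_BASIC, so the helper above reconstructs the MSR layout
 * with (basic_cap << 32) before applying VMX_BASIC_INOUT, which is
 * bit 54 of that MSR (INS/OUTS VM-exit instruction information).
 */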
1856
1857 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1858 {
1859         return flexpriority_enabled && lapic_in_kernel(vcpu);
1860 }
1861
1862 static inline bool cpu_has_vmx_vpid(void)
1863 {
1864         return vmcs_config.cpu_based_2nd_exec_ctrl &
1865                 SECONDARY_EXEC_ENABLE_VPID;
1866 }
1867
1868 static inline bool cpu_has_vmx_rdtscp(void)
1869 {
1870         return vmcs_config.cpu_based_2nd_exec_ctrl &
1871                 SECONDARY_EXEC_RDTSCP;
1872 }
1873
1874 static inline bool cpu_has_vmx_invpcid(void)
1875 {
1876         return vmcs_config.cpu_based_2nd_exec_ctrl &
1877                 SECONDARY_EXEC_ENABLE_INVPCID;
1878 }
1879
1880 static inline bool cpu_has_virtual_nmis(void)
1881 {
1882         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1883 }
1884
1885 static inline bool cpu_has_vmx_wbinvd_exit(void)
1886 {
1887         return vmcs_config.cpu_based_2nd_exec_ctrl &
1888                 SECONDARY_EXEC_WBINVD_EXITING;
1889 }
1890
1891 static inline bool cpu_has_vmx_shadow_vmcs(void)
1892 {
1893         u64 vmx_msr;
1894         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1895         /* check if the cpu supports writing r/o exit information fields */
1896         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1897                 return false;
1898
1899         return vmcs_config.cpu_based_2nd_exec_ctrl &
1900                 SECONDARY_EXEC_SHADOW_VMCS;
1901 }
1902
1903 static inline bool cpu_has_vmx_pml(void)
1904 {
1905         return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1906 }
1907
1908 static inline bool cpu_has_vmx_tsc_scaling(void)
1909 {
1910         return vmcs_config.cpu_based_2nd_exec_ctrl &
1911                 SECONDARY_EXEC_TSC_SCALING;
1912 }
1913
1914 static inline bool cpu_has_vmx_vmfunc(void)
1915 {
1916         return vmcs_config.cpu_based_2nd_exec_ctrl &
1917                 SECONDARY_EXEC_ENABLE_VMFUNC;
1918 }
1919
1920 static bool vmx_umip_emulated(void)
1921 {
1922         return vmcs_config.cpu_based_2nd_exec_ctrl &
1923                 SECONDARY_EXEC_DESC;
1924 }
1925
1926 static inline bool report_flexpriority(void)
1927 {
1928         return flexpriority_enabled;
1929 }
1930
1931 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1932 {
1933         return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
1934 }
1935
1936 /*
1937  * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
1938  * to modify any valid field of the VMCS, or are the VM-exit
1939  * information fields read-only?
1940  */
1941 static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
1942 {
1943         return to_vmx(vcpu)->nested.msrs.misc_low &
1944                 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
1945 }
1946
1947 static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
1948 {
1949         return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
1950 }
1951
1952 static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
1953 {
1954         return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
1955                         CPU_BASED_MONITOR_TRAP_FLAG;
1956 }
1957
1958 static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
1959 {
1960         return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
1961                 SECONDARY_EXEC_SHADOW_VMCS;
1962 }
1963
1964 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1965 {
1966         return vmcs12->cpu_based_vm_exec_control & bit;
1967 }
1968
1969 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1970 {
1971         return (vmcs12->cpu_based_vm_exec_control &
1972                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1973                 (vmcs12->secondary_vm_exec_control & bit);
1974 }
1975
1976 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1977 {
1978         return vmcs12->pin_based_vm_exec_control &
1979                 PIN_BASED_VMX_PREEMPTION_TIMER;
1980 }
1981
1982 static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
1983 {
1984         return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
1985 }
1986
1987 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1988 {
1989         return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1990 }
1991
1992 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1993 {
1994         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1995 }
1996
1997 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1998 {
1999         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
2000 }
2001
2002 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
2003 {
2004         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
2005 }
2006
2007 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
2008 {
2009         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
2010 }
2011
2012 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
2013 {
2014         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
2015 }
2016
2017 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
2018 {
2019         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
2020 }
2021
2022 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
2023 {
2024         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2025 }
2026
2027 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
2028 {
2029         return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
2030 }
2031
2032 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
2033 {
2034         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
2035 }
2036
2037 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
2038 {
2039         return nested_cpu_has_vmfunc(vmcs12) &&
2040                 (vmcs12->vm_function_control &
2041                  VMX_VMFUNC_EPTP_SWITCHING);
2042 }
2043
2044 static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
2045 {
2046         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
2047 }
2048
2049 static inline bool is_nmi(u32 intr_info)
2050 {
2051         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
2052                 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
2053 }
2054
2055 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
2056                               u32 exit_intr_info,
2057                               unsigned long exit_qualification);
2058
2059 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
2060 {
2061         int i;
2062
2063         for (i = 0; i < vmx->nmsrs; ++i)
2064                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
2065                         return i;
2066         return -1;
2067 }
2068
2069 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
2070 {
2071         struct {
2072                 u64 vpid : 16;
2073                 u64 rsvd : 48;
2074                 u64 gva;
2075         } operand = { vpid, 0, gva };
2076         bool error;
2077
2078         asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na)
2079                       : CC_OUT(na) (error) : "a"(&operand), "c"(ext)
2080                       : "memory");
2081         BUG_ON(error);
2082 }
2083
2084 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
2085 {
2086         struct {
2087                 u64 eptp, gpa;
2088         } operand = {eptp, gpa};
2089         bool error;
2090
2091         asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na)
2092                       : CC_OUT(na) (error) : "a" (&operand), "c" (ext)
2093                       : "memory");
2094         BUG_ON(error);
2095 }
2096
2097 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
2098 {
2099         int i;
2100
2101         i = __find_msr_index(vmx, msr);
2102         if (i >= 0)
2103                 return &vmx->guest_msrs[i];
2104         return NULL;
2105 }
2106
2107 static void vmcs_clear(struct vmcs *vmcs)
2108 {
2109         u64 phys_addr = __pa(vmcs);
2110         bool error;
2111
2112         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) CC_SET(na)
2113                       : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
2114                       : "memory");
2115         if (unlikely(error))
2116                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
2117                        vmcs, phys_addr);
2118 }
2119
2120 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
2121 {
2122         vmcs_clear(loaded_vmcs->vmcs);
2123         if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
2124                 vmcs_clear(loaded_vmcs->shadow_vmcs);
2125         loaded_vmcs->cpu = -1;
2126         loaded_vmcs->launched = 0;
2127 }
2128
2129 static void vmcs_load(struct vmcs *vmcs)
2130 {
2131         u64 phys_addr = __pa(vmcs);
2132         bool error;
2133
2134         if (static_branch_unlikely(&enable_evmcs))
2135                 return evmcs_load(phys_addr);
2136
2137         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na)
2138                       : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
2139                       : "memory");
2140         if (unlikely(error))
2141                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
2142                        vmcs, phys_addr);
2143 }
2144
2145 #ifdef CONFIG_KEXEC_CORE
2146 /*
2147  * This bitmap indicates, per CPU, whether the crash-time vmclear
2148  * operation is enabled.  It is disabled on all CPUs by
2149  * default.
2150  */
2151 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
2152
2153 static inline void crash_enable_local_vmclear(int cpu)
2154 {
2155         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
2156 }
2157
2158 static inline void crash_disable_local_vmclear(int cpu)
2159 {
2160         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
2161 }
2162
2163 static inline int crash_local_vmclear_enabled(int cpu)
2164 {
2165         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
2166 }
2167
2168 static void crash_vmclear_local_loaded_vmcss(void)
2169 {
2170         int cpu = raw_smp_processor_id();
2171         struct loaded_vmcs *v;
2172
2173         if (!crash_local_vmclear_enabled(cpu))
2174                 return;
2175
2176         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
2177                             loaded_vmcss_on_cpu_link)
2178                 vmcs_clear(v->vmcs);
2179 }
2180 #else
2181 static inline void crash_enable_local_vmclear(int cpu) { }
2182 static inline void crash_disable_local_vmclear(int cpu) { }
2183 #endif /* CONFIG_KEXEC_CORE */
2184
2185 static void __loaded_vmcs_clear(void *arg)
2186 {
2187         struct loaded_vmcs *loaded_vmcs = arg;
2188         int cpu = raw_smp_processor_id();
2189
2190         if (loaded_vmcs->cpu != cpu)
2191                 return; /* vcpu migration can race with cpu offline */
2192         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
2193                 per_cpu(current_vmcs, cpu) = NULL;
2194         crash_disable_local_vmclear(cpu);
2195         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
2196
2197         /*
2198          * Ensure loaded_vmcs->loaded_vmcss_on_cpu_link is updated before
2199          * loaded_vmcs->cpu is set to -1 in loaded_vmcs_init().  Otherwise,
2200          * another CPU could observe cpu == -1 first and add the VMCS to
2201          * its per-cpu list before it has been deleted here.
2202          */
2203         smp_wmb();
2204
2205         loaded_vmcs_init(loaded_vmcs);
2206         crash_enable_local_vmclear(cpu);
2207 }
2208
2209 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
2210 {
2211         int cpu = loaded_vmcs->cpu;
2212
2213         if (cpu != -1)
2214                 smp_call_function_single(cpu,
2215                          __loaded_vmcs_clear, loaded_vmcs, 1);
2216 }
2217
2218 static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
2219 {
2220         if (vpid == 0)
2221                 return true;
2222
2223         if (cpu_has_vmx_invvpid_individual_addr()) {
2224                 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
2225                 return true;
2226         }
2227
2228         return false;
2229 }
2230
2231 static inline void vpid_sync_vcpu_single(int vpid)
2232 {
2233         if (vpid == 0)
2234                 return;
2235
2236         if (cpu_has_vmx_invvpid_single())
2237                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
2238 }
2239
2240 static inline void vpid_sync_vcpu_global(void)
2241 {
2242         if (cpu_has_vmx_invvpid_global())
2243                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
2244 }
2245
2246 static inline void vpid_sync_context(int vpid)
2247 {
2248         if (cpu_has_vmx_invvpid_single())
2249                 vpid_sync_vcpu_single(vpid);
2250         else
2251                 vpid_sync_vcpu_global();
2252 }
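
/*
 * Editorial usage sketch: callers typically flush all guest-linear
 * translations tagged with a vCPU's VPID, e.g. (hypothetical call
 * site shown only for illustration):
 *
 *	vpid_sync_context(to_vmx(vcpu)->vpid);
 *
 * On CPUs without single-context INVVPID this degrades to a global
 * flush of every VPID, which is correct but more expensive.
 */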
2253
2254 static inline void ept_sync_global(void)
2255 {
2256         __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
2257 }
2258
2259 static inline void ept_sync_context(u64 eptp)
2260 {
2261         if (cpu_has_vmx_invept_context())
2262                 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
2263         else
2264                 ept_sync_global();
2265 }
2266
2267 static __always_inline void vmcs_check16(unsigned long field)
2268 {
2269         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
2270                          "16-bit accessor invalid for 64-bit field");
2271         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2272                          "16-bit accessor invalid for 64-bit high field");
2273         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2274                          "16-bit accessor invalid for 32-bit high field");
2275         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2276                          "16-bit accessor invalid for natural width field");
2277 }
2278
2279 static __always_inline void vmcs_check32(unsigned long field)
2280 {
2281         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2282                          "32-bit accessor invalid for 16-bit field");
2283         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2284                          "32-bit accessor invalid for natural width field");
2285 }
2286
2287 static __always_inline void vmcs_check64(unsigned long field)
2288 {
2289         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2290                          "64-bit accessor invalid for 16-bit field");
2291         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2292                          "64-bit accessor invalid for 64-bit high field");
2293         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2294                          "64-bit accessor invalid for 32-bit field");
2295         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2296                          "64-bit accessor invalid for natural width field");
2297 }
2298
2299 static __always_inline void vmcs_checkl(unsigned long field)
2300 {
2301         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2302                          "Natural width accessor invalid for 16-bit field");
2303         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
2304                          "Natural width accessor invalid for 64-bit field");
2305         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2306                          "Natural width accessor invalid for 64-bit high field");
2307         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2308                          "Natural width accessor invalid for 32-bit field");
2309 }
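
/*
 * Editorial note on the magic numbers above: bits 14:13 of a VMCS
 * field encoding select the access width (0 = 16-bit, 1 = 64-bit,
 * 2 = 32-bit, 3 = natural width) and bit 0 selects the high half of
 * a 64-bit field.  For example GUEST_ES_SELECTOR (0x0800) is 16-bit,
 * GUEST_PHYSICAL_ADDRESS (0x2400) is 64-bit, and 0x2401 is its high
 * 32 bits, hence the 0x6000/0x6001 masks and the 0x2000, 0x2001,
 * 0x4000 and 0x6000 comparisons.
 */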
2310
2311 static __always_inline unsigned long __vmcs_readl(unsigned long field)
2312 {
2313         unsigned long value;
2314
2315         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
2316                       : "=a"(value) : "d"(field) : "cc");
2317         return value;
2318 }
2319
2320 static __always_inline u16 vmcs_read16(unsigned long field)
2321 {
2322         vmcs_check16(field);
2323         if (static_branch_unlikely(&enable_evmcs))
2324                 return evmcs_read16(field);
2325         return __vmcs_readl(field);
2326 }
2327
2328 static __always_inline u32 vmcs_read32(unsigned long field)
2329 {
2330         vmcs_check32(field);
2331         if (static_branch_unlikely(&enable_evmcs))
2332                 return evmcs_read32(field);
2333         return __vmcs_readl(field);
2334 }
2335
2336 static __always_inline u64 vmcs_read64(unsigned long field)
2337 {
2338         vmcs_check64(field);
2339         if (static_branch_unlikely(&enable_evmcs))
2340                 return evmcs_read64(field);
2341 #ifdef CONFIG_X86_64
2342         return __vmcs_readl(field);
2343 #else
2344         return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
2345 #endif
2346 }
2347
2348 static __always_inline unsigned long vmcs_readl(unsigned long field)
2349 {
2350         vmcs_checkl(field);
2351         if (static_branch_unlikely(&enable_evmcs))
2352                 return evmcs_read64(field);
2353         return __vmcs_readl(field);
2354 }
2355
2356 static noinline void vmwrite_error(unsigned long field, unsigned long value)
2357 {
2358         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
2359                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
2360         dump_stack();
2361 }
2362
2363 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
2364 {
2365         bool error;
2366
2367         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na)
2368                       : CC_OUT(na) (error) : "a"(value), "d"(field));
2369         if (unlikely(error))
2370                 vmwrite_error(field, value);
2371 }
2372
2373 static __always_inline void vmcs_write16(unsigned long field, u16 value)
2374 {
2375         vmcs_check16(field);
2376         if (static_branch_unlikely(&enable_evmcs))
2377                 return evmcs_write16(field, value);
2378
2379         __vmcs_writel(field, value);
2380 }
2381
2382 static __always_inline void vmcs_write32(unsigned long field, u32 value)
2383 {
2384         vmcs_check32(field);
2385         if (static_branch_unlikely(&enable_evmcs))
2386                 return evmcs_write32(field, value);
2387
2388         __vmcs_writel(field, value);
2389 }
2390
2391 static __always_inline void vmcs_write64(unsigned long field, u64 value)
2392 {
2393         vmcs_check64(field);
2394         if (static_branch_unlikely(&enable_evmcs))
2395                 return evmcs_write64(field, value);
2396
2397         __vmcs_writel(field, value);
2398 #ifndef CONFIG_X86_64
2399         asm volatile ("");
2400         __vmcs_writel(field+1, value >> 32);
2401 #endif
2402 }
2403
2404 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
2405 {
2406         vmcs_checkl(field);
2407         if (static_branch_unlikely(&enable_evmcs))
2408                 return evmcs_write64(field, value);
2409
2410         __vmcs_writel(field, value);
2411 }
2412
2413 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
2414 {
2415         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2416                          "vmcs_clear_bits does not support 64-bit fields");
2417         if (static_branch_unlikely(&enable_evmcs))
2418                 return evmcs_write32(field, evmcs_read32(field) & ~mask);
2419
2420         __vmcs_writel(field, __vmcs_readl(field) & ~mask);
2421 }
2422
2423 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
2424 {
2425         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2426                          "vmcs_set_bits does not support 64-bit fields");
2427         if (static_branch_unlikely(&enable_evmcs))
2428                 return evmcs_write32(field, evmcs_read32(field) | mask);
2429
2430         __vmcs_writel(field, __vmcs_readl(field) | mask);
2431 }
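
/*
 * Editorial usage example for the read-modify-write helpers above
 * (a real 32-bit control field, shown only for illustration):
 *
 *	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
 *		      CPU_BASED_VIRTUAL_INTR_PENDING);
 *	...
 *	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
 *			CPU_BASED_VIRTUAL_INTR_PENDING);
 *
 * The BUILD_BUG_ON_MSG() guards reject 64-bit fields, where a 32-bit
 * read-modify-write would silently drop the upper half.
 */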
2432
2433 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
2434 {
2435         vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
2436 }
2437
2438 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
2439 {
2440         vmcs_write32(VM_ENTRY_CONTROLS, val);
2441         vmx->vm_entry_controls_shadow = val;
2442 }
2443
2444 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
2445 {
2446         if (vmx->vm_entry_controls_shadow != val)
2447                 vm_entry_controls_init(vmx, val);
2448 }
2449
2450 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
2451 {
2452         return vmx->vm_entry_controls_shadow;
2453 }
2454
2455
2456 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2457 {
2458         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
2459 }
2460
2461 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2462 {
2463         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
2464 }
2465
2466 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
2467 {
2468         vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
2469 }
2470
2471 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
2472 {
2473         vmcs_write32(VM_EXIT_CONTROLS, val);
2474         vmx->vm_exit_controls_shadow = val;
2475 }
2476
2477 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
2478 {
2479         if (vmx->vm_exit_controls_shadow != val)
2480                 vm_exit_controls_init(vmx, val);
2481 }
2482
2483 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
2484 {
2485         return vmx->vm_exit_controls_shadow;
2486 }
2487
2488
2489 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2490 {
2491         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
2492 }
2493
2494 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2495 {
2496         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
2497 }
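
/*
 * Editorial note: the *_controls_shadow fields cache the last value
 * written to VM_ENTRY_CONTROLS/VM_EXIT_CONTROLS so that redundant
 * VMWRITEs are skipped.  For example:
 *
 *	vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
 *	vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
 *
 * performs only one vmcs_write32(); the second call sees the bit
 * already set in the shadow and returns without touching the VMCS.
 */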
2498
2499 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
2500 {
2501         vmx->segment_cache.bitmask = 0;
2502 }
2503
2504 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
2505                                        unsigned field)
2506 {
2507         bool ret;
2508         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
2509
2510         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
2511                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
2512                 vmx->segment_cache.bitmask = 0;
2513         }
2514         ret = vmx->segment_cache.bitmask & mask;
2515         vmx->segment_cache.bitmask |= mask;
2516         return ret;
2517 }
2518
2519 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
2520 {
2521         u16 *p = &vmx->segment_cache.seg[seg].selector;
2522
2523         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
2524                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
2525         return *p;
2526 }
2527
2528 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
2529 {
2530         ulong *p = &vmx->segment_cache.seg[seg].base;
2531
2532         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
2533                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
2534         return *p;
2535 }
2536
2537 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
2538 {
2539         u32 *p = &vmx->segment_cache.seg[seg].limit;
2540
2541         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
2542                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
2543         return *p;
2544 }
2545
2546 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
2547 {
2548         u32 *p = &vmx->segment_cache.seg[seg].ar;
2549
2550         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
2551                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
2552         return *p;
2553 }
2554
2555 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
2556 {
2557         u32 eb;
2558
2559         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
2560              (1u << DB_VECTOR) | (1u << AC_VECTOR);
2561         /*
2562          * Guest access to VMware backdoor ports could legitimately
2563          * trigger #GP because of the TSS I/O permission bitmap.
2564          * We intercept those #GPs and allow access to the ports
2565          * anyway, as VMware does.
2566          */
2567         if (enable_vmware_backdoor)
2568                 eb |= (1u << GP_VECTOR);
2569         if ((vcpu->guest_debug &
2570              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
2571             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
2572                 eb |= 1u << BP_VECTOR;
2573         if (to_vmx(vcpu)->rmode.vm86_active)
2574                 eb = ~0;
2575         if (enable_ept)
2576                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
2577
2578         /* When we are running a nested L2 guest and L1 specified for it a
2579          * certain exception bitmap, we must trap the same exceptions and pass
2580          * them to L1. When running L2, we will only handle the exceptions
2581          * specified above if L1 did not want them.
2582          */
2583         if (is_guest_mode(vcpu))
2584                 eb |= get_vmcs12(vcpu)->exception_bitmap;
2585
2586         vmcs_write32(EXCEPTION_BITMAP, eb);
2587 }
2588
2589 /*
2590  * Check if a write to MSR is intercepted by the currently loaded MSR bitmap.
2591  */
2592 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
2593 {
2594         unsigned long *msr_bitmap;
2595         int f = sizeof(unsigned long);
2596
2597         if (!cpu_has_vmx_msr_bitmap())
2598                 return true;
2599
2600         msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
2601
2602         if (msr <= 0x1fff) {
2603                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2604         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2605                 msr &= 0x1fff;
2606                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2607         }
2608
2609         return true;
2610 }
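
/*
 * Editorial note on the offsets above: the 4K MSR bitmap holds four
 * 1K regions - read intercepts for MSRs 0x00000000-0x00001fff at
 * offset 0x000, read intercepts for 0xc0000000-0xc0001fff at 0x400,
 * and the corresponding write intercepts at 0x800 and 0xc00.  So,
 * for example, a guest WRMSR to MSR_STAR (0xc0000081) is intercepted
 * iff bit 0x81 of the region starting at offset 0xc00 is set.
 */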
2611
2612 /*
2613  * Check if a write to MSR is intercepted by the L01 (vmcs01) MSR bitmap.
2614  */
2615 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
2616 {
2617         unsigned long *msr_bitmap;
2618         int f = sizeof(unsigned long);
2619
2620         if (!cpu_has_vmx_msr_bitmap())
2621                 return true;
2622
2623         msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
2624
2625         if (msr <= 0x1fff) {
2626                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2627         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2628                 msr &= 0x1fff;
2629                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2630         }
2631
2632         return true;
2633 }
2634
2635 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2636                 unsigned long entry, unsigned long exit)
2637 {
2638         vm_entry_controls_clearbit(vmx, entry);
2639         vm_exit_controls_clearbit(vmx, exit);
2640 }
2641
2642 static int find_msr(struct vmx_msrs *m, unsigned int msr)
2643 {
2644         unsigned int i;
2645
2646         for (i = 0; i < m->nr; ++i) {
2647                 if (m->val[i].index == msr)
2648                         return i;
2649         }
2650         return -ENOENT;
2651 }
2652
2653 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
2654 {
2655         int i;
2656         struct msr_autoload *m = &vmx->msr_autoload;
2657
2658         switch (msr) {
2659         case MSR_EFER:
2660                 if (cpu_has_load_ia32_efer) {
2661                         clear_atomic_switch_msr_special(vmx,
2662                                         VM_ENTRY_LOAD_IA32_EFER,
2663                                         VM_EXIT_LOAD_IA32_EFER);
2664                         return;
2665                 }
2666                 break;
2667         case MSR_CORE_PERF_GLOBAL_CTRL:
2668                 if (cpu_has_load_perf_global_ctrl) {
2669                         clear_atomic_switch_msr_special(vmx,
2670                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2671                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2672                         return;
2673                 }
2674                 break;
2675         }
2676         i = find_msr(&m->guest, msr);
2677         if (i < 0)
2678                 goto skip_guest;
2679         --m->guest.nr;
2680         m->guest.val[i] = m->guest.val[m->guest.nr];
2681         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
2682
2683 skip_guest:
2684         i = find_msr(&m->host, msr);
2685         if (i < 0)
2686                 return;
2687
2688         --m->host.nr;
2689         m->host.val[i] = m->host.val[m->host.nr];
2690         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
2691 }
2692
2693 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2694                 unsigned long entry, unsigned long exit,
2695                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
2696                 u64 guest_val, u64 host_val)
2697 {
2698         vmcs_write64(guest_val_vmcs, guest_val);
2699         vmcs_write64(host_val_vmcs, host_val);
2700         vm_entry_controls_setbit(vmx, entry);
2701         vm_exit_controls_setbit(vmx, exit);
2702 }
2703
2704 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
2705                                   u64 guest_val, u64 host_val, bool entry_only)
2706 {
2707         int i, j = 0;
2708         struct msr_autoload *m = &vmx->msr_autoload;
2709
2710         switch (msr) {
2711         case MSR_EFER:
2712                 if (cpu_has_load_ia32_efer) {
2713                         add_atomic_switch_msr_special(vmx,
2714                                         VM_ENTRY_LOAD_IA32_EFER,
2715                                         VM_EXIT_LOAD_IA32_EFER,
2716                                         GUEST_IA32_EFER,
2717                                         HOST_IA32_EFER,
2718                                         guest_val, host_val);
2719                         return;
2720                 }
2721                 break;
2722         case MSR_CORE_PERF_GLOBAL_CTRL:
2723                 if (cpu_has_load_perf_global_ctrl) {
2724                         add_atomic_switch_msr_special(vmx,
2725                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2726                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2727                                         GUEST_IA32_PERF_GLOBAL_CTRL,
2728                                         HOST_IA32_PERF_GLOBAL_CTRL,
2729                                         guest_val, host_val);
2730                         return;
2731                 }
2732                 break;
2733         case MSR_IA32_PEBS_ENABLE:
2734                 /* PEBS needs a quiescent period after being disabled (to write
2735                  * a record).  Disabling PEBS through VMX MSR swapping doesn't
2736                  * provide that period, so the CPU could write the host's
2737                  * record into guest memory.
2738                  */
2739                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
2740         }
2741
2742         i = find_msr(&m->guest, msr);
2743         if (!entry_only)
2744                 j = find_msr(&m->host, msr);
2745
2746         if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
2747                 printk_once(KERN_WARNING "Not enough msr switch entries. "
2748                                 "Can't add msr %x\n", msr);
2749                 return;
2750         }
2751         if (i < 0) {
2752                 i = m->guest.nr++;
2753                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
2754         }
2755         m->guest.val[i].index = msr;
2756         m->guest.val[i].value = guest_val;
2757
2758         if (entry_only)
2759                 return;
2760
2761         if (j < 0) {
2762                 j = m->host.nr++;
2763                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
2764         }
2765         m->host.val[j].index = msr;
2766         m->host.val[j].value = host_val;
2767 }
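
/*
 * Editorial note: @entry_only selects whether the MSR is added only
 * to the VM-entry (guest) autoload list or to both lists.  A purely
 * hypothetical caller that wants a guest value loaded on entry but
 * restores the host value by other means on exit could do:
 *
 *	add_atomic_switch_msr(vmx, MSR_TSC_AUX, guest_val, 0, true);
 *
 * whereas update_transition_efer() below passes entry_only == false
 * so the host EFER is reloaded by the VM-exit MSR-load list as well.
 */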
2768
2769 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2770 {
2771         u64 guest_efer = vmx->vcpu.arch.efer;
2772         u64 ignore_bits = 0;
2773
2774         if (!enable_ept) {
2775                 /*
2776                  * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
2777                  * host CPUID is more efficient than testing guest CPUID
2778                  * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
2779                  */
2780                 if (boot_cpu_has(X86_FEATURE_SMEP))
2781                         guest_efer |= EFER_NX;
2782                 else if (!(guest_efer & EFER_NX))
2783                         ignore_bits |= EFER_NX;
2784         }
2785
2786         /*
2787          * LMA and LME handled by hardware; SCE meaningless outside long mode.
2788          */
2789         ignore_bits |= EFER_SCE;
2790 #ifdef CONFIG_X86_64
2791         ignore_bits |= EFER_LMA | EFER_LME;
2792         /* SCE is meaningful only in long mode on Intel */
2793         if (guest_efer & EFER_LMA)
2794                 ignore_bits &= ~(u64)EFER_SCE;
2795 #endif
2796
2797         /*
2798          * On EPT, we can't emulate NX, so we must switch EFER atomically.
2799          * On CPUs that support "load IA32_EFER", always switch EFER
2800          * atomically, since it's faster than switching it manually.
2801          */
2802         if (cpu_has_load_ia32_efer ||
2803             (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2804                 if (!(guest_efer & EFER_LMA))
2805                         guest_efer &= ~EFER_LME;
2806                 if (guest_efer != host_efer)
2807                         add_atomic_switch_msr(vmx, MSR_EFER,
2808                                               guest_efer, host_efer, false);
2809                 else
2810                         clear_atomic_switch_msr(vmx, MSR_EFER);
2811                 return false;
2812         } else {
2813                 clear_atomic_switch_msr(vmx, MSR_EFER);
2814
2815                 guest_efer &= ~ignore_bits;
2816                 guest_efer |= host_efer & ignore_bits;
2817
2818                 vmx->guest_msrs[efer_offset].data = guest_efer;
2819                 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
2820
2821                 return true;
2822         }
2823 }
2824
2825 #ifdef CONFIG_X86_32
2826 /*
2827  * On 32-bit kernels, VM exits still load the FS and GS bases from the
2828  * VMCS rather than the segment table.  KVM uses this helper to figure
2829  * out the current bases to poke them into the VMCS before entry.
2830  */
2831 static unsigned long segment_base(u16 selector)
2832 {
2833         struct desc_struct *table;
2834         unsigned long v;
2835
2836         if (!(selector & ~SEGMENT_RPL_MASK))
2837                 return 0;
2838
2839         table = get_current_gdt_ro();
2840
2841         if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2842                 u16 ldt_selector = kvm_read_ldt();
2843
2844                 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2845                         return 0;
2846
2847                 table = (struct desc_struct *)segment_base(ldt_selector);
2848         }
2849         v = get_desc_base(&table[selector >> 3]);
2850         return v;
2851 }
2852 #endif
2853
2854 static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
2855 {
2856         struct vcpu_vmx *vmx = to_vmx(vcpu);
2857         struct vmcs_host_state *host_state;
2858 #ifdef CONFIG_X86_64
2859         int cpu = raw_smp_processor_id();
2860 #endif
2861         unsigned long fs_base, gs_base;
2862         u16 fs_sel, gs_sel;
2863         int i;
2864
2865         vmx->req_immediate_exit = false;
2866
2867         if (vmx->loaded_cpu_state)
2868                 return;
2869
2870         vmx->loaded_cpu_state = vmx->loaded_vmcs;
2871         host_state = &vmx->loaded_cpu_state->host_state;
2872
2873         /*
2874          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
2875          * allow segment selectors with cpl > 0 or ti == 1.
2876          */
2877         host_state->ldt_sel = kvm_read_ldt();
2878
2879 #ifdef CONFIG_X86_64
2880         savesegment(ds, host_state->ds_sel);
2881         savesegment(es, host_state->es_sel);
2882
2883         gs_base = cpu_kernelmode_gs_base(cpu);
2884         if (likely(is_64bit_mm(current->mm))) {
2885                 save_fsgs_for_kvm();
2886                 fs_sel = current->thread.fsindex;
2887                 gs_sel = current->thread.gsindex;
2888                 fs_base = current->thread.fsbase;
2889                 vmx->msr_host_kernel_gs_base = current->thread.gsbase;
2890         } else {
2891                 savesegment(fs, fs_sel);
2892                 savesegment(gs, gs_sel);
2893                 fs_base = read_msr(MSR_FS_BASE);
2894                 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
2895         }
2896
2897         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2898 #else
2899         savesegment(fs, fs_sel);
2900         savesegment(gs, gs_sel);
2901         fs_base = segment_base(fs_sel);
2902         gs_base = segment_base(gs_sel);
2903 #endif
2904
2905         if (unlikely(fs_sel != host_state->fs_sel)) {
2906                 if (!(fs_sel & 7))
2907                         vmcs_write16(HOST_FS_SELECTOR, fs_sel);
2908                 else
2909                         vmcs_write16(HOST_FS_SELECTOR, 0);
2910                 host_state->fs_sel = fs_sel;
2911         }
2912         if (unlikely(gs_sel != host_state->gs_sel)) {
2913                 if (!(gs_sel & 7))
2914                         vmcs_write16(HOST_GS_SELECTOR, gs_sel);
2915                 else
2916                         vmcs_write16(HOST_GS_SELECTOR, 0);
2917                 host_state->gs_sel = gs_sel;
2918         }
2919         if (unlikely(fs_base != host_state->fs_base)) {
2920                 vmcs_writel(HOST_FS_BASE, fs_base);
2921                 host_state->fs_base = fs_base;
2922         }
2923         if (unlikely(gs_base != host_state->gs_base)) {
2924                 vmcs_writel(HOST_GS_BASE, gs_base);
2925                 host_state->gs_base = gs_base;
2926         }
2927
2928         for (i = 0; i < vmx->save_nmsrs; ++i)
2929                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2930                                    vmx->guest_msrs[i].data,
2931                                    vmx->guest_msrs[i].mask);
2932 }
2933
2934 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
2935 {
2936         struct vmcs_host_state *host_state;
2937
2938         if (!vmx->loaded_cpu_state)
2939                 return;
2940
2941         WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
2942         host_state = &vmx->loaded_cpu_state->host_state;
2943
2944         ++vmx->vcpu.stat.host_state_reload;
2945         vmx->loaded_cpu_state = NULL;
2946
2947 #ifdef CONFIG_X86_64
2948         rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2949 #endif
2950         if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
2951                 kvm_load_ldt(host_state->ldt_sel);
2952 #ifdef CONFIG_X86_64
2953                 load_gs_index(host_state->gs_sel);
2954 #else
2955                 loadsegment(gs, host_state->gs_sel);
2956 #endif
2957         }
2958         if (host_state->fs_sel & 7)
2959                 loadsegment(fs, host_state->fs_sel);
2960 #ifdef CONFIG_X86_64
2961         if (unlikely(host_state->ds_sel | host_state->es_sel)) {
2962                 loadsegment(ds, host_state->ds_sel);
2963                 loadsegment(es, host_state->es_sel);
2964         }
2965 #endif
2966         invalidate_tss_limit();
2967 #ifdef CONFIG_X86_64
2968         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2969 #endif
2970         load_fixmap_gdt(raw_smp_processor_id());
2971 }
2972
2973 #ifdef CONFIG_X86_64
2974 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
2975 {
2976         preempt_disable();
2977         if (vmx->loaded_cpu_state)
2978                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2979         preempt_enable();
2980         return vmx->msr_guest_kernel_gs_base;
2981 }
2982
2983 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
2984 {
2985         preempt_disable();
2986         if (vmx->loaded_cpu_state)
2987                 wrmsrl(MSR_KERNEL_GS_BASE, data);
2988         preempt_enable();
2989         vmx->msr_guest_kernel_gs_base = data;
2990 }
2991 #endif
2992
2993 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2994 {
2995         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2996         struct pi_desc old, new;
2997         unsigned int dest;
2998
2999         /*
3000          * In case of hot-plug or hot-unplug, we may have to undo
3001          * vmx_vcpu_pi_put even if there is no assigned device.  And we
3002          * always keep PI.NDST up to date for simplicity: it makes the
3003          * code easier, and CPU migration is not a fast path.
3004          */
3005         if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
3006                 return;
3007
3008         /*
3009          * First handle the simple case where no cmpxchg is necessary; just
3010          * allow posting non-urgent interrupts.
3011          *
3012          * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
3013          * PI.NDST: pi_post_block will do it for us and the wakeup_handler
3014          * expects the VCPU to be on the blocked_vcpu_list that matches
3015          * PI.NDST.
3016          */
3017         if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
3018             vcpu->cpu == cpu) {
3019                 pi_clear_sn(pi_desc);
3020                 return;
3021         }
3022
3023         /* The full case.  */
3024         do {
3025                 old.control = new.control = pi_desc->control;
3026
3027                 dest = cpu_physical_id(cpu);
3028
3029                 if (x2apic_enabled())
3030                         new.ndst = dest;
3031                 else
3032                         new.ndst = (dest << 8) & 0xFF00;
3033
3034                 new.sn = 0;
3035         } while (cmpxchg64(&pi_desc->control, old.control,
3036                            new.control) != old.control);
3037 }
3038
3039 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
3040 {
3041         vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
3042         vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
3043 }
3044
3045 /*
3046  * Switches to the specified vcpu, until a matching vcpu_put(); assumes
3047  * the vcpu mutex is already taken.
3048  */
3049 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3050 {
3051         struct vcpu_vmx *vmx = to_vmx(vcpu);
3052         bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
3053
3054         if (!already_loaded) {
3055                 loaded_vmcs_clear(vmx->loaded_vmcs);
3056                 local_irq_disable();
3057                 crash_disable_local_vmclear(cpu);
3058
3059                 /*
3060                  * The read of loaded_vmcs->cpu must happen before fetching
3061                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
3062                  * See the comments in __loaded_vmcs_clear().
3063                  */
3064                 smp_rmb();
3065
3066                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
3067                          &per_cpu(loaded_vmcss_on_cpu, cpu));
3068                 crash_enable_local_vmclear(cpu);
3069                 local_irq_enable();
3070         }
3071
3072         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
3073                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
3074                 vmcs_load(vmx->loaded_vmcs->vmcs);
3075                 indirect_branch_prediction_barrier();
3076         }
3077
3078         if (!already_loaded) {
3079                 void *gdt = get_current_gdt_ro();
3080                 unsigned long sysenter_esp;
3081
3082                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3083
3084                 /*
3085                  * Linux uses per-cpu TSS and GDT, so set these when switching
3086                  * processors.  See 22.2.4.
3087                  */
3088                 vmcs_writel(HOST_TR_BASE,
3089                             (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
3090                 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
3091
3092                 /*
3093                  * A VM exit changes the host TR limit to 0x67.  This is
3094                  * okay, since 0x67 covers everything except the IO bitmap,
3095                  * and we have code to handle the IO bitmap being lost
3096                  * after a VM exit.
3097                  */
3098                 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
3099
3100                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
3101                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
3102
3103                 vmx->loaded_vmcs->cpu = cpu;
3104         }
3105
3106         /* Setup TSC multiplier */
3107         if (kvm_has_tsc_control &&
3108             vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
3109                 decache_tsc_multiplier(vmx);
3110
3111         vmx_vcpu_pi_load(vcpu, cpu);
3112         vmx->host_pkru = read_pkru();
3113         vmx->host_debugctlmsr = get_debugctlmsr();
3114 }
3115
3116 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
3117 {
3118         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
3119
3120         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
3121                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
3122                 !kvm_vcpu_apicv_active(vcpu))
3123                 return;
3124
3125         /* Set SN when the vCPU is preempted */
3126         if (vcpu->preempted)
3127                 pi_set_sn(pi_desc);
3128 }
3129
3130 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
3131 {
3132         vmx_vcpu_pi_put(vcpu);
3133
3134         vmx_prepare_switch_to_host(to_vmx(vcpu));
3135 }
3136
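/*
 * Returns true if the guest must be run through the emulator because its
 * current state (e.g. real-mode segment state) is not valid for direct
 * VMX entry.
 */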
3137 static bool emulation_required(struct kvm_vcpu *vcpu)
3138 {
3139         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
3140 }
3141
3142 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
3143
3144 /*
3145  * Return the cr0 value that a nested guest would read. This is a combination
3146  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
3147  * its hypervisor (cr0_read_shadow).
3148  */
3149 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
3150 {
3151         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
3152                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
3153 }
3154 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
3155 {
3156         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
3157                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
3158 }
3159
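/*
 * RFLAGS is cached in vmx->rflags and marked available via VCPU_EXREG_RFLAGS.
 * When real mode is emulated through vm86, the bits not owned by the guest
 * are taken from rmode.save_rflags rather than from the VMCS.
 */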
3160 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
3161 {
3162         unsigned long rflags, save_rflags;
3163
3164         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
3165                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
3166                 rflags = vmcs_readl(GUEST_RFLAGS);
3167                 if (to_vmx(vcpu)->rmode.vm86_active) {
3168                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3169                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
3170                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3171                 }
3172                 to_vmx(vcpu)->rflags = rflags;
3173         }
3174         return to_vmx(vcpu)->rflags;
3175 }
3176
3177 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
3178 {
3179         unsigned long old_rflags = vmx_get_rflags(vcpu);
3180
3181         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
3182         to_vmx(vcpu)->rflags = rflags;
3183         if (to_vmx(vcpu)->rmode.vm86_active) {
3184                 to_vmx(vcpu)->rmode.save_rflags = rflags;
3185                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3186         }
3187         vmcs_writel(GUEST_RFLAGS, rflags);
3188
3189         if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
3190                 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
3191 }
3192
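/*
 * Translate the guest interruptibility state in the VMCS (blocking by STI or
 * MOV SS) to and from KVM's generic interrupt-shadow flags.
 */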
3193 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
3194 {
3195         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3196         int ret = 0;
3197
3198         if (interruptibility & GUEST_INTR_STATE_STI)
3199                 ret |= KVM_X86_SHADOW_INT_STI;
3200         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
3201                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
3202
3203         return ret;
3204 }
3205
3206 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
3207 {
3208         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3209         u32 interruptibility = interruptibility_old;
3210
3211         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
3212
3213         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
3214                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
3215         else if (mask & KVM_X86_SHADOW_INT_STI)
3216                 interruptibility |= GUEST_INTR_STATE_STI;
3217
3218         if (interruptibility != interruptibility_old)
3219                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
3220 }
3221
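/*
 * Advance RIP past the instruction that caused the VM exit, using the
 * instruction length recorded by the CPU, and drop any interrupt shadow.
 */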
3222 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
3223 {
3224         unsigned long rip;
3225
3226         rip = kvm_rip_read(vcpu);
3227         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3228         kvm_rip_write(vcpu, rip);
3229
3230         /* skipping an emulated instruction also counts */
3231         vmx_set_interrupt_shadow(vcpu, 0);
3232 }
3233
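/*
 * Reflect an exception that L1 intercepts: build the VM-exit
 * interruption-information field (vector, type, error code, NMI unblocking)
 * and emulate an EXCEPTION_NMI exit from L2 to L1.
 */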
3234 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3235                                                unsigned long exit_qual)
3236 {
3237         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3238         unsigned int nr = vcpu->arch.exception.nr;
3239         u32 intr_info = nr | INTR_INFO_VALID_MASK;
3240
3241         if (vcpu->arch.exception.has_error_code) {
3242                 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3243                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3244         }
3245
3246         if (kvm_exception_is_soft(nr))
3247                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3248         else
3249                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3250
3251         if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3252             vmx_get_nmi_mask(vcpu))
3253                 intr_info |= INTR_INFO_UNBLOCK_NMI;
3254
3255         nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3256 }
3257
3258 /*
3259  * KVM wants to deliver the page faults it intercepted to the guest. For a
3260  * nested guest, this function decides whether to inject them into L1 or L2.
3261  */
3262 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
3263 {
3264         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3265         unsigned int nr = vcpu->arch.exception.nr;
3266
3267         if (nr == PF_VECTOR) {
3268                 if (vcpu->arch.exception.nested_apf) {
3269                         *exit_qual = vcpu->arch.apf.nested_apf_token;
3270                         return 1;
3271                 }
3272                 /*
3273                  * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
3274                  * The fix is to add the ancillary datum (CR2 or DR6) to structs
3275                  * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
3276                  * can be written only when inject_pending_event runs.  This should be
3277                  * conditional on a new capability---if the capability is disabled,
3278                  * kvm_multiple_exception would write the ancillary information to
3279                  * CR2 or DR6, for backwards ABI-compatibility.
3280                  */
3281                 if (nested_vmx_is_page_fault_vmexit(vmcs12,
3282                                                     vcpu->arch.exception.error_code)) {
3283                         *exit_qual = vcpu->arch.cr2;
3284                         return 1;
3285                 }
3286         } else {
3287                 if (vmcs12->exception_bitmap & (1u << nr)) {
3288                         if (nr == DB_VECTOR) {
3289                                 *exit_qual = vcpu->arch.dr6;
3290                                 *exit_qual &= ~(DR6_FIXED_1 | DR6_BT);
3291                                 *exit_qual ^= DR6_RTM;
3292                         } else {
3293                                 *exit_qual = 0;
3294                         }
3295                         return 1;
3296                 }
3297         }
3298
3299         return 0;
3300 }
3301
3302 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
3303 {
3304         /*
3305          * Ensure that we clear the HLT state in the VMCS.  We don't need to
3306          * explicitly skip the instruction because if the HLT state is set,
3307          * then the instruction is already executing and RIP has already been
3308          * advanced.
3309          */
3310         if (kvm_hlt_in_guest(vcpu->kvm) &&
3311                         vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
3312                 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
3313 }
3314
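/*
 * Inject the currently queued exception into the guest via the VM-entry
 * interruption-information field.  For vm86 real-mode guests the exception
 * is delivered by emulating real-mode IDT vectoring instead.
 */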
3315 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
3316 {
3317         struct vcpu_vmx *vmx = to_vmx(vcpu);
3318         unsigned nr = vcpu->arch.exception.nr;
3319         bool has_error_code = vcpu->arch.exception.has_error_code;
3320         u32 error_code = vcpu->arch.exception.error_code;
3321         u32 intr_info = nr | INTR_INFO_VALID_MASK;
3322
3323         if (has_error_code) {
3324                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
3325                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3326         }
3327
3328         if (vmx->rmode.vm86_active) {
3329                 int inc_eip = 0;
3330                 if (kvm_exception_is_soft(nr))
3331                         inc_eip = vcpu->arch.event_exit_inst_len;
3332                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
3333                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3334                 return;
3335         }
3336
3337         WARN_ON_ONCE(vmx->emulation_required);
3338
3339         if (kvm_exception_is_soft(nr)) {
3340                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
3341                              vmx->vcpu.arch.event_exit_inst_len);
3342                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3343         } else
3344                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3345
3346         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
3347
3348         vmx_clear_hlt(vcpu);
3349 }
3350
3351 static bool vmx_rdtscp_supported(void)
3352 {
3353         return cpu_has_vmx_rdtscp();
3354 }
3355
3356 static bool vmx_invpcid_supported(void)
3357 {
3358         return cpu_has_vmx_invpcid();
3359 }
3360
3361 /*
3362  * Swap MSR entry in host/guest MSR entry array.
3363  */
3364 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
3365 {
3366         struct shared_msr_entry tmp;
3367
3368         tmp = vmx->guest_msrs[to];
3369         vmx->guest_msrs[to] = vmx->guest_msrs[from];
3370         vmx->guest_msrs[from] = tmp;
3371 }
3372
3373 /*
3374  * Set up the vmcs to automatically save and restore system
3375  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
3376  * mode, as fiddling with msrs is very expensive.
3377  */
3378 static void setup_msrs(struct vcpu_vmx *vmx)
3379 {
3380         int save_nmsrs, index;
3381
3382         save_nmsrs = 0;
3383 #ifdef CONFIG_X86_64
3384         if (is_long_mode(&vmx->vcpu)) {
3385                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
3386                 if (index >= 0)
3387                         move_msr_up(vmx, index, save_nmsrs++);
3388                 index = __find_msr_index(vmx, MSR_LSTAR);
3389                 if (index >= 0)
3390                         move_msr_up(vmx, index, save_nmsrs++);
3391                 index = __find_msr_index(vmx, MSR_CSTAR);
3392                 if (index >= 0)
3393                         move_msr_up(vmx, index, save_nmsrs++);
3394                 index = __find_msr_index(vmx, MSR_TSC_AUX);
3395                 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
3396                         move_msr_up(vmx, index, save_nmsrs++);
3397                 /*
3398                  * MSR_STAR is only needed on long mode guests, and only
3399                  * if efer.sce is enabled.
3400                  */
3401                 index = __find_msr_index(vmx, MSR_STAR);
3402                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
3403                         move_msr_up(vmx, index, save_nmsrs++);
3404         }
3405 #endif
3406         index = __find_msr_index(vmx, MSR_EFER);
3407         if (index >= 0 && update_transition_efer(vmx, index))
3408                 move_msr_up(vmx, index, save_nmsrs++);
3409
3410         vmx->save_nmsrs = save_nmsrs;
3411
3412         if (cpu_has_vmx_msr_bitmap())
3413                 vmx_update_msr_bitmap(&vmx->vcpu);
3414 }
3415
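/*
 * Return the TSC offset as seen by L1.  While L2 runs with TSC offsetting
 * enabled, vcpu->arch.tsc_offset includes vmcs12's offset, which must be
 * subtracted out to recover L1's own offset.
 */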
3416 static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
3417 {
3418         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3419
3420         if (is_guest_mode(vcpu) &&
3421             (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
3422                 return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
3423
3424         return vcpu->arch.tsc_offset;
3425 }
3426
3427 /*
3428  * Writes 'offset' into the guest's timestamp counter offset register.
3429  */
3430 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
3431 {
3432         if (is_guest_mode(vcpu)) {
3433                 /*
3434                  * We're here if L1 chose not to trap WRMSR to TSC. According
3435                  * to the spec, this should set L1's TSC; the offset that L1
3436                  * set for L2 remains unchanged, and still needs to be added
3437                  * to the newly set TSC to get L2's TSC.
3438                  */
3439                 struct vmcs12 *vmcs12;
3440                 /* recalculate vmcs02.TSC_OFFSET: */
3441                 vmcs12 = get_vmcs12(vcpu);
3442                 vmcs_write64(TSC_OFFSET, offset +
3443                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
3444                          vmcs12->tsc_offset : 0));
3445         } else {
3446                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
3447                                            vmcs_read64(TSC_OFFSET), offset);
3448                 vmcs_write64(TSC_OFFSET, offset);
3449         }
3450 }
3451
3452 /*
3453  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
3454  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
3455  * all guests if the "nested" module option is off, and can also be disabled
3456  * for a single guest by disabling its VMX cpuid bit.
3457  */
3458 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
3459 {
3460         return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
3461 }
3462
3463 /*
3464  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
3465  * returned for the various VMX controls MSRs when nested VMX is enabled.
3466  * The same values should also be used to verify that vmcs12 control fields are
3467  * valid during nested entry from L1 to L2.
3468  * Each of these control msrs has a low and high 32-bit half: A low bit is on
3469  * if the corresponding bit in the (32-bit) control field *must* be on, and a
3470  * bit in the high half is on if the corresponding bit in the control field
3471  * may be on. See also vmx_control_verify().
3472  */
3473 static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
3474 {
3475         if (!nested) {
3476                 memset(msrs, 0, sizeof(*msrs));
3477                 return;
3478         }
3479
3480         /*
3481          * Note that as a general rule, the high half of the MSRs (bits in
3482          * the control fields which may be 1) should be initialized by the
3483          * intersection of the underlying hardware's MSR (i.e., features which
3484          * can be supported) and the list of features we want to expose -
3485          * because they are known to be properly supported in our code.
3486          * Also, usually, the low half of the MSRs (bits which must be 1) can
3487          * be set to 0, meaning that L1 may turn off any of these bits. The
3488          * reason is that if one of these bits is necessary, it will already
3489          * be set in vmcs01, and prepare_vmcs02, which bitwise-ORs the control
3490          * fields of vmcs01 and vmcs12, will keep it set in vmcs02 even if
3491          * L1 cleared it - and nested_vmx_exit_reflected() won't pass those exits to L1.
3492          * These rules have exceptions below.
3493          */
3494
3495         /* pin-based controls */
3496         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
3497                 msrs->pinbased_ctls_low,
3498                 msrs->pinbased_ctls_high);
3499         msrs->pinbased_ctls_low |=
3500                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3501         msrs->pinbased_ctls_high &=
3502                 PIN_BASED_EXT_INTR_MASK |
3503                 PIN_BASED_NMI_EXITING |
3504                 PIN_BASED_VIRTUAL_NMIS |
3505                 (apicv ? PIN_BASED_POSTED_INTR : 0);
3506         msrs->pinbased_ctls_high |=
3507                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
3508                 PIN_BASED_VMX_PREEMPTION_TIMER;
3509
3510         /* exit controls */
3511         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
3512                 msrs->exit_ctls_low,
3513                 msrs->exit_ctls_high);
3514         msrs->exit_ctls_low =
3515                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3516
3517         msrs->exit_ctls_high &=
3518 #ifdef CONFIG_X86_64
3519                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
3520 #endif
3521                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
3522         msrs->exit_ctls_high |=
3523                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
3524                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
3525                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
3526
3527         /* We support free control of debug control saving. */
3528         msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
3529
3530         /* entry controls */
3531         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
3532                 msrs->entry_ctls_low,
3533                 msrs->entry_ctls_high);
3534         msrs->entry_ctls_low =
3535                 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3536         msrs->entry_ctls_high &=
3537 #ifdef CONFIG_X86_64
3538                 VM_ENTRY_IA32E_MODE |
3539 #endif
3540                 VM_ENTRY_LOAD_IA32_PAT;
3541         msrs->entry_ctls_high |=
3542                 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
3543
3544         /* We support free control of debug control loading. */
3545         msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
3546
3547         /* cpu-based controls */
3548         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
3549                 msrs->procbased_ctls_low,
3550                 msrs->procbased_ctls_high);
3551         msrs->procbased_ctls_low =
3552                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3553         msrs->procbased_ctls_high &=
3554                 CPU_BASED_VIRTUAL_INTR_PENDING |
3555                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
3556                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
3557                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
3558                 CPU_BASED_CR3_STORE_EXITING |
3559 #ifdef CONFIG_X86_64
3560                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
3561 #endif
3562                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
3563                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
3564                 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
3565                 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
3566                 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
3567         /*
3568          * We can allow some features even when not supported by the
3569          * hardware. For example, L1 can specify an MSR bitmap - and we
3570          * can use it to avoid exits to L1 - even when L0 runs L2
3571          * without MSR bitmaps.
3572          */
3573         msrs->procbased_ctls_high |=
3574                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
3575                 CPU_BASED_USE_MSR_BITMAPS;
3576
3577         /* We support free control of CR3 access interception. */
3578         msrs->procbased_ctls_low &=
3579                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
3580
3581         /*
3582          * secondary cpu-based controls.  Do not include those that
3583          * depend on CPUID bits, they are added later by vmx_cpuid_update.
3584          */
3585         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
3586                 msrs->secondary_ctls_low,
3587                 msrs->secondary_ctls_high);
3588         msrs->secondary_ctls_low = 0;
3589         msrs->secondary_ctls_high &=
3590                 SECONDARY_EXEC_DESC |
3591                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3592                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
3593                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3594                 SECONDARY_EXEC_WBINVD_EXITING;
3595
3596         /*
3597          * We can emulate "VMCS shadowing," even if the hardware
3598          * doesn't support it.
3599          */
3600         msrs->secondary_ctls_high |=
3601                 SECONDARY_EXEC_SHADOW_VMCS;
3602
3603         if (enable_ept) {
3604                 /* nested EPT: emulate EPT also to L1 */
3605                 msrs->secondary_ctls_high |=
3606                         SECONDARY_EXEC_ENABLE_EPT;
3607                 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
3608                          VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
3609                 if (cpu_has_vmx_ept_execute_only())
3610                         msrs->ept_caps |=
3611                                 VMX_EPT_EXECUTE_ONLY_BIT;
3612                 msrs->ept_caps &= vmx_capability.ept;
3613                 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
3614                         VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
3615                         VMX_EPT_1GB_PAGE_BIT;
3616                 if (enable_ept_ad_bits) {
3617                         msrs->secondary_ctls_high |=
3618                                 SECONDARY_EXEC_ENABLE_PML;
3619                         msrs->ept_caps |= VMX_EPT_AD_BIT;
3620                 }
3621         }
3622
3623         if (cpu_has_vmx_vmfunc()) {
3624                 msrs->secondary_ctls_high |=
3625                         SECONDARY_EXEC_ENABLE_VMFUNC;
3626                 /*
3627                  * Advertise EPTP switching unconditionally
3628                  * since we emulate it
3629                  */
3630                 if (enable_ept)
3631                         msrs->vmfunc_controls =
3632                                 VMX_VMFUNC_EPTP_SWITCHING;
3633         }
3634
3635         /*
3636          * Old versions of KVM use the single-context version without
3637          * checking for support, so declare that it is supported even
3638          * though it is treated as global context.  The alternative is
3639          * not failing the single-context invvpid, and it is worse.
3640          */
3641         if (enable_vpid) {
3642                 msrs->secondary_ctls_high |=
3643                         SECONDARY_EXEC_ENABLE_VPID;
3644                 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
3645                         VMX_VPID_EXTENT_SUPPORTED_MASK;
3646         }
3647
3648         if (enable_unrestricted_guest)
3649                 msrs->secondary_ctls_high |=
3650                         SECONDARY_EXEC_UNRESTRICTED_GUEST;
3651
3652         if (flexpriority_enabled)
3653                 msrs->secondary_ctls_high |=
3654                         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
3655
3656         /* miscellaneous data */
3657         rdmsr(MSR_IA32_VMX_MISC,
3658                 msrs->misc_low,
3659                 msrs->misc_high);
3660         msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
3661         msrs->misc_low |=
3662                 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
3663                 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
3664                 VMX_MISC_ACTIVITY_HLT;
3665         msrs->misc_high = 0;
3666
3667         /*
3668          * This MSR reports some information about VMX support. We
3669          * should return information about the VMX we emulate for the
3670          * guest, and the VMCS structure we give it - not about the
3671          * VMX support of the underlying hardware.
3672          */
3673         msrs->basic =
3674                 VMCS12_REVISION |
3675                 VMX_BASIC_TRUE_CTLS |
3676                 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
3677                 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
3678
3679         if (cpu_has_vmx_basic_inout())
3680                 msrs->basic |= VMX_BASIC_INOUT;
3681
3682         /*
3683          * These MSRs specify bits which the guest must keep fixed on
3684          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
3685          * We picked the standard core2 setting.
3686          */
3687 #define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
3688 #define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
3689         msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
3690         msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
3691
3692         /* These MSRs specify bits which the guest must keep fixed off. */
3693         rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
3694         rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
3695
3696         /* highest index: VMX_PREEMPTION_TIMER_VALUE */
3697         msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
3698 }
3699
3700 /*
3701  * if fixed0[i] == 1: val[i] must be 1
3702  * if fixed1[i] == 0: val[i] must be 0
3703  */
3704 static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
3705 {
3706         return ((val & fixed1) | fixed0) == val;
3707 }
3708
3709 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
3710 {
3711         return fixed_bits_valid(control, low, high);
3712 }
3713
3714 static inline u64 vmx_control_msr(u32 low, u32 high)
3715 {
3716         return low | ((u64)high << 32);
3717 }
3718
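/*
 * Within @mask, return true if every bit set in @subset is also set in
 * @superset.
 */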
3719 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
3720 {
3721         superset &= mask;
3722         subset &= mask;
3723
3724         return (superset | subset) == superset;
3725 }
3726
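/*
 * Restore MSR_IA32_VMX_BASIC from userspace: feature and reserved bits may
 * only be cleared relative to what KVM reports, the VMCS revision ID must
 * match, and the advertised VMCS size may not shrink.
 */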
3727 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
3728 {
3729         const u64 feature_and_reserved =
3730                 /* feature (except bit 48; see below) */
3731                 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
3732                 /* reserved */
3733                 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
3734         u64 vmx_basic = vmx->nested.msrs.basic;
3735
3736         if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
3737                 return -EINVAL;
3738
3739         /*
3740          * KVM does not emulate a version of VMX that constrains physical
3741          * addresses of VMX structures (e.g. VMCS) to 32-bits.
3742          */
3743         if (data & BIT_ULL(48))
3744                 return -EINVAL;
3745
3746         if (vmx_basic_vmcs_revision_id(vmx_basic) !=
3747             vmx_basic_vmcs_revision_id(data))
3748                 return -EINVAL;
3749
3750         if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
3751                 return -EINVAL;
3752
3753         vmx->nested.msrs.basic = data;
3754         return 0;
3755 }
3756
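/*
 * Restore one of the "true" VMX control MSRs from userspace.  The new value
 * may only tighten what KVM reports: every must-be-1 (low) bit has to stay
 * set, and no may-be-1 (high) bits beyond those KVM supports may be set.
 */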
3757 static int
3758 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3759 {
3760         u64 supported;
3761         u32 *lowp, *highp;
3762
3763         switch (msr_index) {
3764         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3765                 lowp = &vmx->nested.msrs.pinbased_ctls_low;
3766                 highp = &vmx->nested.msrs.pinbased_ctls_high;
3767                 break;
3768         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3769                 lowp = &vmx->nested.msrs.procbased_ctls_low;
3770                 highp = &vmx->nested.msrs.procbased_ctls_high;
3771                 break;
3772         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3773                 lowp = &vmx->nested.msrs.exit_ctls_low;
3774                 highp = &vmx->nested.msrs.exit_ctls_high;
3775                 break;
3776         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3777                 lowp = &vmx->nested.msrs.entry_ctls_low;
3778                 highp = &vmx->nested.msrs.entry_ctls_high;
3779                 break;
3780         case MSR_IA32_VMX_PROCBASED_CTLS2:
3781                 lowp = &vmx->nested.msrs.secondary_ctls_low;
3782                 highp = &vmx->nested.msrs.secondary_ctls_high;
3783                 break;
3784         default:
3785                 BUG();
3786         }
3787
3788         supported = vmx_control_msr(*lowp, *highp);
3789
3790         /* Check must-be-1 bits are still 1. */
3791         if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
3792                 return -EINVAL;
3793
3794         /* Check must-be-0 bits are still 0. */
3795         if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
3796                 return -EINVAL;
3797
3798         *lowp = data;
3799         *highp = data >> 32;
3800         return 0;
3801 }
3802
3803 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
3804 {
3805         const u64 feature_and_reserved_bits =
3806                 /* feature */
3807                 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
3808                 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
3809                 /* reserved */
3810                 GENMASK_ULL(13, 9) | BIT_ULL(31);
3811         u64 vmx_misc;
3812
3813         vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
3814                                    vmx->nested.msrs.misc_high);
3815
3816         if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
3817                 return -EINVAL;
3818
3819         if ((vmx->nested.msrs.pinbased_ctls_high &
3820              PIN_BASED_VMX_PREEMPTION_TIMER) &&
3821             vmx_misc_preemption_timer_rate(data) !=
3822             vmx_misc_preemption_timer_rate(vmx_misc))
3823                 return -EINVAL;
3824
3825         if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
3826                 return -EINVAL;
3827
3828         if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
3829                 return -EINVAL;
3830
3831         if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
3832                 return -EINVAL;
3833
3834         vmx->nested.msrs.misc_low = data;
3835         vmx->nested.msrs.misc_high = data >> 32;
3836
3837         /*
3838          * If L1 has read-only VM-exit information fields, use the
3839          * less permissive vmx_vmwrite_bitmap to specify write
3840          * permissions for the shadow VMCS.
3841          */
3842         if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
3843                 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
3844
3845         return 0;
3846 }
3847
3848 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
3849 {
3850         u64 vmx_ept_vpid_cap;
3851
3852         vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
3853                                            vmx->nested.msrs.vpid_caps);
3854
3855         /* Every bit is either reserved or a feature bit. */
3856         if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
3857                 return -EINVAL;
3858
3859         vmx->nested.msrs.ept_caps = data;
3860         vmx->nested.msrs.vpid_caps = data >> 32;
3861         return 0;
3862 }
3863
3864 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3865 {
3866         u64 *msr;
3867
3868         switch (msr_index) {
3869         case MSR_IA32_VMX_CR0_FIXED0:
3870                 msr = &vmx->nested.msrs.cr0_fixed0;
3871                 break;
3872         case MSR_IA32_VMX_CR4_FIXED0:
3873                 msr = &vmx->nested.msrs.cr4_fixed0;
3874                 break;
3875         default:
3876                 BUG();
3877         }
3878
3879         /*
3880          * Bits that are 1 in *msr (i.e. bits that must be 1 during VMX
3881          * operation) must also be 1 in the restored value.
3882          */
3883         if (!is_bitwise_subset(data, *msr, -1ULL))
3884                 return -EINVAL;
3885
3886         *msr = data;
3887         return 0;
3888 }
3889
3890 /*
3891  * Called when userspace is restoring VMX MSRs.
3892  *
3893  * Returns 0 on success, non-0 otherwise.
3894  */
3895 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
3896 {
3897         struct vcpu_vmx *vmx = to_vmx(vcpu);
3898
3899         /*
3900          * Don't allow changes to the VMX capability MSRs while the vCPU
3901          * is in VMX operation.
3902          */
3903         if (vmx->nested.vmxon)
3904                 return -EBUSY;
3905
3906         switch (msr_index) {
3907         case MSR_IA32_VMX_BASIC:
3908                 return vmx_restore_vmx_basic(vmx, data);
3909         case MSR_IA32_VMX_PINBASED_CTLS:
3910         case MSR_IA32_VMX_PROCBASED_CTLS:
3911         case MSR_IA32_VMX_EXIT_CTLS:
3912         case MSR_IA32_VMX_ENTRY_CTLS:
3913                 /*
3914                  * The "non-true" VMX capability MSRs are generated from the
3915                  * "true" MSRs, so we do not support restoring them directly.
3916                  *
3917                  * If userspace wants to emulate VMX_BASIC[55]=0, userspace
3918                  * should restore the "true" MSRs with the must-be-1 bits
3919                  * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
3920                  * DEFAULT SETTINGS".
3921                  */
3922                 return -EINVAL;
3923         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3924         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3925         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3926         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3927         case MSR_IA32_VMX_PROCBASED_CTLS2:
3928                 return vmx_restore_control_msr(vmx, msr_index, data);
3929         case MSR_IA32_VMX_MISC:
3930                 return vmx_restore_vmx_misc(vmx, data);
3931         case MSR_IA32_VMX_CR0_FIXED0:
3932         case MSR_IA32_VMX_CR4_FIXED0:
3933                 return vmx_restore_fixed0_msr(vmx, msr_index, data);
3934         case MSR_IA32_VMX_CR0_FIXED1:
3935         case MSR_IA32_VMX_CR4_FIXED1:
3936                 /*
3937                  * These MSRs are generated based on the vCPU's CPUID, so we
3938                  * do not support restoring them directly.
3939                  */
3940                 return -EINVAL;
3941         case MSR_IA32_VMX_EPT_VPID_CAP:
3942                 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
3943         case MSR_IA32_VMX_VMCS_ENUM:
3944                 vmx->nested.msrs.vmcs_enum = data;
3945                 return 0;
3946         default:
3947                 /*
3948                  * The rest of the VMX capability MSRs do not support restore.
3949                  */
3950                 return -EINVAL;
3951         }
3952 }
3953
3954 /* Returns 0 on success, non-0 otherwise. */
3955 static int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
3956 {
3957         switch (msr_index) {
3958         case MSR_IA32_VMX_BASIC:
3959                 *pdata = msrs->basic;
3960                 break;
3961         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3962         case MSR_IA32_VMX_PINBASED_CTLS:
3963                 *pdata = vmx_control_msr(
3964                         msrs->pinbased_ctls_low,
3965                         msrs->pinbased_ctls_high);
3966                 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
3967                         *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3968                 break;
3969         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3970         case MSR_IA32_VMX_PROCBASED_CTLS:
3971                 *pdata = vmx_control_msr(
3972                         msrs->procbased_ctls_low,
3973                         msrs->procbased_ctls_high);
3974                 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
3975                         *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3976                 break;
3977         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3978         case MSR_IA32_VMX_EXIT_CTLS:
3979                 *pdata = vmx_control_msr(
3980                         msrs->exit_ctls_low,
3981                         msrs->exit_ctls_high);
3982                 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
3983                         *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3984                 break;
3985         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3986         case MSR_IA32_VMX_ENTRY_CTLS:
3987                 *pdata = vmx_control_msr(
3988                         msrs->entry_ctls_low,
3989                         msrs->entry_ctls_high);
3990                 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
3991                         *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3992                 break;
3993         case MSR_IA32_VMX_MISC:
3994                 *pdata = vmx_control_msr(
3995                         msrs->misc_low,
3996                         msrs->misc_high);
3997                 break;
3998         case MSR_IA32_VMX_CR0_FIXED0:
3999                 *pdata = msrs->cr0_fixed0;
4000                 break;
4001         case MSR_IA32_VMX_CR0_FIXED1:
4002                 *pdata = msrs->cr0_fixed1;
4003                 break;
4004         case MSR_IA32_VMX_CR4_FIXED0:
4005                 *pdata = msrs->cr4_fixed0;
4006                 break;
4007         case MSR_IA32_VMX_CR4_FIXED1:
4008                 *pdata = msrs->cr4_fixed1;
4009                 break;
4010         case MSR_IA32_VMX_VMCS_ENUM:
4011                 *pdata = msrs->vmcs_enum;
4012                 break;
4013         case MSR_IA32_VMX_PROCBASED_CTLS2:
4014                 *pdata = vmx_control_msr(
4015                         msrs->secondary_ctls_low,
4016                         msrs->secondary_ctls_high);
4017                 break;
4018         case MSR_IA32_VMX_EPT_VPID_CAP:
4019                 *pdata = msrs->ept_caps |
4020                         ((u64)msrs->vpid_caps << 32);
4021                 break;
4022         case MSR_IA32_VMX_VMFUNC:
4023                 *pdata = msrs->vmfunc_controls;
4024                 break;
4025         default:
4026                 return 1;
4027         }
4028
4029         return 0;
4030 }
4031
4032 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
4033                                                  uint64_t val)
4034 {
4035         uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
4036
4037         return !(val & ~valid_bits);
4038 }
4039
4040 static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
4041 {
4042         switch (msr->index) {
4043         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
4044                 if (!nested)
4045                         return 1;
4046                 return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
4047         default:
4048                 return 1;
4049         }
4050
4051         return 0;
4052 }
4053
4054 /*
4055  * Reads an msr value (from msr_info->index) into msr_info->data.
4056  * Returns 0 on success, non-0 otherwise.
4057  * Assumes vcpu_load() was already called.
4058  */
4059 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4060 {
4061         struct vcpu_vmx *vmx = to_vmx(vcpu);
4062         struct shared_msr_entry *msr;
4063
4064         switch (msr_info->index) {
4065 #ifdef CONFIG_X86_64
4066         case MSR_FS_BASE:
4067                 msr_info->data = vmcs_readl(GUEST_FS_BASE);
4068                 break;
4069         case MSR_GS_BASE:
4070                 msr_info->data = vmcs_readl(GUEST_GS_BASE);
4071                 break;
4072         case MSR_KERNEL_GS_BASE:
4073                 msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
4074                 break;
4075 #endif
4076         case MSR_EFER:
4077                 return kvm_get_msr_common(vcpu, msr_info);
4078         case MSR_IA32_SPEC_CTRL:
4079                 if (!msr_info->host_initiated &&
4080                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
4081                         return 1;
4082
4083                 msr_info->data = to_vmx(vcpu)->spec_ctrl;
4084                 break;
4085         case MSR_IA32_ARCH_CAPABILITIES:
4086                 if (!msr_info->host_initiated &&
4087                     !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
4088                         return 1;
4089                 msr_info->data = to_vmx(vcpu)->arch_capabilities;
4090                 break;
4091         case MSR_IA32_SYSENTER_CS:
4092                 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
4093                 break;
4094         case MSR_IA32_SYSENTER_EIP:
4095                 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
4096                 break;
4097         case MSR_IA32_SYSENTER_ESP:
4098                 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
4099                 break;
4100         case MSR_IA32_BNDCFGS:
4101                 if (!kvm_mpx_supported() ||
4102                     (!msr_info->host_initiated &&
4103                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
4104                         return 1;
4105                 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
4106                 break;
4107         case MSR_IA32_MCG_EXT_CTL:
4108                 if (!msr_info->host_initiated &&
4109                     !(vmx->msr_ia32_feature_control &
4110                       FEATURE_CONTROL_LMCE))
4111                         return 1;
4112                 msr_info->data = vcpu->arch.mcg_ext_ctl;
4113                 break;
4114         case MSR_IA32_FEATURE_CONTROL:
4115                 msr_info->data = vmx->msr_ia32_feature_control;
4116                 break;
4117         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
4118                 if (!nested_vmx_allowed(vcpu))
4119                         return 1;
4120                 return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
4121                                        &msr_info->data);
4122         case MSR_IA32_XSS:
4123                 if (!vmx_xsaves_supported())
4124                         return 1;
4125                 msr_info->data = vcpu->arch.ia32_xss;
4126                 break;
4127         case MSR_TSC_AUX:
4128                 if (!msr_info->host_initiated &&
4129                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
4130                         return 1;
4131                 /* Otherwise falls through */
4132         default:
4133                 msr = find_msr_entry(vmx, msr_info->index);
4134                 if (msr) {
4135                         msr_info->data = msr->data;
4136                         break;
4137                 }
4138                 return kvm_get_msr_common(vcpu, msr_info);
4139         }
4140
4141         return 0;
4142 }
4143
4144 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
4145
4146 /*
4147  * Writes msr value into the appropriate "register".
4148  * Returns 0 on success, non-0 otherwise.
4149  * Assumes vcpu_load() was already called.
4150  */
4151 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4152 {
4153         struct vcpu_vmx *vmx = to_vmx(vcpu);
4154         struct shared_msr_entry *msr;
4155         int ret = 0;
4156         u32 msr_index = msr_info->index;
4157         u64 data = msr_info->data;
4158
4159         switch (msr_index) {
4160         case MSR_EFER:
4161                 ret = kvm_set_msr_common(vcpu, msr_info);
4162                 break;
4163 #ifdef CONFIG_X86_64
4164         case MSR_FS_BASE:
4165                 vmx_segment_cache_clear(vmx);
4166                 vmcs_writel(GUEST_FS_BASE, data);
4167                 break;
4168         case MSR_GS_BASE:
4169                 vmx_segment_cache_clear(vmx);
4170                 vmcs_writel(GUEST_GS_BASE, data);
4171                 break;
4172         case MSR_KERNEL_GS_BASE:
4173                 vmx_write_guest_kernel_gs_base(vmx, data);
4174                 break;
4175 #endif
4176         case MSR_IA32_SYSENTER_CS:
4177                 vmcs_write32(GUEST_SYSENTER_CS, data);
4178                 break;
4179         case MSR_IA32_SYSENTER_EIP:
4180                 vmcs_writel(GUEST_SYSENTER_EIP, data);
4181                 break;
4182         case MSR_IA32_SYSENTER_ESP:
4183                 vmcs_writel(GUEST_SYSENTER_ESP, data);
4184                 break;
4185         case MSR_IA32_BNDCFGS:
4186                 if (!kvm_mpx_supported() ||
4187                     (!msr_info->host_initiated &&
4188                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
4189                         return 1;
4190                 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
4191                     (data & MSR_IA32_BNDCFGS_RSVD))
4192                         return 1;
4193                 vmcs_write64(GUEST_BNDCFGS, data);
4194                 break;
4195         case MSR_IA32_SPEC_CTRL:
4196                 if (!msr_info->host_initiated &&
4197                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
4198                         return 1;
4199
4200                 /* The STIBP bit doesn't fault even if it's not advertised */
4201                 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
4202                         return 1;
4203
4204                 vmx->spec_ctrl = data;
4205
4206                 if (!data)
4207                         break;
4208
4209                 /*
4210                  * For non-nested:
4211                  * When it's written (to non-zero) for the first time, pass
4212                  * it through.
4213                  *
4214                  * For nested:
4215                  * The handling of the MSR bitmap for L2 guests is done in
4216                  * nested_vmx_merge_msr_bitmap. We should not touch the
4217                  * vmcs02.msr_bitmap here since it gets completely overwritten
4218                  * in the merging. We update the vmcs01 here for L1 as well
4219                  * since it will end up touching the MSR anyway now.
4220                  */
4221                 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
4222                                               MSR_IA32_SPEC_CTRL,
4223                                               MSR_TYPE_RW);
4224                 break;
4225         case MSR_IA32_PRED_CMD:
4226                 if (!msr_info->host_initiated &&
4227                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
4228                         return 1;
4229
4230                 if (data & ~PRED_CMD_IBPB)
4231                         return 1;
4232
4233                 if (!data)
4234                         break;
4235
4236                 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
4237
4238                 /*
4239                  * For non-nested:
4240                  * When it's written (to non-zero) for the first time, pass
4241                  * it through.
4242                  *
4243                  * For nested:
4244                  * The handling of the MSR bitmap for L2 guests is done in
4245                  * nested_vmx_merge_msr_bitmap. We should not touch the
4246                  * vmcs02.msr_bitmap here since it gets completely overwritten
4247                  * in the merging.
4248                  */
4249                 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
4250                                               MSR_TYPE_W);
4251                 break;
4252         case MSR_IA32_ARCH_CAPABILITIES:
4253                 if (!msr_info->host_initiated)
4254                         return 1;
4255                 vmx->arch_capabilities = data;
4256                 break;
4257         case MSR_IA32_CR_PAT:
4258                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
4259                         if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
4260                                 return 1;
4261                         vmcs_write64(GUEST_IA32_PAT, data);
4262                         vcpu->arch.pat = data;
4263                         break;
4264                 }
4265                 ret = kvm_set_msr_common(vcpu, msr_info);
4266                 break;
4267         case MSR_IA32_TSC_ADJUST:
4268                 ret = kvm_set_msr_common(vcpu, msr_info);
4269                 break;
4270         case MSR_IA32_MCG_EXT_CTL:
4271                 if ((!msr_info->host_initiated &&
4272                      !(to_vmx(vcpu)->msr_ia32_feature_control &
4273                        FEATURE_CONTROL_LMCE)) ||
4274                     (data & ~MCG_EXT_CTL_LMCE_EN))
4275                         return 1;
4276                 vcpu->arch.mcg_ext_ctl = data;
4277                 break;
4278         case MSR_IA32_FEATURE_CONTROL:
4279                 if (!vmx_feature_control_msr_valid(vcpu, data) ||
4280                     (to_vmx(vcpu)->msr_ia32_feature_control &
4281                      FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
4282                         return 1;
4283                 vmx->msr_ia32_feature_control = data;
4284                 if (msr_info->host_initiated && data == 0)
4285                         vmx_leave_nested(vcpu);
4286                 break;
4287         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
4288                 if (!msr_info->host_initiated)
4289                         return 1; /* they are read-only */
4290                 if (!nested_vmx_allowed(vcpu))
4291                         return 1;
4292                 return vmx_set_vmx_msr(vcpu, msr_index, data);
4293         case MSR_IA32_XSS:
4294                 if (!vmx_xsaves_supported())
4295                         return 1;
4296                 /*
4297                  * The only supported bit as of Skylake is bit 8, but
4298                  * it is not supported by KVM.
4299                  */
4300                 if (data != 0)
4301                         return 1;
4302                 vcpu->arch.ia32_xss = data;
4303                 if (vcpu->arch.ia32_xss != host_xss)
4304                         add_atomic_switch_msr(vmx, MSR_IA32_XSS,
4305                                 vcpu->arch.ia32_xss, host_xss, false);
4306                 else
4307                         clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
4308                 break;
4309         case MSR_TSC_AUX:
4310                 if (!msr_info->host_initiated &&
4311                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
4312                         return 1;
4313                 /* Check reserved bit, higher 32 bits should be zero */
4314                 if ((data >> 32) != 0)
4315                         return 1;
4316                 /* Otherwise falls through */
4317         default:
4318                 msr = find_msr_entry(vmx, msr_index);
4319                 if (msr) {
4320                         u64 old_msr_data = msr->data;
4321                         msr->data = data;
4322                         if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
4323                                 preempt_disable();
4324                                 ret = kvm_set_shared_msr(msr->index, msr->data,
4325                                                          msr->mask);
4326                                 preempt_enable();
4327                                 if (ret)
4328                                         msr->data = old_msr_data;
4329                         }
4330                         break;
4331                 }
4332                 ret = kvm_set_msr_common(vcpu, msr_info);
4333         }
4334
4335         return ret;
4336 }
4337
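/*
 * Pull the requested register (or the PDPTRs) out of the VMCS into the
 * vcpu's register cache and mark it as available.
 */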
4338 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
4339 {
4340         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
4341         switch (reg) {
4342         case VCPU_REGS_RSP:
4343                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
4344                 break;
4345         case VCPU_REGS_RIP:
4346                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
4347                 break;
4348         case VCPU_EXREG_PDPTR:
4349                 if (enable_ept)
4350                         ept_save_pdptrs(vcpu);
4351                 break;
4352         default:
4353                 break;
4354         }
4355 }
4356
4357 static __init int cpu_has_kvm_support(void)
4358 {
4359         return cpu_has_vmx();
4360 }
4361
4362 static __init int vmx_disabled_by_bios(void)
4363 {
4364         u64 msr;
4365
4366         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
4367         if (msr & FEATURE_CONTROL_LOCKED) {
4368                 /* launched w/ TXT and VMX disabled */
4369                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
4370                         && tboot_enabled())
4371                         return 1;
4372                 /* launched w/o TXT and VMX only enabled w/ TXT */
4373                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
4374                         && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
4375                         && !tboot_enabled()) {
4376                         printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
4377                                 "activate TXT before enabling KVM\n");
4378                         return 1;
4379                 }
4380                 /* launched w/o TXT and VMX disabled */
4381                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
4382                         && !tboot_enabled())
4383                         return 1;
4384         }
4385
4386         return 0;
4387 }
4388
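/*
 * Enter VMX root operation on this CPU: set CR4.VMXE, let the Intel PT
 * driver know that VMX is active and execute VMXON on the physical address
 * of this CPU's VMXON region.
 */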
4389 static void kvm_cpu_vmxon(u64 addr)
4390 {
4391         cr4_set_bits(X86_CR4_VMXE);
4392         intel_pt_handle_vmx(1);
4393
4394         asm volatile (ASM_VMX_VMXON_RAX
4395                         : : "a"(&addr), "m"(addr)
4396                         : "memory", "cc");
4397 }
4398
4399 static int hardware_enable(void)
4400 {
4401         int cpu = raw_smp_processor_id();
4402         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
4403         u64 old, test_bits;
4404
4405         if (cr4_read_shadow() & X86_CR4_VMXE)
4406                 return -EBUSY;
4407
4408         /*
4409          * This can happen if we hot-added a CPU but failed to allocate
4410          * VP assist page for it.
4411          */
4412         if (static_branch_unlikely(&enable_evmcs) &&
4413             !hv_get_vp_assist_page(cpu))
4414                 return -EFAULT;
4415
4416         INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
4417         INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
4418         spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
4419
4420         /*
4421          * Now that the loaded_vmcss_on_cpu list on this cpu has
4422          * been initialized, the vmclear operation used by kdump
4423          * can be enabled.
4424          *
4425          * Although the cpu is not yet in VMX operation, enabling
4426          * it now is safe because the loaded_vmcss_on_cpu list is
4427          * still empty.
4428          */
4429         crash_enable_local_vmclear(cpu);
4430
4431         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
4432
4433         test_bits = FEATURE_CONTROL_LOCKED;
4434         test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
4435         if (tboot_enabled())
4436                 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
4437
4438         if ((old & test_bits) != test_bits) {
4439                 /* enable and lock */
4440                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
4441         }
4442         kvm_cpu_vmxon(phys_addr);
4443         if (enable_ept)
4444                 ept_sync_global();
4445
4446         return 0;
4447 }
4448
4449 static void vmclear_local_loaded_vmcss(void)
4450 {
4451         int cpu = raw_smp_processor_id();
4452         struct loaded_vmcs *v, *n;
4453
4454         list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
4455                                  loaded_vmcss_on_cpu_link)
4456                 __loaded_vmcs_clear(v);
4457 }
4458
4459
4460 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
4461  * tricks.
4462  */
4463 static void kvm_cpu_vmxoff(void)
4464 {
4465         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
4466
4467         intel_pt_handle_vmx(0);
4468         cr4_clear_bits(X86_CR4_VMXE);
4469 }
4470
4471 static void hardware_disable(void)
4472 {
4473         vmclear_local_loaded_vmcss();
4474         kvm_cpu_vmxoff();
4475 }
4476
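/*
 * Combine the required (ctl_min) and optional (ctl_opt) control bits with
 * the capability MSR 'msr': a 0 in the MSR's high word forces the bit to 0,
 * a 1 in the low word forces it to 1.  Fails if a required bit gets cleared.
 *
 * Worked example with made-up values: ctl_min = 0x0006, ctl_opt = 0x0100,
 * allowed-1 mask (high word) = 0x01ff and allowed-0 mask (low word) = 0x0002
 * gives ((0x0006 | 0x0100) & 0x01ff) | 0x0002 = 0x0106; had the allowed-1
 * mask cleared one of the ctl_min bits, the function would return -EIO.
 */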
4477 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
4478                                       u32 msr, u32 *result)
4479 {
4480         u32 vmx_msr_low, vmx_msr_high;
4481         u32 ctl = ctl_min | ctl_opt;
4482
4483         rdmsr(msr, vmx_msr_low, vmx_msr_high);
4484
4485         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
4486         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
4487
4488         /* Ensure minimum (required) set of control bits are supported. */
4489         if (ctl_min & ~ctl)
4490                 return -EIO;
4491
4492         *result = ctl;
4493         return 0;
4494 }
4495
4496 static __init bool allow_1_setting(u32 msr, u32 ctl)
4497 {
4498         u32 vmx_msr_low, vmx_msr_high;
4499
4500         rdmsr(msr, vmx_msr_low, vmx_msr_high);
4501         return vmx_msr_high & ctl;
4502 }
4503
4504 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
4505 {
4506         u32 vmx_msr_low, vmx_msr_high;
4507         u32 min, opt, min2, opt2;
4508         u32 _pin_based_exec_control = 0;
4509         u32 _cpu_based_exec_control = 0;
4510         u32 _cpu_based_2nd_exec_control = 0;
4511         u32 _vmexit_control = 0;
4512         u32 _vmentry_control = 0;
4513
4514         memset(vmcs_conf, 0, sizeof(*vmcs_conf));
4515         min = CPU_BASED_HLT_EXITING |
4516 #ifdef CONFIG_X86_64
4517               CPU_BASED_CR8_LOAD_EXITING |
4518               CPU_BASED_CR8_STORE_EXITING |
4519 #endif
4520               CPU_BASED_CR3_LOAD_EXITING |
4521               CPU_BASED_CR3_STORE_EXITING |
4522               CPU_BASED_UNCOND_IO_EXITING |
4523               CPU_BASED_MOV_DR_EXITING |
4524               CPU_BASED_USE_TSC_OFFSETING |
4525               CPU_BASED_MWAIT_EXITING |
4526               CPU_BASED_MONITOR_EXITING |
4527               CPU_BASED_INVLPG_EXITING |
4528               CPU_BASED_RDPMC_EXITING;
4529
4530         opt = CPU_BASED_TPR_SHADOW |
4531               CPU_BASED_USE_MSR_BITMAPS |
4532               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
4533         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
4534                                 &_cpu_based_exec_control) < 0)
4535                 return -EIO;
4536 #ifdef CONFIG_X86_64
4537         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
4538                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
4539                                            ~CPU_BASED_CR8_STORE_EXITING;
4540 #endif
4541         if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
4542                 min2 = 0;
4543                 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
4544                         SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
4545                         SECONDARY_EXEC_WBINVD_EXITING |
4546                         SECONDARY_EXEC_ENABLE_VPID |
4547                         SECONDARY_EXEC_ENABLE_EPT |
4548                         SECONDARY_EXEC_UNRESTRICTED_GUEST |
4549                         SECONDARY_EXEC_PAUSE_LOOP_EXITING |
4550                         SECONDARY_EXEC_DESC |
4551                         SECONDARY_EXEC_RDTSCP |
4552                         SECONDARY_EXEC_ENABLE_INVPCID |
4553                         SECONDARY_EXEC_APIC_REGISTER_VIRT |
4554                         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
4555                         SECONDARY_EXEC_SHADOW_VMCS |
4556                         SECONDARY_EXEC_XSAVES |
4557                         SECONDARY_EXEC_RDSEED_EXITING |
4558                         SECONDARY_EXEC_RDRAND_EXITING |
4559                         SECONDARY_EXEC_ENABLE_PML |
4560                         SECONDARY_EXEC_TSC_SCALING |
4561                         SECONDARY_EXEC_ENABLE_VMFUNC |
4562                         SECONDARY_EXEC_ENCLS_EXITING;
4563                 if (adjust_vmx_controls(min2, opt2,
4564                                         MSR_IA32_VMX_PROCBASED_CTLS2,
4565                                         &_cpu_based_2nd_exec_control) < 0)
4566                         return -EIO;
4567         }
4568 #ifndef CONFIG_X86_64
4569         if (!(_cpu_based_2nd_exec_control &
4570                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
4571                 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
4572 #endif
4573
4574         if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
4575                 _cpu_based_2nd_exec_control &= ~(
4576                                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
4577                                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
4578                                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4579
4580         rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
4581                 &vmx_capability.ept, &vmx_capability.vpid);
4582
4583         if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
4584                 /* CR3 accesses and invlpg don't need to cause VM exits
4585                    when EPT is enabled */
4586                 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
4587                                              CPU_BASED_CR3_STORE_EXITING |
4588                                              CPU_BASED_INVLPG_EXITING);
4589         } else if (vmx_capability.ept) {
4590                 vmx_capability.ept = 0;
4591                 pr_warn_once("EPT capability reported even though the "
4592                                 "\"enable EPT\" VM-execution control cannot be set\n");
4593         }
4594         if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
4595                 vmx_capability.vpid) {
4596                 vmx_capability.vpid = 0;
4597                 pr_warn_once("VPID capability reported even though the "
4598                                 "\"enable VPID\" VM-execution control cannot be set\n");
4599         }
4600
4601         min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
4602 #ifdef CONFIG_X86_64
4603         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
4604 #endif
4605         opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
4606                 VM_EXIT_CLEAR_BNDCFGS;
4607         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
4608                                 &_vmexit_control) < 0)
4609                 return -EIO;
4610
4611         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
4612         opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
4613                  PIN_BASED_VMX_PREEMPTION_TIMER;
4614         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
4615                                 &_pin_based_exec_control) < 0)
4616                 return -EIO;
4617
4618         if (cpu_has_broken_vmx_preemption_timer())
4619                 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
4620         if (!(_cpu_based_2nd_exec_control &
4621                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
4622                 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
4623
4624         min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
4625         opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
4626         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
4627                                 &_vmentry_control) < 0)
4628                 return -EIO;
4629
4630         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
4631
4632         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
4633         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
4634                 return -EIO;
4635
4636 #ifdef CONFIG_X86_64
4637         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
4638         if (vmx_msr_high & (1u<<16))
4639                 return -EIO;
4640 #endif
4641
4642         /* Require Write-Back (WB) memory type for VMCS accesses. */
4643         if (((vmx_msr_high >> 18) & 15) != 6)
4644                 return -EIO;
4645
4646         vmcs_conf->size = vmx_msr_high & 0x1fff;
4647         vmcs_conf->order = get_order(vmcs_conf->size);
4648         vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
4649
4650         vmcs_conf->revision_id = vmx_msr_low;
4651
4652         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
4653         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
4654         vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
4655         vmcs_conf->vmexit_ctrl         = _vmexit_control;
4656         vmcs_conf->vmentry_ctrl        = _vmentry_control;
4657
4658         if (static_branch_unlikely(&enable_evmcs))
4659                 evmcs_sanitize_exec_ctrls(vmcs_conf);
4660
4661         cpu_has_load_ia32_efer =
4662                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
4663                                 VM_ENTRY_LOAD_IA32_EFER)
4664                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
4665                                    VM_EXIT_LOAD_IA32_EFER);
4666
4667         cpu_has_load_perf_global_ctrl =
4668                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
4669                                 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
4670                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
4671                                    VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
4672
4673         /*
4674          * Some CPUs support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL,
4675          * but due to the errata below it can't be used. The workaround is
4676          * to use the MSR-load mechanism to switch IA32_PERF_GLOBAL_CTRL.
4677          *
4678          * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
4679          *
4680          * AAK155             (model 26)
4681          * AAP115             (model 30)
4682          * AAT100             (model 37)
4683          * BC86,AAY89,BD102   (model 44)
4684          * BA97               (model 46)
4685          *
4686          */
4687         if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
4688                 switch (boot_cpu_data.x86_model) {
4689                 case 26:
4690                 case 30:
4691                 case 37:
4692                 case 44:
4693                 case 46:
4694                         cpu_has_load_perf_global_ctrl = false;
4695                         printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
4696                                         "does not work properly. Using workaround\n");
4697                         break;
4698                 default:
4699                         break;
4700                 }
4701         }
4702
4703         if (boot_cpu_has(X86_FEATURE_XSAVES))
4704                 rdmsrl(MSR_IA32_XSS, host_xss);
4705
4706         return 0;
4707 }
4708
4709 static struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu)
4710 {
4711         int node = cpu_to_node(cpu);
4712         struct page *pages;
4713         struct vmcs *vmcs;
4714
4715         pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
4716         if (!pages)
4717                 return NULL;
4718         vmcs = page_address(pages);
4719         memset(vmcs, 0, vmcs_config.size);
4720
4721         /* KVM supports Enlightened VMCS v1 only */
4722         if (static_branch_unlikely(&enable_evmcs))
4723                 vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
4724         else
4725                 vmcs->hdr.revision_id = vmcs_config.revision_id;
4726
4727         if (shadow)
4728                 vmcs->hdr.shadow_vmcs = 1;
4729         return vmcs;
4730 }
4731
4732 static void free_vmcs(struct vmcs *vmcs)
4733 {
4734         free_pages((unsigned long)vmcs, vmcs_config.order);
4735 }
4736
4737 /*
4738  * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
4739  */
4740 static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
4741 {
4742         if (!loaded_vmcs->vmcs)
4743                 return;
4744         loaded_vmcs_clear(loaded_vmcs);
4745         free_vmcs(loaded_vmcs->vmcs);
4746         loaded_vmcs->vmcs = NULL;
4747         if (loaded_vmcs->msr_bitmap)
4748                 free_page((unsigned long)loaded_vmcs->msr_bitmap);
4749         WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
4750 }
4751
4752 static struct vmcs *alloc_vmcs(bool shadow)
4753 {
4754         return alloc_vmcs_cpu(shadow, raw_smp_processor_id());
4755 }
4756
4757 static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
4758 {
4759         loaded_vmcs->vmcs = alloc_vmcs(false);
4760         if (!loaded_vmcs->vmcs)
4761                 return -ENOMEM;
4762
4763         loaded_vmcs->shadow_vmcs = NULL;
4764         loaded_vmcs_init(loaded_vmcs);
4765
4766         if (cpu_has_vmx_msr_bitmap()) {
4767                 loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
4768                 if (!loaded_vmcs->msr_bitmap)
4769                         goto out_vmcs;
4770                 memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
4771
4772                 if (IS_ENABLED(CONFIG_HYPERV) &&
4773                     static_branch_unlikely(&enable_evmcs) &&
4774                     (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
4775                         struct hv_enlightened_vmcs *evmcs =
4776                                 (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
4777
4778                         evmcs->hv_enlightenments_control.msr_bitmap = 1;
4779                 }
4780         }
4781
4782         memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
4783
4784         return 0;
4785
4786 out_vmcs:
4787         free_loaded_vmcs(loaded_vmcs);
4788         return -ENOMEM;
4789 }
4790
4791 static void free_kvm_area(void)
4792 {
4793         int cpu;
4794
4795         for_each_possible_cpu(cpu) {
4796                 free_vmcs(per_cpu(vmxarea, cpu));
4797                 per_cpu(vmxarea, cpu) = NULL;
4798         }
4799 }
4800
4801 enum vmcs_field_width {
4802         VMCS_FIELD_WIDTH_U16 = 0,
4803         VMCS_FIELD_WIDTH_U64 = 1,
4804         VMCS_FIELD_WIDTH_U32 = 2,
4805         VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
4806 };
4807
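/*
 * VMCS field encodings carry their own metadata: bit 0 selects the high
 * half of a 64-bit field, bits 11:10 encode the field type (1 means a
 * read-only VM-exit information field) and bits 14:13 encode the width.
 * For example GUEST_RIP (0x681e) has bits 14:13 == 3, i.e. natural width.
 */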
4808 static inline int vmcs_field_width(unsigned long field)
4809 {
4810         if (0x1 & field)        /* the *_HIGH fields are all 32 bit */
4811                 return VMCS_FIELD_WIDTH_U32;
4812         return (field >> 13) & 0x3 ;
4813 }
4814
4815 static inline int vmcs_field_readonly(unsigned long field)
4816 {
4817         return (((field >> 10) & 0x3) == 1);
4818 }
4819
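/*
 * Trim the shadow VMCS field tables down to the fields this CPU can really
 * shadow and clear their bits in the vmread/vmwrite intercept bitmaps, so
 * that L1 accesses to the remaining fields hit the shadow VMCS instead of
 * causing a VM exit.
 */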
4820 static void init_vmcs_shadow_fields(void)
4821 {
4822         int i, j;
4823
4824         for (i = j = 0; i < max_shadow_read_only_fields; i++) {
4825                 u16 field = shadow_read_only_fields[i];
4826                 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
4827                     (i + 1 == max_shadow_read_only_fields ||
4828                      shadow_read_only_fields[i + 1] != field + 1))
4829                         pr_err("Missing field from shadow_read_only_field %x\n",
4830                                field + 1);
4831
4832                 clear_bit(field, vmx_vmread_bitmap);
4833 #ifdef CONFIG_X86_64
4834                 if (field & 1)
4835                         continue;
4836 #endif
4837                 if (j < i)
4838                         shadow_read_only_fields[j] = field;
4839                 j++;
4840         }
4841         max_shadow_read_only_fields = j;
4842
4843         for (i = j = 0; i < max_shadow_read_write_fields; i++) {
4844                 u16 field = shadow_read_write_fields[i];
4845                 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
4846                     (i + 1 == max_shadow_read_write_fields ||
4847                      shadow_read_write_fields[i + 1] != field + 1))
4848                         pr_err("Missing field from shadow_read_write_field %x\n",
4849                                field + 1);
4850
4851                 /*
4852                  * PML and the preemption timer can be emulated, but the
4853                  * processor cannot vmwrite to fields that don't exist
4854                  * on bare metal.
4855                  */
4856                 switch (field) {
4857                 case GUEST_PML_INDEX:
4858                         if (!cpu_has_vmx_pml())
4859                                 continue;
4860                         break;
4861                 case VMX_PREEMPTION_TIMER_VALUE:
4862                         if (!cpu_has_vmx_preemption_timer())
4863                                 continue;
4864                         break;
4865                 case GUEST_INTR_STATUS:
4866                         if (!cpu_has_vmx_apicv())
4867                                 continue;
4868                         break;
4869                 default:
4870                         break;
4871                 }
4872
4873                 clear_bit(field, vmx_vmwrite_bitmap);
4874                 clear_bit(field, vmx_vmread_bitmap);
4875 #ifdef CONFIG_X86_64
4876                 if (field & 1)
4877                         continue;
4878 #endif
4879                 if (j < i)
4880                         shadow_read_write_fields[j] = field;
4881                 j++;
4882         }
4883         max_shadow_read_write_fields = j;
4884 }
4885
4886 static __init int alloc_kvm_area(void)
4887 {
4888         int cpu;
4889
4890         for_each_possible_cpu(cpu) {
4891                 struct vmcs *vmcs;
4892
4893                 vmcs = alloc_vmcs_cpu(false, cpu);
4894                 if (!vmcs) {
4895                         free_kvm_area();
4896                         return -ENOMEM;
4897                 }
4898
4899                 /*
4900                  * When eVMCS is enabled, alloc_vmcs_cpu() sets
4901                  * vmcs->revision_id to KVM_EVMCS_VERSION instead of
4902                  * revision_id reported by MSR_IA32_VMX_BASIC.
4903                  *
4904                  * However, even though it is not explicitly documented in
4905                  * the TLFS, the VMXON region passed as the VMXON argument
4906                  * should still be marked with the revision_id reported by
4907                  * the physical CPU.
4908                  */
4909                 if (static_branch_unlikely(&enable_evmcs))
4910                         vmcs->hdr.revision_id = vmcs_config.revision_id;
4911
4912                 per_cpu(vmxarea, cpu) = vmcs;
4913         }
4914         return 0;
4915 }
4916
4917 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
4918                 struct kvm_segment *save)
4919 {
4920         if (!emulate_invalid_guest_state) {
4921                 /*
4922                  * CS and SS RPL should be equal during guest entry according
4923                  * to VMX spec, but in reality it is not always so. Since vcpu
4924                  * is in the middle of the transition from real mode to
4925                  * protected mode it is safe to assume that RPL 0 is a good
4926                  * default value.
4927                  */
4928                 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
4929                         save->selector &= ~SEGMENT_RPL_MASK;
4930                 save->dpl = save->selector & SEGMENT_RPL_MASK;
4931                 save->s = 1;
4932         }
4933         vmx_set_segment(vcpu, save, seg);
4934 }
4935
4936 static void enter_pmode(struct kvm_vcpu *vcpu)
4937 {
4938         unsigned long flags;
4939         struct vcpu_vmx *vmx = to_vmx(vcpu);
4940
4941         /*
4942          * Update the real mode segment cache. It may not be up to date if a
4943          * segment register was written while the vcpu was in guest mode.
4944          */
4945         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
4946         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
4947         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
4948         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
4949         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
4950         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
4951
4952         vmx->rmode.vm86_active = 0;
4953
4954         vmx_segment_cache_clear(vmx);
4955
4956         vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
4957
4958         flags = vmcs_readl(GUEST_RFLAGS);
4959         flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
4960         flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
4961         vmcs_writel(GUEST_RFLAGS, flags);
4962
4963         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
4964                         (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
4965
4966         update_exception_bitmap(vcpu);
4967
4968         fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
4969         fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
4970         fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
4971         fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
4972         fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
4973         fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
4974 }
4975
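/*
 * In virtual-8086 mode the hardware computes the segment base as
 * selector << 4, so a synthesized real-mode segment must keep that
 * relationship: a paragraph-aligned base of e.g. 0x12340 maps back to
 * selector 0x1234 below.
 */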
4976 static void fix_rmode_seg(int seg, struct kvm_segment *save)
4977 {
4978         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
4979         struct kvm_segment var = *save;
4980
4981         var.dpl = 0x3;
4982         if (seg == VCPU_SREG_CS)
4983                 var.type = 0x3;
4984
4985         if (!emulate_invalid_guest_state) {
4986                 var.selector = var.base >> 4;
4987                 var.base = var.base & 0xffff0;
4988                 var.limit = 0xffff;
4989                 var.g = 0;
4990                 var.db = 0;
4991                 var.present = 1;
4992                 var.s = 1;
4993                 var.l = 0;
4994                 var.unusable = 0;
4995                 var.type = 0x3;
4996                 var.avl = 0;
4997                 if (save->base & 0xf)
4998                         printk_once(KERN_WARNING "kvm: segment base is not "
4999                                         "paragraph aligned when entering "
5000                                         "protected mode (seg=%d)", seg);
5001         }
5002
5003         vmcs_write16(sf->selector, var.selector);
5004         vmcs_writel(sf->base, var.base);
5005         vmcs_write32(sf->limit, var.limit);
5006         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
5007 }
5008
5009 static void enter_rmode(struct kvm_vcpu *vcpu)
5010 {
5011         unsigned long flags;
5012         struct vcpu_vmx *vmx = to_vmx(vcpu);
5013         struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
5014
5015         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
5016         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
5017         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
5018         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
5019         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
5020         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
5021         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
5022
5023         vmx->rmode.vm86_active = 1;
5024
5025         /*
5026          * Very old userspace does not call KVM_SET_TSS_ADDR before entering
5027          * vcpu. Warn the user that an update is overdue.
5028          */
5029         if (!kvm_vmx->tss_addr)
5030                 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
5031                              "called before entering vcpu\n");
5032
5033         vmx_segment_cache_clear(vmx);
5034
5035         vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
5036         vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
5037         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
5038
5039         flags = vmcs_readl(GUEST_RFLAGS);
5040         vmx->rmode.save_rflags = flags;
5041
5042         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
5043
5044         vmcs_writel(GUEST_RFLAGS, flags);
5045         vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
5046         update_exception_bitmap(vcpu);
5047
5048         fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
5049         fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
5050         fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
5051         fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
5052         fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
5053         fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
5054
5055         kvm_mmu_reset_context(vcpu);
5056 }
5057
5058 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
5059 {
5060         struct vcpu_vmx *vmx = to_vmx(vcpu);
5061         struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
5062
5063         if (!msr)
5064                 return;
5065
5066         vcpu->arch.efer = efer;
5067         if (efer & EFER_LMA) {
5068                 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
5069                 msr->data = efer;
5070         } else {
5071                 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
5072
5073                 msr->data = efer & ~EFER_LME;
5074         }
5075         setup_msrs(vmx);
5076 }
5077
5078 #ifdef CONFIG_X86_64
5079
5080 static void enter_lmode(struct kvm_vcpu *vcpu)
5081 {
5082         u32 guest_tr_ar;
5083
5084         vmx_segment_cache_clear(to_vmx(vcpu));
5085
5086         guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
5087         if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
5088                 pr_debug_ratelimited("%s: tss fixup for long mode. \n",
5089                                      __func__);
5090                 vmcs_write32(GUEST_TR_AR_BYTES,
5091                              (guest_tr_ar & ~VMX_AR_TYPE_MASK)
5092                              | VMX_AR_TYPE_BUSY_64_TSS);
5093         }
5094         vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
5095 }
5096
5097 static void exit_lmode(struct kvm_vcpu *vcpu)
5098 {
5099         vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
5100         vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
5101 }
5102
5103 #endif
5104
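/*
 * Flush TLB entries for this vcpu.  With EPT, a flush that must invalidate
 * guest-physical mappings (or any flush when VPID is unavailable) is done
 * by invalidating the EPT context of the current root; otherwise a
 * VPID-tagged flush of the linear mappings is sufficient.
 */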
5105 static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
5106                                 bool invalidate_gpa)
5107 {
5108         if (enable_ept && (invalidate_gpa || !enable_vpid)) {
5109                 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
5110                         return;
5111                 ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
5112         } else {
5113                 vpid_sync_context(vpid);
5114         }
5115 }
5116
5117 static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
5118 {
5119         __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
5120 }
5121
5122 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
5123 {
5124         int vpid = to_vmx(vcpu)->vpid;
5125
5126         if (!vpid_sync_vcpu_addr(vpid, addr))
5127                 vpid_sync_context(vpid);
5128
5129         /*
5130          * If VPIDs are not supported or enabled, then the above is a no-op.
5131          * But we don't really need a TLB flush in that case anyway, because
5132          * each VM entry/exit includes an implicit flush when VPID is 0.
5133          */
5134 }
5135
5136 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
5137 {
5138         ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
5139
5140         vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
5141         vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
5142 }
5143
5144 static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
5145 {
5146         if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
5147                 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
5148         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
5149 }
5150
5151 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
5152 {
5153         ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
5154
5155         vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
5156         vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
5157 }
5158
5159 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
5160 {
5161         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
5162
5163         if (!test_bit(VCPU_EXREG_PDPTR,
5164                       (unsigned long *)&vcpu->arch.regs_dirty))
5165                 return;
5166
5167         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
5168                 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
5169                 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
5170                 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
5171                 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
5172         }
5173 }
5174
5175 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
5176 {
5177         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
5178
5179         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
5180                 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
5181                 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
5182                 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
5183                 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
5184         }
5185
5186         __set_bit(VCPU_EXREG_PDPTR,
5187                   (unsigned long *)&vcpu->arch.regs_avail);
5188         __set_bit(VCPU_EXREG_PDPTR,
5189                   (unsigned long *)&vcpu->arch.regs_dirty);
5190 }
5191
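/*
 * VMX "fixed bits" convention: a bit that is 1 in CR*_FIXED0 must be 1 in
 * the corresponding control register, and a bit that is 0 in CR*_FIXED1
 * must be 0.  An unrestricted guest is additionally allowed to clear
 * CR0.PE and CR0.PG.
 */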
5192 static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
5193 {
5194         u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
5195         u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
5196         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5197
5198         if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
5199                 SECONDARY_EXEC_UNRESTRICTED_GUEST &&
5200             nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
5201                 fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
5202
5203         return fixed_bits_valid(val, fixed0, fixed1);
5204 }
5205
5206 static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
5207 {
5208         u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
5209         u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
5210
5211         return fixed_bits_valid(val, fixed0, fixed1);
5212 }
5213
5214 static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
5215 {
5216         u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
5217         u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;
5218
5219         return fixed_bits_valid(val, fixed0, fixed1);
5220 }
5221
5222 /* No difference in the restrictions on guest and host CR4 in VMX operation. */
5223 #define nested_guest_cr4_valid  nested_cr4_valid
5224 #define nested_host_cr4_valid   nested_cr4_valid
5225
5226 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
5227
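/*
 * With EPT but without unrestricted guest, an unpaged guest still runs
 * with paging enabled on top of the identity map, so CR3 loads and stores
 * must trap while guest paging is off; direct CR3 access is restored once
 * the guest enables paging again.
 */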
5228 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
5229                                         unsigned long cr0,
5230                                         struct kvm_vcpu *vcpu)
5231 {
5232         if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
5233                 vmx_decache_cr3(vcpu);
5234         if (!(cr0 & X86_CR0_PG)) {
5235                 /* From paging/starting to nonpaging */
5236                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
5237                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
5238                              (CPU_BASED_CR3_LOAD_EXITING |
5239                               CPU_BASED_CR3_STORE_EXITING));
5240                 vcpu->arch.cr0 = cr0;
5241                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
5242         } else if (!is_paging(vcpu)) {
5243                 /* From nonpaging to paging */
5244                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
5245                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
5246                              ~(CPU_BASED_CR3_LOAD_EXITING |
5247                                CPU_BASED_CR3_STORE_EXITING));
5248                 vcpu->arch.cr0 = cr0;
5249                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
5250         }
5251
5252         if (!(cr0 & X86_CR0_WP))
5253                 *hw_cr0 &= ~X86_CR0_WP;
5254 }
5255
5256 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
5257 {
5258         struct vcpu_vmx *vmx = to_vmx(vcpu);
5259         unsigned long hw_cr0;
5260
5261         hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
5262         if (enable_unrestricted_guest)
5263                 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
5264         else {
5265                 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
5266
5267                 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
5268                         enter_pmode(vcpu);
5269
5270                 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
5271                         enter_rmode(vcpu);
5272         }
5273
5274 #ifdef CONFIG_X86_64
5275         if (vcpu->arch.efer & EFER_LME) {
5276                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
5277                         enter_lmode(vcpu);
5278                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
5279                         exit_lmode(vcpu);
5280         }
5281 #endif
5282
5283         if (enable_ept && !enable_unrestricted_guest)
5284                 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
5285
5286         vmcs_writel(CR0_READ_SHADOW, cr0);
5287         vmcs_writel(GUEST_CR0, hw_cr0);
5288         vcpu->arch.cr0 = cr0;
5289
5290         /* depends on vcpu->arch.cr0 being set to the new value above */
5291         vmx->emulation_required = emulation_required(vcpu);
5292 }
5293
5294 static int get_ept_level(struct kvm_vcpu *vcpu)
5295 {
5296         if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
5297                 return 5;
5298         return 4;
5299 }
5300
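/*
 * EPTP layout: bits 2:0 hold the memory type (6 == write-back), bits 5:3
 * the page-walk length minus one, bit 6 enables accessed/dirty flags and
 * bits 63:12 the physical address of the root EPT table.
 */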
5301 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
5302 {
5303         u64 eptp = VMX_EPTP_MT_WB;
5304
5305         eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
5306
5307         if (enable_ept_ad_bits &&
5308             (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
5309                 eptp |= VMX_EPTP_AD_ENABLE_BIT;
5310         eptp |= (root_hpa & PAGE_MASK);
5311
5312         return eptp;
5313 }
5314
5315 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
5316 {
5317         struct kvm *kvm = vcpu->kvm;
5318         unsigned long guest_cr3;
5319         u64 eptp;
5320
5321         guest_cr3 = cr3;
5322         if (enable_ept) {
5323                 eptp = construct_eptp(vcpu, cr3);
5324                 vmcs_write64(EPT_POINTER, eptp);
5325
5326                 if (kvm_x86_ops->tlb_remote_flush) {
5327                         spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
5328                         to_vmx(vcpu)->ept_pointer = eptp;
5329                         to_kvm_vmx(kvm)->ept_pointers_match
5330                                 = EPT_POINTERS_CHECK;
5331                         spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
5332                 }
5333
5334                 if (enable_unrestricted_guest || is_paging(vcpu) ||
5335                     is_guest_mode(vcpu))
5336                         guest_cr3 = kvm_read_cr3(vcpu);
5337                 else
5338                         guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
5339                 ept_load_pdptrs(vcpu);
5340         }
5341
5342         vmcs_writel(GUEST_CR3, guest_cr3);
5343 }
5344
5345 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
5346 {
5347         /*
5348          * Pass through host's Machine Check Enable value to hw_cr4, which
5349          * is in force while we are in guest mode.  Do not let guests control
5350          * this bit, even if host CR4.MCE == 0.
5351          */
5352         unsigned long hw_cr4;
5353
5354         hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
5355         if (enable_unrestricted_guest)
5356                 hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
5357         else if (to_vmx(vcpu)->rmode.vm86_active)
5358                 hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
5359         else
5360                 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
5361
5362         if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
5363                 if (cr4 & X86_CR4_UMIP) {
5364                         vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
5365                                 SECONDARY_EXEC_DESC);
5366                         hw_cr4 &= ~X86_CR4_UMIP;
5367                 } else if (!is_guest_mode(vcpu) ||
5368                         !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
5369                         vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
5370                                         SECONDARY_EXEC_DESC);
5371         }
5372
5373         if (cr4 & X86_CR4_VMXE) {
5374                 /*
5375                  * To use VMXON (and later other VMX instructions), a guest
5376                  * must first be able to turn on cr4.VMXE (see handle_vmon()).
5377                  * So basically the check on whether to allow nested VMX
5378                  * is here.  We operate under the default treatment of SMM,
5379                  * so VMX cannot be enabled under SMM.
5380                  */
5381                 if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
5382                         return 1;
5383         }
5384
5385         if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
5386                 return 1;
5387
5388         vcpu->arch.cr4 = cr4;
5389
5390         if (!enable_unrestricted_guest) {
5391                 if (enable_ept) {
5392                         if (!is_paging(vcpu)) {
5393                                 hw_cr4 &= ~X86_CR4_PAE;
5394                                 hw_cr4 |= X86_CR4_PSE;
5395                         } else if (!(cr4 & X86_CR4_PAE)) {
5396                                 hw_cr4 &= ~X86_CR4_PAE;
5397                         }
5398                 }
5399
5400                 /*
5401                  * In hardware, SMEP/SMAP/PKU are disabled while the CPU is
5402                  * in non-paging mode.  To emulate this behavior, they need
5403                  * to be manually disabled when the guest switches to
5404                  * non-paging mode.
5405                  *
5406                  * If !enable_unrestricted_guest, the CPU is always running
5407                  * with CR0.PG=1 and CR4 needs to be modified.
5408                  * If enable_unrestricted_guest, the CPU automatically
5409                  * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
5410                  */
5411                 if (!is_paging(vcpu))
5412                         hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
5413         }
5414
5415         vmcs_writel(CR4_READ_SHADOW, cr4);
5416         vmcs_writel(GUEST_CR4, hw_cr4);
5417         return 0;
5418 }
5419
5420 static void vmx_get_segment(struct kvm_vcpu *vcpu,
5421                             struct kvm_segment *var, int seg)
5422 {
5423         struct vcpu_vmx *vmx = to_vmx(vcpu);
5424         u32 ar;
5425
5426         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
5427                 *var = vmx->rmode.segs[seg];
5428                 if (seg == VCPU_SREG_TR
5429                     || var->selector == vmx_read_guest_seg_selector(vmx, seg))
5430                         return;
5431                 var->base = vmx_read_guest_seg_base(vmx, seg);
5432                 var->selector = vmx_read_guest_seg_selector(vmx, seg);
5433                 return;
5434         }
5435         var->base = vmx_read_guest_seg_base(vmx, seg);
5436         var->limit = vmx_read_guest_seg_limit(vmx, seg);
5437         var->selector = vmx_read_guest_seg_selector(vmx, seg);
5438         ar = vmx_read_guest_seg_ar(vmx, seg);
5439         var->unusable = (ar >> 16) & 1;
5440         var->type = ar & 15;
5441         var->s = (ar >> 4) & 1;
5442         var->dpl = (ar >> 5) & 3;
5443         /*
5444          * Some userspaces do not preserve the unusable property. Since a
5445          * usable segment has to be present according to the VMX spec, we
5446          * can use the present property to work around that userspace bug by
5447          * making an unusable segment always nonpresent.
5448          * vmx_segment_access_rights() already marks nonpresent segments as unusable.
5449          */
5450         var->present = !var->unusable;
5451         var->avl = (ar >> 12) & 1;
5452         var->l = (ar >> 13) & 1;
5453         var->db = (ar >> 14) & 1;
5454         var->g = (ar >> 15) & 1;
5455 }
5456
5457 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
5458 {
5459         struct kvm_segment s;
5460
5461         if (to_vmx(vcpu)->rmode.vm86_active) {
5462                 vmx_get_segment(vcpu, &s, seg);
5463                 return s.base;
5464         }
5465         return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
5466 }
5467
5468 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
5469 {
5470         struct vcpu_vmx *vmx = to_vmx(vcpu);
5471
5472         if (unlikely(vmx->rmode.vm86_active))
5473                 return 0;
5474         else {
5475                 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
5476                 return VMX_AR_DPL(ar);
5477         }
5478 }
5479
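/*
 * Pack a struct kvm_segment into the VMX access-rights format: type in
 * bits 3:0, S in bit 4, DPL in bits 6:5, P in bit 7, AVL/L/D-B/G in
 * bits 12-15, and bit 16 set when the segment is unusable.
 */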
5480 static u32 vmx_segment_access_rights(struct kvm_segment *var)
5481 {
5482         u32 ar;
5483
5484         if (var->unusable || !var->present)
5485                 ar = 1 << 16;
5486         else {
5487                 ar = var->type & 15;
5488                 ar |= (var->s & 1) << 4;
5489                 ar |= (var->dpl & 3) << 5;
5490                 ar |= (var->present & 1) << 7;
5491                 ar |= (var->avl & 1) << 12;
5492                 ar |= (var->l & 1) << 13;
5493                 ar |= (var->db & 1) << 14;
5494                 ar |= (var->g & 1) << 15;
5495         }
5496
5497         return ar;
5498 }
5499
5500 static void vmx_set_segment(struct kvm_vcpu *vcpu,
5501                             struct kvm_segment *var, int seg)
5502 {
5503         struct vcpu_vmx *vmx = to_vmx(vcpu);
5504         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
5505
5506         vmx_segment_cache_clear(vmx);
5507
5508         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
5509                 vmx->rmode.segs[seg] = *var;
5510                 if (seg == VCPU_SREG_TR)
5511                         vmcs_write16(sf->selector, var->selector);
5512                 else if (var->s)
5513                         fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
5514                 goto out;
5515         }
5516
5517         vmcs_writel(sf->base, var->base);
5518         vmcs_write32(sf->limit, var->limit);
5519         vmcs_write16(sf->selector, var->selector);
5520
5521         /*
5522          * Fix the "Accessed" bit in the AR field of segment registers for
5523          * older qemu binaries.
5524          *
5525          * The IA-32 architecture specifies that at processor reset the
5526          * "Accessed" bit in the AR field of segment registers is 1, but qemu
5527          * sets it to 0 in its userland code. This causes an invalid-guest-
5528          * state vmexit when "unrestricted guest" mode is turned on. A fix
5529          * for this cpu_reset issue is being pushed to the qemu tree; newer
5530          * qemu binaries with that fix will not need this kvm hack.
5531          */
5532         if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
5533                 var->type |= 0x1; /* Accessed */
5534
5535         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
5536
5537 out:
5538         vmx->emulation_required = emulation_required(vcpu);
5539 }
5540
5541 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
5542 {
5543         u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
5544
5545         *db = (ar >> 14) & 1;
5546         *l = (ar >> 13) & 1;
5547 }
5548
5549 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
5550 {
5551         dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
5552         dt->address = vmcs_readl(GUEST_IDTR_BASE);
5553 }
5554
5555 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
5556 {
5557         vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
5558         vmcs_writel(GUEST_IDTR_BASE, dt->address);
5559 }
5560
5561 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
5562 {
5563         dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
5564         dt->address = vmcs_readl(GUEST_GDTR_BASE);
5565 }
5566
5567 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
5568 {
5569         vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
5570         vmcs_writel(GUEST_GDTR_BASE, dt->address);
5571 }
5572
5573 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
5574 {
5575         struct kvm_segment var;
5576         u32 ar;
5577
5578         vmx_get_segment(vcpu, &var, seg);
5579         var.dpl = 0x3;
5580         if (seg == VCPU_SREG_CS)
5581                 var.type = 0x3;
5582         ar = vmx_segment_access_rights(&var);
5583
5584         if (var.base != (var.selector << 4))
5585                 return false;
5586         if (var.limit != 0xffff)
5587                 return false;
5588         if (ar != 0xf3)
5589                 return false;
5590
5591         return true;
5592 }
5593
5594 static bool code_segment_valid(struct kvm_vcpu *vcpu)
5595 {
5596         struct kvm_segment cs;
5597         unsigned int cs_rpl;
5598
5599         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
5600         cs_rpl = cs.selector & SEGMENT_RPL_MASK;
5601
5602         if (cs.unusable)
5603                 return false;
5604         if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
5605                 return false;
5606         if (!cs.s)
5607                 return false;
5608         if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
5609                 if (cs.dpl > cs_rpl)
5610                         return false;
5611         } else {
5612                 if (cs.dpl != cs_rpl)
5613                         return false;
5614         }
5615         if (!cs.present)
5616                 return false;
5617
5618         /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
5619         return true;
5620 }
5621
5622 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
5623 {
5624         struct kvm_segment ss;
5625         unsigned int ss_rpl;
5626
5627         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
5628         ss_rpl = ss.selector & SEGMENT_RPL_MASK;
5629
5630         if (ss.unusable)
5631                 return true;
5632         if (ss.type != 3 && ss.type != 7)
5633                 return false;
5634         if (!ss.s)
5635                 return false;
5636         if (ss.dpl != ss_rpl) /* DPL != RPL */
5637                 return false;
5638         if (!ss.present)
5639                 return false;
5640
5641         return true;
5642 }
5643
5644 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
5645 {
5646         struct kvm_segment var;
5647         unsigned int rpl;
5648
5649         vmx_get_segment(vcpu, &var, seg);
5650         rpl = var.selector & SEGMENT_RPL_MASK;
5651
5652         if (var.unusable)
5653                 return true;
5654         if (!var.s)
5655                 return false;
5656         if (!var.present)
5657                 return false;
5658         if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
5659                 if (var.dpl < rpl) /* DPL < RPL */
5660                         return false;
5661         }
5662
5663         /* TODO: Add other members to kvm_segment_field to allow checking for other access
5664          * rights flags
5665          */
5666         return true;
5667 }
5668
5669 static bool tr_valid(struct kvm_vcpu *vcpu)
5670 {
5671         struct kvm_segment tr;
5672
5673         vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
5674
5675         if (tr.unusable)
5676                 return false;
5677         if (tr.selector & SEGMENT_TI_MASK)      /* TI = 1 */
5678                 return false;
5679         if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
5680                 return false;
5681         if (!tr.present)
5682                 return false;
5683
5684         return true;
5685 }
5686
5687 static bool ldtr_valid(struct kvm_vcpu *vcpu)
5688 {
5689         struct kvm_segment ldtr;
5690
5691         vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
5692
5693         if (ldtr.unusable)
5694                 return true;
5695         if (ldtr.selector & SEGMENT_TI_MASK)    /* TI = 1 */
5696                 return false;
5697         if (ldtr.type != 2)
5698                 return false;
5699         if (!ldtr.present)
5700                 return false;
5701
5702         return true;
5703 }
5704
5705 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
5706 {
5707         struct kvm_segment cs, ss;
5708
5709         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
5710         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
5711
5712         return ((cs.selector & SEGMENT_RPL_MASK) ==
5713                  (ss.selector & SEGMENT_RPL_MASK));
5714 }
5715
5716 /*
5717  * Check if guest state is valid. Returns true if valid, false if
5718  * not.
5719  * We assume that registers are always usable
5720  */
5721 static bool guest_state_valid(struct kvm_vcpu *vcpu)
5722 {
5723         if (enable_unrestricted_guest)
5724                 return true;
5725
5726         /* real mode guest state checks */
5727         if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
5728                 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
5729                         return false;
5730                 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
5731                         return false;
5732                 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
5733                         return false;
5734                 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
5735                         return false;
5736                 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
5737                         return false;
5738                 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
5739                         return false;
5740         } else {
5741         /* protected mode guest state checks */
5742                 if (!cs_ss_rpl_check(vcpu))
5743                         return false;
5744                 if (!code_segment_valid(vcpu))
5745                         return false;
5746                 if (!stack_segment_valid(vcpu))
5747                         return false;
5748                 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
5749                         return false;
5750                 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
5751                         return false;
5752                 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
5753                         return false;
5754                 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
5755                         return false;
5756                 if (!tr_valid(vcpu))
5757                         return false;
5758                 if (!ldtr_valid(vcpu))
5759                         return false;
5760         }
5761         /* TODO:
5762          * - Add checks on RIP
5763          * - Add checks on RFLAGS
5764          */
5765
5766         return true;
5767 }
5768
5769 static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
5770 {
5771         return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
5772 }
5773
5774 static int init_rmode_tss(struct kvm *kvm)
5775 {
5776         gfn_t fn;
5777         u16 data = 0;
5778         int idx, r;
5779
5780         idx = srcu_read_lock(&kvm->srcu);
5781         fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT;
5782         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
5783         if (r < 0)
5784                 goto out;
5785         data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
5786         r = kvm_write_guest_page(kvm, fn++, &data,
5787                         TSS_IOPB_BASE_OFFSET, sizeof(u16));
5788         if (r < 0)
5789                 goto out;
5790         r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
5791         if (r < 0)
5792                 goto out;
5793         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
5794         if (r < 0)
5795                 goto out;
5796         data = ~0;
5797         r = kvm_write_guest_page(kvm, fn, &data,
5798                                  RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
5799                                  sizeof(u8));
5800 out:
5801         srcu_read_unlock(&kvm->srcu, idx);
5802         return r;
5803 }
5804
5805 static int init_rmode_identity_map(struct kvm *kvm)
5806 {
5807         struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
5808         int i, idx, r = 0;
5809         kvm_pfn_t identity_map_pfn;
5810         u32 tmp;
5811
5812         /* Protect kvm_vmx->ept_identity_pagetable_done. */
5813         mutex_lock(&kvm->slots_lock);
5814
5815         if (likely(kvm_vmx->ept_identity_pagetable_done))
5816                 goto out2;
5817
5818         if (!kvm_vmx->ept_identity_map_addr)
5819                 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
5820         identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT;
5821
5822         r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
5823                                     kvm_vmx->ept_identity_map_addr, PAGE_SIZE);
5824         if (r < 0)
5825                 goto out2;
5826
5827         idx = srcu_read_lock(&kvm->srcu);
5828         r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
5829         if (r < 0)
5830                 goto out;
5831         /* Set up identity-mapping pagetable for EPT in real mode */
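        /*
         * Each of the 1024 entries written below is a 4MB large page
         * (_PAGE_PSE), so this single page of entries identity-maps the
         * low 4GB of guest physical address space.
         */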
5832         for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
5833                 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
5834                         _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
5835                 r = kvm_write_guest_page(kvm, identity_map_pfn,
5836                                 &tmp, i * sizeof(tmp), sizeof(tmp));
5837                 if (r < 0)
5838                         goto out;
5839         }
5840         kvm_vmx->ept_identity_pagetable_done = true;
5841
5842 out:
5843         srcu_read_unlock(&kvm->srcu, idx);
5844
5845 out2:
5846         mutex_unlock(&kvm->slots_lock);
5847         return r;
5848 }
5849
5850 static void seg_setup(int seg)
5851 {
5852         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
5853         unsigned int ar;
5854
5855         vmcs_write16(sf->selector, 0);
5856         vmcs_writel(sf->base, 0);
5857         vmcs_write32(sf->limit, 0xffff);
5858         ar = 0x93; /* present, DPL 0, read/write data segment, accessed */
5859         if (seg == VCPU_SREG_CS)
5860                 ar |= 0x08; /* code segment */
5861
5862         vmcs_write32(sf->ar_bytes, ar);
5863 }
5864
5865 static int alloc_apic_access_page(struct kvm *kvm)
5866 {
5867         struct page *page;
5868         int r = 0;
5869
5870         mutex_lock(&kvm->slots_lock);
5871         if (kvm->arch.apic_access_page_done)
5872                 goto out;
5873         r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
5874                                     APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
5875         if (r)
5876                 goto out;
5877
5878         page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
5879         if (is_error_page(page)) {
5880                 r = -EFAULT;
5881                 goto out;
5882         }
5883
5884         /*
5885          * Do not pin the page in memory, so that memory hot-unplug
5886          * is able to migrate it.
5887          */
5888         put_page(page);
5889         kvm->arch.apic_access_page_done = true;
5890 out:
5891         mutex_unlock(&kvm->slots_lock);
5892         return r;
5893 }
5894
5895 static int allocate_vpid(void)
5896 {
5897         int vpid;
5898
5899         if (!enable_vpid)
5900                 return 0;
5901         spin_lock(&vmx_vpid_lock);
5902         vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
5903         if (vpid < VMX_NR_VPIDS)
5904                 __set_bit(vpid, vmx_vpid_bitmap);
5905         else
5906                 vpid = 0;
5907         spin_unlock(&vmx_vpid_lock);
5908         return vpid;
5909 }
5910
5911 static void free_vpid(int vpid)
5912 {
5913         if (!enable_vpid || vpid == 0)
5914                 return;
5915         spin_lock(&vmx_vpid_lock);
5916         __clear_bit(vpid, vmx_vpid_bitmap);
5917         spin_unlock(&vmx_vpid_lock);
5918 }
5919
5920 static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
5921                                                           u32 msr, int type)
5922 {
5923         int f = sizeof(unsigned long);
5924
5925         if (!cpu_has_vmx_msr_bitmap())
5926                 return;
5927
5928         if (static_branch_unlikely(&enable_evmcs))
5929                 evmcs_touch_msr_bitmap();
5930
5931         /*
5932          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
5933          * have the write-low and read-high bitmap offsets the wrong way round.
5934          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
5935          */
5936         if (msr <= 0x1fff) {
5937                 if (type & MSR_TYPE_R)
5938                         /* read-low */
5939                         __clear_bit(msr, msr_bitmap + 0x000 / f);
5940
5941                 if (type & MSR_TYPE_W)
5942                         /* write-low */
5943                         __clear_bit(msr, msr_bitmap + 0x800 / f);
5944
5945         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
5946                 msr &= 0x1fff;
5947                 if (type & MSR_TYPE_R)
5948                         /* read-high */
5949                         __clear_bit(msr, msr_bitmap + 0x400 / f);
5950
5951                 if (type & MSR_TYPE_W)
5952                         /* write-high */
5953                         __clear_bit(msr, msr_bitmap + 0xc00 / f);
5954
5955         }
5956 }
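/*
 * Illustrative sketch only (not part of the driver): the bit position that
 * the helper above clears for a given MSR, following the layout described
 * in the comment (read-low at byte 0x000, read-high at 0x400, write-low at
 * 0x800, write-high at 0xc00, 8192 bits per quarter). The helper name is
 * made up for this example.
 */
static inline unsigned int msr_bitmap_bit_example(u32 msr, bool write)
{
        unsigned int bit = write ? 0x800 * BITS_PER_BYTE : 0;

        if (msr >= 0xc0000000) {
                bit += 0x400 * BITS_PER_BYTE;   /* high range starts 0x400 bytes later */
                msr &= 0x1fff;
        }
        return bit + msr;
}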
5957
5958 static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
5959                                                          u32 msr, int type)
5960 {
5961         int f = sizeof(unsigned long);
5962
5963         if (!cpu_has_vmx_msr_bitmap())
5964                 return;
5965
5966         if (static_branch_unlikely(&enable_evmcs))
5967                 evmcs_touch_msr_bitmap();
5968
5969         /*
5970          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
5971          * have the write-low and read-high bitmap offsets the wrong way round.
5972          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
5973          */
5974         if (msr <= 0x1fff) {
5975                 if (type & MSR_TYPE_R)
5976                         /* read-low */
5977                         __set_bit(msr, msr_bitmap + 0x000 / f);
5978
5979                 if (type & MSR_TYPE_W)
5980                         /* write-low */
5981                         __set_bit(msr, msr_bitmap + 0x800 / f);
5982
5983         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
5984                 msr &= 0x1fff;
5985                 if (type & MSR_TYPE_R)
5986                         /* read-high */
5987                         __set_bit(msr, msr_bitmap + 0x400 / f);
5988
5989                 if (type & MSR_TYPE_W)
5990                         /* write-high */
5991                         __set_bit(msr, msr_bitmap + 0xc00 / f);
5992
5993         }
5994 }
5995
5996 static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
5997                                                       u32 msr, int type, bool value)
5998 {
5999         if (value)
6000                 vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
6001         else
6002                 vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
6003 }
6004
6005 /*
6006  * If an MSR is allowed by L0, check whether it is also allowed by L1.
6007  * The corresponding bit will be cleared only if both L0 and L1 allow it.
6008  */
6009 static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
6010                                                unsigned long *msr_bitmap_nested,
6011                                                u32 msr, int type)
6012 {
6013         int f = sizeof(unsigned long);
6014
6015         /*
6016          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
6017          * have the write-low and read-high bitmap offsets the wrong way round.
6018          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
6019          */
6020         if (msr <= 0x1fff) {
6021                 if (type & MSR_TYPE_R &&
6022                    !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
6023                         /* read-low */
6024                         __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
6025
6026                 if (type & MSR_TYPE_W &&
6027                    !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
6028                         /* write-low */
6029                         __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
6030
6031         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
6032                 msr &= 0x1fff;
6033                 if (type & MSR_TYPE_R &&
6034                    !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
6035                         /* read-high */
6036                         __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
6037
6038                 if (type & MSR_TYPE_W &&
6039                    !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
6040                         /* write-high */
6041                         __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
6042
6043         }
6044 }
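/*
 * Illustrative sketch only (hypothetical helper, not in the driver): after
 * the merge above, the effective intercept for L2 is the logical OR of the
 * L0 and L1 intercept bits, i.e. an MSR is passed through only when both
 * bitmaps leave its bit clear.
 */
static inline bool nested_msr_intercepted_example(bool l0_intercept,
                                                  bool l1_intercept)
{
        return l0_intercept || l1_intercept;
}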
6045
6046 static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
6047 {
6048         u8 mode = 0;
6049
6050         if (cpu_has_secondary_exec_ctrls() &&
6051             (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
6052              SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
6053                 mode |= MSR_BITMAP_MODE_X2APIC;
6054                 if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
6055                         mode |= MSR_BITMAP_MODE_X2APIC_APICV;
6056         }
6057
6058         return mode;
6059 }
6060
6061 #define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
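/*
 * For example, APIC_TASKPRI (xAPIC MMIO offset 0x80) maps to x2APIC MSR
 * 0x808: APIC_BASE_MSR (0x800) + (0x80 >> 4).
 */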
6062
6063 static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
6064                                          u8 mode)
6065 {
6066         int msr;
6067
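        /*
         * For the x2APIC MSR range 0x800-0x8ff: reads are passed through
         * when APICv is active and intercepted otherwise, while writes are
         * intercepted by default; selected MSRs are opened up below.
         */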
6068         for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
6069                 unsigned word = msr / BITS_PER_LONG;
6070                 msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0;
6071                 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
6072         }
6073
6074         if (mode & MSR_BITMAP_MODE_X2APIC) {
6075                 /*
6076                  * TPR reads and writes can be virtualized even if virtual interrupt
6077                  * delivery is not in use.
6078                  */
6079                 vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
6080                 if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
6081                         vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
6082                         vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
6083                         vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
6084                 }
6085         }
6086 }
6087
6088 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
6089 {
6090         struct vcpu_vmx *vmx = to_vmx(vcpu);
6091         unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
6092         u8 mode = vmx_msr_bitmap_mode(vcpu);
6093         u8 changed = mode ^ vmx->msr_bitmap_mode;
6094
6095         if (!changed)
6096                 return;
6097
6098         if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
6099                 vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
6100
6101         vmx->msr_bitmap_mode = mode;
6102 }
6103
6104 static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu)
6105 {
6106         return enable_apicv;
6107 }
6108
6109 static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
6110 {
6111         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6112         gfn_t gfn;
6113
6114         /*
6115          * Don't need to mark the APIC access page dirty; it is never
6116          * written to by the CPU during APIC virtualization.
6117          */
6118
6119         if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
6120                 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
6121                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
6122         }
6123
6124         if (nested_cpu_has_posted_intr(vmcs12)) {
6125                 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
6126                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
6127         }
6128 }
6129
6130
6131 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
6132 {
6133         struct vcpu_vmx *vmx = to_vmx(vcpu);
6134         int max_irr;
6135         void *vapic_page;
6136         u16 status;
6137
6138         if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
6139                 return;
6140
6141         vmx->nested.pi_pending = false;
6142         if (!pi_test_and_clear_on(vmx->nested.pi_desc))
6143                 return;
6144
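        /* find_last_bit() returns the bitmap size (256) when no bit is set. */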
6145         max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
6146         if (max_irr != 256) {
6147                 vapic_page = kmap(vmx->nested.virtual_apic_page);
6148                 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
6149                         vapic_page, &max_irr);
6150                 kunmap(vmx->nested.virtual_apic_page);
6151
6152                 status = vmcs_read16(GUEST_INTR_STATUS);
6153                 if ((u8)max_irr > ((u8)status & 0xff)) {
6154                         status &= ~0xff;
6155                         status |= (u8)max_irr;
6156                         vmcs_write16(GUEST_INTR_STATUS, status);
6157                 }
6158         }
6159
6160         nested_mark_vmcs12_pages_dirty(vcpu);
6161 }
6162
6163 static u8 vmx_get_rvi(void)
6164 {
6165         return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
6166 }
6167
6168 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
6169 {
6170         struct vcpu_vmx *vmx = to_vmx(vcpu);
6171         void *vapic_page;
6172         u32 vppr;
6173         int rvi;
6174
6175         if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
6176                 !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
6177                 WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
6178                 return false;
6179
6180         rvi = vmx_get_rvi();
6181
6182         vapic_page = kmap(vmx->nested.virtual_apic_page);
6183         vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
6184         kunmap(vmx->nested.virtual_apic_page);
6185
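        /*
         * A pending interrupt is deliverable when the priority class of the
         * highest requested vector (RVI, bits 7:4) is above the priority
         * class of the virtual PPR.
         */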
6186         return ((rvi & 0xf0) > (vppr & 0xf0));
6187 }
6188
6189 static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
6190                                                      bool nested)
6191 {
6192 #ifdef CONFIG_SMP
6193         int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
6194
6195         if (vcpu->mode == IN_GUEST_MODE) {
6196                 /*
6197                  * The interrupt vector to be delivered to the vcpu has
6198                  * already been set in the PIR before this function runs.
6199                  *
6200                  * The following cases can be reached in this block, and
6201                  * we always send a notification event, as explained
6202                  * below.
6203                  *
6204                  * Case 1: vcpu keeps in non-root mode. Sending a
6205                  * notification event posts the interrupt to vcpu.
6206                  *
6207                  * Case 2: vcpu exits to root mode and is still
6208                  * runnable. PIR will be synced to vIRR before the
6209                  * next vcpu entry. Sending a notification event in
6210                  * this case has no effect, as the vcpu is no longer in
6211                  * non-root mode.
6212                  *
6213                  * Case 3: vcpu exits to root mode and is blocked.
6214                  * vcpu_block() has already synced PIR to vIRR and
6215                  * never blocks the vcpu if vIRR is not empty. Therefore,
6216                  * a blocked vcpu here is not waiting on any interrupt
6217                  * requested in the PIR, and sending the (ineffective)
6218                  * notification event is harmless.
6219                  */
6220
6221                 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
6222                 return true;
6223         }
6224 #endif
6225         return false;
6226 }
6227
6228 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
6229                                                 int vector)
6230 {
6231         struct vcpu_vmx *vmx = to_vmx(vcpu);
6232
6233         if (is_guest_mode(vcpu) &&
6234             vector == vmx->nested.posted_intr_nv) {
6235                 /*
6236                  * If the posted interrupt is not recognized by hardware,
6237                  * it will be delivered on the next vmentry.
6238                  */
6239                 vmx->nested.pi_pending = true;
6240                 kvm_make_request(KVM_REQ_EVENT, vcpu);
6241                 /* the PIR and ON have been set by L1. */
6242                 if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
6243                         kvm_vcpu_kick(vcpu);
6244                 return 0;
6245         }
6246         return -1;
6247 }
6248 /*
6249  * Send an interrupt to a vcpu via the posted-interrupt mechanism.
6250  * 1. If the target vcpu is running (non-root mode), send a posted-interrupt
6251  * notification and the hardware will sync the PIR to the vIRR atomically.
6252  * 2. If the target vcpu isn't running (root mode), kick it so that it picks
6253  * up the interrupt from the PIR on the next vmentry.
6254  */
6255 static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
6256 {
6257         struct vcpu_vmx *vmx = to_vmx(vcpu);
6258         int r;
6259
6260         r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
6261         if (!r)
6262                 return;
6263
6264         if (pi_test_and_set_pir(vector, &vmx->pi_desc))
6265                 return;
6266
6267         /* If a previous notification has sent the IPI, nothing to do.  */
6268         if (pi_test_and_set_on(&vmx->pi_desc))
6269                 return;
6270
6271         if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
6272                 kvm_vcpu_kick(vcpu);
6273 }
6274
6275 /*
6276  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
6277  * will not change in the lifetime of the guest.
6278  * Note that host-state that does change is set elsewhere. E.g., host-state
6279  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
6280  */
6281 static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
6282 {
6283         u32 low32, high32;
6284         unsigned long tmpl;
6285         struct desc_ptr dt;
6286         unsigned long cr0, cr3, cr4;
6287
6288         cr0 = read_cr0();
6289         WARN_ON(cr0 & X86_CR0_TS);
6290         vmcs_writel(HOST_CR0, cr0);  /* 22.2.3 */
6291
6292         /*
6293          * Save the most likely value for this task's CR3 in the VMCS.
6294          * We can't use __get_current_cr3_fast() because we're not atomic.
6295          */
6296         cr3 = __read_cr3();
6297         vmcs_writel(HOST_CR3, cr3);             /* 22.2.3  FIXME: shadow tables */
6298         vmx->loaded_vmcs->host_state.cr3 = cr3;
6299
6300         /* Save the most likely value for this task's CR4 in the VMCS. */
6301         cr4 = cr4_read_shadow();
6302         vmcs_writel(HOST_CR4, cr4);                     /* 22.2.3, 22.2.5 */
6303         vmx->loaded_vmcs->host_state.cr4 = cr4;
6304
6305         vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
6306 #ifdef CONFIG_X86_64
6307         /*
6308          * Load null selectors, so we can avoid reloading them in
6309          * vmx_prepare_switch_to_host(), in case userspace uses
6310          * the null selectors too (the expected case).
6311          */
6312         vmcs_write16(HOST_DS_SELECTOR, 0);
6313         vmcs_write16(HOST_ES_SELECTOR, 0);
6314 #else
6315         vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
6316         vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
6317 #endif
6318         vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
6319         vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
6320
6321         store_idt(&dt);
6322         vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
6323         vmx->host_idt_base = dt.address;
6324
6325         vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
6326
6327         rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
6328         vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
6329         rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
6330         vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
6331
6332         if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
6333                 rdmsr(MSR_IA32_CR_PAT, low32, high32);
6334                 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
6335         }
6336 }
6337
6338 static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
6339 {
6340         vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
6341         if (enable_ept)
6342                 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
6343         if (is_guest_mode(&vmx->vcpu))
6344                 vmx->vcpu.arch.cr4_guest_owned_bits &=
6345                         ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
6346         vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
6347 }
6348
6349 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
6350 {
6351         u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
6352
6353         if (!kvm_vcpu_apicv_active(&vmx->vcpu))
6354                 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
6355
6356         if (!enable_vnmi)
6357                 pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
6358
6359         /* Enable the preemption timer dynamically */
6360         pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
6361         return pin_based_exec_ctrl;
6362 }
6363
6364 static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
6365 {
6366         struct vcpu_vmx *vmx = to_vmx(vcpu);
6367
6368         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
6369         if (cpu_has_secondary_exec_ctrls()) {
6370                 if (kvm_vcpu_apicv_active(vcpu))
6371                         vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
6372                                       SECONDARY_EXEC_APIC_REGISTER_VIRT |
6373                                       SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
6374                 else
6375                         vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
6376                                         SECONDARY_EXEC_APIC_REGISTER_VIRT |
6377                                         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
6378         }
6379
6380         if (cpu_has_vmx_msr_bitmap())
6381                 vmx_update_msr_bitmap(vcpu);
6382 }
6383
6384 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
6385 {
6386         u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
6387
6388         if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
6389                 exec_control &= ~CPU_BASED_MOV_DR_EXITING;
6390
6391         if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
6392                 exec_control &= ~CPU_BASED_TPR_SHADOW;
6393 #ifdef CONFIG_X86_64
6394                 exec_control |= CPU_BASED_CR8_STORE_EXITING |
6395                                 CPU_BASED_CR8_LOAD_EXITING;
6396 #endif
6397         }
6398         if (!enable_ept)
6399                 exec_control |= CPU_BASED_CR3_STORE_EXITING |
6400                                 CPU_BASED_CR3_LOAD_EXITING  |
6401                                 CPU_BASED_INVLPG_EXITING;
6402         if (kvm_mwait_in_guest(vmx->vcpu.kvm))
6403                 exec_control &= ~(CPU_BASED_MWAIT_EXITING |
6404                                 CPU_BASED_MONITOR_EXITING);
6405         if (kvm_hlt_in_guest(vmx->vcpu.kvm))
6406                 exec_control &= ~CPU_BASED_HLT_EXITING;
6407         return exec_control;
6408 }
6409
6410 static bool vmx_rdrand_supported(void)
6411 {
6412         return vmcs_config.cpu_based_2nd_exec_ctrl &
6413                 SECONDARY_EXEC_RDRAND_EXITING;
6414 }
6415
6416 static bool vmx_rdseed_supported(void)
6417 {
6418         return vmcs_config.cpu_based_2nd_exec_ctrl &
6419                 SECONDARY_EXEC_RDSEED_EXITING;
6420 }
6421
6422 static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
6423 {
6424         struct kvm_vcpu *vcpu = &vmx->vcpu;
6425
6426         u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
6427
6428         if (!cpu_need_virtualize_apic_accesses(vcpu))
6429                 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6430         if (vmx->vpid == 0)
6431                 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
6432         if (!enable_ept) {
6433                 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
6434                 enable_unrestricted_guest = 0;
6435         }
6436         if (!enable_unrestricted_guest)
6437                 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
6438         if (kvm_pause_in_guest(vmx->vcpu.kvm))
6439                 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
6440         if (!kvm_vcpu_apicv_active(vcpu))
6441                 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
6442                                   SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
6443         exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6444
6445         /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
6446          * in vmx_set_cr4.  */
6447         exec_control &= ~SECONDARY_EXEC_DESC;
6448
6449         /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
6450            (handle_vmptrld).
6451            We can NOT enable shadow_vmcs here because we don't yet have
6452            a current VMCS12.
6453         */
6454         exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
6455
6456         if (!enable_pml)
6457                 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
6458
6459         if (vmx_xsaves_supported()) {
6460                 /* Exposing XSAVES only when XSAVE is exposed */
6461                 bool xsaves_enabled =
6462                         guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
6463                         guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
6464
6465                 if (!xsaves_enabled)
6466                         exec_control &= ~SECONDARY_EXEC_XSAVES;
6467
6468                 if (nested) {
6469                         if (xsaves_enabled)
6470                                 vmx->nested.msrs.secondary_ctls_high |=
6471                                         SECONDARY_EXEC_XSAVES;
6472                         else
6473                                 vmx->nested.msrs.secondary_ctls_high &=
6474                                         ~SECONDARY_EXEC_XSAVES;
6475                 }
6476         }
6477
6478         if (vmx_rdtscp_supported()) {
6479                 bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP);
6480                 if (!rdtscp_enabled)
6481                         exec_control &= ~SECONDARY_EXEC_RDTSCP;
6482
6483                 if (nested) {
6484                         if (rdtscp_enabled)
6485                                 vmx->nested.msrs.secondary_ctls_high |=
6486                                         SECONDARY_EXEC_RDTSCP;
6487                         else
6488                                 vmx->nested.msrs.secondary_ctls_high &=
6489                                         ~SECONDARY_EXEC_RDTSCP;
6490                 }
6491         }
6492
6493         if (vmx_invpcid_supported()) {
6494                 /* Exposing INVPCID only when PCID is exposed */
6495                 bool invpcid_enabled =
6496                         guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) &&
6497                         guest_cpuid_has(vcpu, X86_FEATURE_PCID);
6498
6499                 if (!invpcid_enabled) {
6500                         exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
6501                         guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID);
6502                 }
6503
6504                 if (nested) {
6505                         if (invpcid_enabled)
6506                                 vmx->nested.msrs.secondary_ctls_high |=
6507                                         SECONDARY_EXEC_ENABLE_INVPCID;
6508                         else
6509                                 vmx->nested.msrs.secondary_ctls_high &=
6510                                         ~SECONDARY_EXEC_ENABLE_INVPCID;
6511                 }
6512         }
6513
6514         if (vmx_rdrand_supported()) {
6515                 bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND);
6516                 if (rdrand_enabled)
6517                         exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING;
6518
6519                 if (nested) {
6520                         if (rdrand_enabled)
6521                                 vmx->nested.msrs.secondary_ctls_high |=
6522                                         SECONDARY_EXEC_RDRAND_EXITING;
6523                         else
6524                                 vmx->nested.msrs.secondary_ctls_high &=
6525                                         ~SECONDARY_EXEC_RDRAND_EXITING;
6526                 }
6527         }
6528
6529         if (vmx_rdseed_supported()) {
6530                 bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED);
6531                 if (rdseed_enabled)
6532                         exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING;
6533
6534                 if (nested) {
6535                         if (rdseed_enabled)
6536                                 vmx->nested.msrs.secondary_ctls_high |=
6537                                         SECONDARY_EXEC_RDSEED_EXITING;
6538                         else
6539                                 vmx->nested.msrs.secondary_ctls_high &=
6540                                         ~SECONDARY_EXEC_RDSEED_EXITING;
6541                 }
6542         }
6543
6544         vmx->secondary_exec_control = exec_control;
6545 }
6546
6547 static void ept_set_mmio_spte_mask(void)
6548 {
6549         /*
6550          * EPT Misconfigurations can be generated if the value of bits 2:0
6551          * of an EPT paging-structure entry is 110b (write/execute).
6552          */
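        /*
         * Write-without-read is never a legal EPT permission combination, so
         * an access through such an entry always causes an EPT misconfiguration
         * VM exit, which KVM uses to trap and emulate MMIO accesses.
         */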
6553         kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
6554                                    VMX_EPT_MISCONFIG_WX_VALUE);
6555 }
6556
6557 #define VMX_XSS_EXIT_BITMAP 0
6558 /*
6559  * Sets up the vmcs for emulated real mode.
6560  */
6561 static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
6562 {
6563         int i;
6564
6565         if (enable_shadow_vmcs) {
6566                 /*
6567                  * At vCPU creation, "VMWRITE to any supported field
6568                  * in the VMCS" is supported, so use the more
6569                  * permissive vmx_vmread_bitmap to specify both read
6570                  * and write permissions for the shadow VMCS.
6571                  */
6572                 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
6573                 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap));
6574         }
6575         if (cpu_has_vmx_msr_bitmap())
6576                 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
6577
6578         vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
6579
6580         /* Control */
6581         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
6582         vmx->hv_deadline_tsc = -1;
6583
6584         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
6585
6586         if (cpu_has_secondary_exec_ctrls()) {
6587                 vmx_compute_secondary_exec_control(vmx);
6588                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
6589                              vmx->secondary_exec_control);
6590         }
6591
6592         if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
6593                 vmcs_write64(EOI_EXIT_BITMAP0, 0);
6594                 vmcs_write64(EOI_EXIT_BITMAP1, 0);
6595                 vmcs_write64(EOI_EXIT_BITMAP2, 0);
6596                 vmcs_write64(EOI_EXIT_BITMAP3, 0);
6597
6598                 vmcs_write16(GUEST_INTR_STATUS, 0);
6599
6600                 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
6601                 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
6602         }
6603
6604         if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
6605                 vmcs_write32(PLE_GAP, ple_gap);
6606                 vmx->ple_window = ple_window;
6607                 vmx->ple_window_dirty = true;
6608         }
6609
6610         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
6611         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
6612         vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
6613
6614         vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
6615         vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
6616         vmx_set_constant_host_state(vmx);
6617         vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
6618         vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
6619
6620         if (cpu_has_vmx_vmfunc())
6621                 vmcs_write64(VM_FUNCTION_CONTROL, 0);
6622
6623         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
6624         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
6625         vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
6626         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
6627         vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
6628
6629         if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
6630                 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
6631
6632         for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
6633                 u32 index = vmx_msr_index[i];
6634                 u32 data_low, data_high;
6635                 int j = vmx->nmsrs;
6636
6637                 if (rdmsr_safe(index, &data_low, &data_high) < 0)
6638                         continue;
6639                 if (wrmsr_safe(index, data_low, data_high) < 0)
6640                         continue;
6641                 vmx->guest_msrs[j].index = i;
6642                 vmx->guest_msrs[j].data = 0;
6643                 vmx->guest_msrs[j].mask = -1ull;
6644                 ++vmx->nmsrs;
6645         }
6646
6647         vmx->arch_capabilities = kvm_get_arch_capabilities();
6648
6649         vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
6650
6651         /* 22.2.1, 20.8.1 */
6652         vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
6653
6654         vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
6655         vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
6656
6657         set_cr4_guest_host_mask(vmx);
6658
6659         if (vmx_xsaves_supported())
6660                 vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
6661
6662         if (enable_pml) {
6663                 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
6664                 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
6665         }
6666
6667         if (cpu_has_vmx_encls_vmexit())
6668                 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
6669 }
6670
6671 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
6672 {
6673         struct vcpu_vmx *vmx = to_vmx(vcpu);
6674         struct msr_data apic_base_msr;
6675         u64 cr0;
6676
6677         vmx->rmode.vm86_active = 0;
6678         vmx->spec_ctrl = 0;
6679
6680         vcpu->arch.microcode_version = 0x100000000ULL;
6681         vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
6682         kvm_set_cr8(vcpu, 0);
6683
6684         if (!init_event) {
6685                 apic_base_msr.data = APIC_DEFAULT_PHYS_BASE |
6686                                      MSR_IA32_APICBASE_ENABLE;
6687                 if (kvm_vcpu_is_reset_bsp(vcpu))
6688                         apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
6689                 apic_base_msr.host_initiated = true;
6690                 kvm_set_apic_base(vcpu, &apic_base_msr);
6691         }
6692
6693         vmx_segment_cache_clear(vmx);
6694
6695         seg_setup(VCPU_SREG_CS);
6696         vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
6697         vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);
6698
6699         seg_setup(VCPU_SREG_DS);
6700         seg_setup(VCPU_SREG_ES);
6701         seg_setup(VCPU_SREG_FS);
6702         seg_setup(VCPU_SREG_GS);
6703         seg_setup(VCPU_SREG_SS);
6704
6705         vmcs_write16(GUEST_TR_SELECTOR, 0);
6706         vmcs_writel(GUEST_TR_BASE, 0);
6707         vmcs_write32(GUEST_TR_LIMIT, 0xffff);
6708         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
6709
6710         vmcs_write16(GUEST_LDTR_SELECTOR, 0);
6711         vmcs_writel(GUEST_LDTR_BASE, 0);
6712         vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
6713         vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
6714
6715         if (!init_event) {
6716                 vmcs_write32(GUEST_SYSENTER_CS, 0);
6717                 vmcs_writel(GUEST_SYSENTER_ESP, 0);
6718                 vmcs_writel(GUEST_SYSENTER_EIP, 0);
6719                 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
6720         }
6721
6722         kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
6723         kvm_rip_write(vcpu, 0xfff0);
6724
6725         vmcs_writel(GUEST_GDTR_BASE, 0);
6726         vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
6727
6728         vmcs_writel(GUEST_IDTR_BASE, 0);
6729         vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
6730
6731         vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
6732         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
6733         vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
6734         if (kvm_mpx_supported())
6735                 vmcs_write64(GUEST_BNDCFGS, 0);
6736
6737         setup_msrs(vmx);
6738
6739         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
6740
6741         if (cpu_has_vmx_tpr_shadow() && !init_event) {
6742                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
6743                 if (cpu_need_tpr_shadow(vcpu))
6744                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
6745                                      __pa(vcpu->arch.apic->regs));
6746                 vmcs_write32(TPR_THRESHOLD, 0);
6747         }
6748
6749         kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6750
6751         if (vmx->vpid != 0)
6752                 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
6753
6754         cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
6755         vmx->vcpu.arch.cr0 = cr0;
6756         vmx_set_cr0(vcpu, cr0); /* enter rmode */
6757         vmx_set_cr4(vcpu, 0);
6758         vmx_set_efer(vcpu, 0);
6759
6760         update_exception_bitmap(vcpu);
6761
6762         vpid_sync_context(vmx->vpid);
6763         if (init_event)
6764                 vmx_clear_hlt(vcpu);
6765 }
6766
6767 /*
6768  * In nested virtualization, check if L1 asked to exit on external interrupts.
6769  * For most existing hypervisors, this will always return true.
6770  */
6771 static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
6772 {
6773         return get_vmcs12(vcpu)->pin_based_vm_exec_control &
6774                 PIN_BASED_EXT_INTR_MASK;
6775 }
6776
6777 /*
6778  * In nested virtualization, check if L1 has set
6779  * VM_EXIT_ACK_INTR_ON_EXIT
6780  */
6781 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
6782 {
6783         return get_vmcs12(vcpu)->vm_exit_controls &
6784                 VM_EXIT_ACK_INTR_ON_EXIT;
6785 }
6786
6787 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
6788 {
6789         return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
6790 }
6791
6792 static void enable_irq_window(struct kvm_vcpu *vcpu)
6793 {
6794         vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
6795                       CPU_BASED_VIRTUAL_INTR_PENDING);
6796 }
6797
6798 static void enable_nmi_window(struct kvm_vcpu *vcpu)
6799 {
6800         if (!enable_vnmi ||
6801             vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
6802                 enable_irq_window(vcpu);
6803                 return;
6804         }
6805
6806         vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
6807                       CPU_BASED_VIRTUAL_NMI_PENDING);
6808 }
6809
6810 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
6811 {
6812         struct vcpu_vmx *vmx = to_vmx(vcpu);
6813         uint32_t intr;
6814         int irq = vcpu->arch.interrupt.nr;
6815
6816         trace_kvm_inj_virq(irq);
6817
6818         ++vcpu->stat.irq_injections;
6819         if (vmx->rmode.vm86_active) {
6820                 int inc_eip = 0;
6821                 if (vcpu->arch.interrupt.soft)
6822                         inc_eip = vcpu->arch.event_exit_inst_len;
6823                 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
6824                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
6825                 return;
6826         }
6827         intr = irq | INTR_INFO_VALID_MASK;
6828         if (vcpu->arch.interrupt.soft) {
6829                 intr |= INTR_TYPE_SOFT_INTR;
6830                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
6831                              vmx->vcpu.arch.event_exit_inst_len);
6832         } else
6833                 intr |= INTR_TYPE_EXT_INTR;
6834         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
6835
6836         vmx_clear_hlt(vcpu);
6837 }
6838
6839 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
6840 {
6841         struct vcpu_vmx *vmx = to_vmx(vcpu);
6842
6843         if (!enable_vnmi) {
6844                 /*
6845                  * Tracking the NMI-blocked state in software is built upon
6846                  * finding the next open IRQ window. This, in turn, depends on
6847                  * well-behaved guests: they have to keep IRQs disabled at
6848                  * least as long as the NMI handler runs. Otherwise we may
6849                  * cause NMI nesting, maybe breaking the guest. But as this is
6850                  * highly unlikely, we can live with the residual risk.
6851                  */
6852                 vmx->loaded_vmcs->soft_vnmi_blocked = 1;
6853                 vmx->loaded_vmcs->vnmi_blocked_time = 0;
6854         }
6855
6856         ++vcpu->stat.nmi_injections;
6857         vmx->loaded_vmcs->nmi_known_unmasked = false;
6858
6859         if (vmx->rmode.vm86_active) {
6860                 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
6861                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
6862                 return;
6863         }
6864
6865         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
6866                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
6867
6868         vmx_clear_hlt(vcpu);
6869 }
6870
6871 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
6872 {
6873         struct vcpu_vmx *vmx = to_vmx(vcpu);
6874         bool masked;
6875
6876         if (!enable_vnmi)
6877                 return vmx->loaded_vmcs->soft_vnmi_blocked;
6878         if (vmx->loaded_vmcs->nmi_known_unmasked)
6879                 return false;
6880         masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
6881         vmx->loaded_vmcs->nmi_known_unmasked = !masked;
6882         return masked;
6883 }
6884
6885 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
6886 {
6887         struct vcpu_vmx *vmx = to_vmx(vcpu);
6888
6889         if (!enable_vnmi) {
6890                 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
6891                         vmx->loaded_vmcs->soft_vnmi_blocked = masked;
6892                         vmx->loaded_vmcs->vnmi_blocked_time = 0;
6893                 }
6894         } else {
6895                 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
6896                 if (masked)
6897                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6898                                       GUEST_INTR_STATE_NMI);
6899                 else
6900                         vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
6901                                         GUEST_INTR_STATE_NMI);
6902         }
6903 }
6904
6905 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
6906 {
6907         if (to_vmx(vcpu)->nested.nested_run_pending)
6908                 return 0;
6909
6910         if (!enable_vnmi &&
6911             to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
6912                 return 0;
6913
6914         return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
6915                   (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
6916                    | GUEST_INTR_STATE_NMI));
6917 }
6918
6919 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
6920 {
6921         return (!to_vmx(vcpu)->nested.nested_run_pending &&
6922                 vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
6923                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
6924                         (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
6925 }
6926
6927 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
6928 {
6929         int ret;
6930
6931         if (enable_unrestricted_guest)
6932                 return 0;
6933
6934         ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
6935                                     PAGE_SIZE * 3);
6936         if (ret)
6937                 return ret;
6938         to_kvm_vmx(kvm)->tss_addr = addr;
6939         return init_rmode_tss(kvm);
6940 }
6941
6942 static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
6943 {
6944         to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
6945         return 0;
6946 }
6947
6948 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
6949 {
6950         switch (vec) {
6951         case BP_VECTOR:
6952                 /*
6953                  * Update instruction length as we may reinject the exception
6954                  * from user space while in guest debugging mode.
6955                  */
6956                 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
6957                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
6958                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
6959                         return false;
6960                 /* fall through */
6961         case DB_VECTOR:
6962                 if (vcpu->guest_debug &
6963                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
6964                         return false;
6965                 /* fall through */
6966         case DE_VECTOR:
6967         case OF_VECTOR:
6968         case BR_VECTOR:
6969         case UD_VECTOR:
6970         case DF_VECTOR:
6971         case SS_VECTOR:
6972         case GP_VECTOR:
6973         case MF_VECTOR:
6974                 return true;
6975         break;
6976         }
6977         return false;
6978 }
6979
6980 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
6981                                   int vec, u32 err_code)
6982 {
6983         /*
6984          * An instruction with the address-size override prefix (opcode 0x67)
6985          * causes a #SS fault with error code 0 in VM86 mode.
6986          */
6987         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
6988                 if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
6989                         if (vcpu->arch.halt_request) {
6990                                 vcpu->arch.halt_request = 0;
6991                                 return kvm_vcpu_halt(vcpu);
6992                         }
6993                         return 1;
6994                 }
6995                 return 0;
6996         }
6997
6998         /*
6999          * Forward all other exceptions that are valid in real mode.
7000          * FIXME: Breaks guest debugging in real mode, needs to be fixed with
7001          *        the required debugging infrastructure rework.
7002          */
7003         kvm_queue_exception(vcpu, vec);
7004         return 1;
7005 }
7006
7007 /*
7008  * Trigger machine check on the host. We assume all the MSRs are already set up
7009  * by the CPU and that we still run on the same CPU as the MCE occurred on.
7010  * We pass a fake environment to the machine check handler because we want
7011  * the guest to always be treated like user space, no matter what context
7012  * it used internally.
7013  */
7014 static void kvm_machine_check(void)
7015 {
7016 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
7017         struct pt_regs regs = {
7018                 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
7019                 .flags = X86_EFLAGS_IF,
7020         };
7021
7022         do_machine_check(&regs, 0);
7023 #endif
7024 }
7025
7026 static int handle_machine_check(struct kvm_vcpu *vcpu)
7027 {
7028         /* already handled by vcpu_run */
7029         return 1;
7030 }
7031
7032 static int handle_exception(struct kvm_vcpu *vcpu)
7033 {
7034         struct vcpu_vmx *vmx = to_vmx(vcpu);
7035         struct kvm_run *kvm_run = vcpu->run;
7036         u32 intr_info, ex_no, error_code;
7037         unsigned long cr2, rip, dr6;
7038         u32 vect_info;
7039         enum emulation_result er;
7040
7041         vect_info = vmx->idt_vectoring_info;
7042         intr_info = vmx->exit_intr_info;
7043
7044         if (is_machine_check(intr_info))
7045                 return handle_machine_check(vcpu);
7046
7047         if (is_nmi(intr_info))
7048                 return 1;  /* already handled by vmx_vcpu_run() */
7049
7050         if (is_invalid_opcode(intr_info))
7051                 return handle_ud(vcpu);
7052
7053         error_code = 0;
7054         if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
7055                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
7056
7057         if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
7058                 WARN_ON_ONCE(!enable_vmware_backdoor);
7059                 er = kvm_emulate_instruction(vcpu,
7060                         EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
7061                 if (er == EMULATE_USER_EXIT)
7062                         return 0;
7063                 else if (er != EMULATE_DONE)
7064                         kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
7065                 return 1;
7066         }
7067
7068         /*
7069          * A #PF with PFEC.RSVD = 1 indicates the guest is accessing
7070          * MMIO; in that case it is better to report an internal error.
7071          * See the comments in vmx_handle_exit.
7072          */
7073         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
7074             !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
7075                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7076                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
7077                 vcpu->run->internal.ndata = 3;
7078                 vcpu->run->internal.data[0] = vect_info;
7079                 vcpu->run->internal.data[1] = intr_info;
7080                 vcpu->run->internal.data[2] = error_code;
7081                 return 0;
7082         }
7083
7084         if (is_page_fault(intr_info)) {
7085                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
7086                 /* EPT won't cause page fault directly */
7087                 WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
7088                 return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
7089         }
7090
7091         ex_no = intr_info & INTR_INFO_VECTOR_MASK;
7092
7093         if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
7094                 return handle_rmode_exception(vcpu, ex_no, error_code);
7095
7096         switch (ex_no) {
7097         case AC_VECTOR:
7098                 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
7099                 return 1;
7100         case DB_VECTOR:
7101                 dr6 = vmcs_readl(EXIT_QUALIFICATION);
7102                 if (!(vcpu->guest_debug &
7103                       (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
7104                         vcpu->arch.dr6 &= ~15;
7105                         vcpu->arch.dr6 |= dr6 | DR6_RTM;
7106                         if (is_icebp(intr_info))
7107                                 skip_emulated_instruction(vcpu);
7108
7109                         kvm_queue_exception(vcpu, DB_VECTOR);
7110                         return 1;
7111                 }
7112                 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
7113                 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
7114                 /* fall through */
7115         case BP_VECTOR:
7116                 /*
7117                  * Update instruction length as we may reinject #BP from
7118                  * user space while in guest debugging mode. Reading it for
7119                  * #DB as well causes no harm; it is not used in that case.
7120                  */
7121                 vmx->vcpu.arch.event_exit_inst_len =
7122                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
7123                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
7124                 rip = kvm_rip_read(vcpu);
7125                 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
7126                 kvm_run->debug.arch.exception = ex_no;
7127                 break;
7128         default:
7129                 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
7130                 kvm_run->ex.exception = ex_no;
7131                 kvm_run->ex.error_code = error_code;
7132                 break;
7133         }
7134         return 0;
7135 }
7136
7137 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
7138 {
7139         ++vcpu->stat.irq_exits;
7140         return 1;
7141 }
7142
7143 static int handle_triple_fault(struct kvm_vcpu *vcpu)
7144 {
7145         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
7146         vcpu->mmio_needed = 0;
7147         return 0;
7148 }
7149
7150 static int handle_io(struct kvm_vcpu *vcpu)
7151 {
7152         unsigned long exit_qualification;
7153         int size, in, string;
7154         unsigned port;
7155
7156         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
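        /*
         * I/O-instruction exit qualification layout (per the SDM): bits 2:0
         * hold the access size minus one, bit 3 is the direction (1 = IN),
         * bit 4 is set for string instructions (INS/OUTS), and bits 31:16
         * hold the port number.
         */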
7157         string = (exit_qualification & 16) != 0;
7158
7159         ++vcpu->stat.io_exits;
7160
7161         if (string)
7162                 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7163
7164         port = exit_qualification >> 16;
7165         size = (exit_qualification & 7) + 1;
7166         in = (exit_qualification & 8) != 0;
7167
7168         return kvm_fast_pio(vcpu, size, port, in);
7169 }
7170
7171 static void
7172 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
7173 {
7174         /*
7175          * Patch in the VMCALL instruction:
7176          */
7177         hypercall[0] = 0x0f;
7178         hypercall[1] = 0x01;
7179         hypercall[2] = 0xc1;
7180 }
7181
7182 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
7183 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
7184 {
7185         if (is_guest_mode(vcpu)) {
7186                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7187                 unsigned long orig_val = val;
7188
7189                 /*
7190                  * We get here when L2 changed cr0 in a way that did not change
7191                  * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
7192                  * but did change L0 shadowed bits. So we first calculate the
7193                  * effective cr0 value that L1 would like to write into the
7194                  * hardware. It consists of the L2-owned bits from the new
7195                  * value combined with the L1-owned bits from L1's guest_cr0.
7196                  */
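                     /*
                      * For instance, if L1 shadows only CR0.TS
                      * (cr0_guest_host_mask == X86_CR0_TS), every bit L2 wrote
                      * is kept except TS, which is taken from vmcs12->guest_cr0.
                      */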
7197                 val = (val & ~vmcs12->cr0_guest_host_mask) |
7198                         (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
7199
7200                 if (!nested_guest_cr0_valid(vcpu, val))
7201                         return 1;
7202
7203                 if (kvm_set_cr0(vcpu, val))
7204                         return 1;
7205                 vmcs_writel(CR0_READ_SHADOW, orig_val);
7206                 return 0;
7207         } else {
7208                 if (to_vmx(vcpu)->nested.vmxon &&
7209                     !nested_host_cr0_valid(vcpu, val))
7210                         return 1;
7211
7212                 return kvm_set_cr0(vcpu, val);
7213         }
7214 }
7215
7216 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
7217 {
7218         if (is_guest_mode(vcpu)) {
7219                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7220                 unsigned long orig_val = val;
7221
7222                 /* analogously to handle_set_cr0 */
7223                 val = (val & ~vmcs12->cr4_guest_host_mask) |
7224                         (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
7225                 if (kvm_set_cr4(vcpu, val))
7226                         return 1;
7227                 vmcs_writel(CR4_READ_SHADOW, orig_val);
7228                 return 0;
7229         } else
7230                 return kvm_set_cr4(vcpu, val);
7231 }
7232
7233 static int handle_desc(struct kvm_vcpu *vcpu)
7234 {
7235         WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
7236         return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7237 }
7238
7239 static int handle_cr(struct kvm_vcpu *vcpu)
7240 {
7241         unsigned long exit_qualification, val;
7242         int cr;
7243         int reg;
7244         int err;
7245         int ret;
7246
7247         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
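             /*
              * Exit-qualification layout for CR accesses, as decoded below:
              * bits 3:0 give the control register number, bits 5:4 the access
              * type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW) and
              * bits 11:8 the GPR operand.
              */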
7248         cr = exit_qualification & 15;
7249         reg = (exit_qualification >> 8) & 15;
7250         switch ((exit_qualification >> 4) & 3) {
7251         case 0: /* mov to cr */
7252                 val = kvm_register_readl(vcpu, reg);
7253                 trace_kvm_cr_write(cr, val);
7254                 switch (cr) {
7255                 case 0:
7256                         err = handle_set_cr0(vcpu, val);
7257                         return kvm_complete_insn_gp(vcpu, err);
7258                 case 3:
7259                         WARN_ON_ONCE(enable_unrestricted_guest);
7260                         err = kvm_set_cr3(vcpu, val);
7261                         return kvm_complete_insn_gp(vcpu, err);
7262                 case 4:
7263                         err = handle_set_cr4(vcpu, val);
7264                         return kvm_complete_insn_gp(vcpu, err);
7265                 case 8: {
7266                                 u8 cr8_prev = kvm_get_cr8(vcpu);
7267                                 u8 cr8 = (u8)val;
7268                                 err = kvm_set_cr8(vcpu, cr8);
7269                                 ret = kvm_complete_insn_gp(vcpu, err);
7270                                 if (lapic_in_kernel(vcpu))
7271                                         return ret;
7272                                 if (cr8_prev <= cr8)
7273                                         return ret;
7274                                 /*
7275                                  * TODO: we might be squashing a
7276                                  * KVM_GUESTDBG_SINGLESTEP-triggered
7277                                  * KVM_EXIT_DEBUG here.
7278                                  */
7279                                 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
7280                                 return 0;
7281                         }
7282                 }
7283                 break;
7284         case 2: /* clts */
7285                 WARN_ONCE(1, "Guest should always own CR0.TS");
7286                 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
7287                 trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
7288                 return kvm_skip_emulated_instruction(vcpu);
7289         case 1: /*mov from cr*/
7290                 switch (cr) {
7291                 case 3:
7292                         WARN_ON_ONCE(enable_unrestricted_guest);
7293                         val = kvm_read_cr3(vcpu);
7294                         kvm_register_write(vcpu, reg, val);
7295                         trace_kvm_cr_read(cr, val);
7296                         return kvm_skip_emulated_instruction(vcpu);
7297                 case 8:
7298                         val = kvm_get_cr8(vcpu);
7299                         kvm_register_write(vcpu, reg, val);
7300                         trace_kvm_cr_read(cr, val);
7301                         return kvm_skip_emulated_instruction(vcpu);
7302                 }
7303                 break;
7304         case 3: /* lmsw */
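                     /*
                      * LMSW only updates CR0 bits 3:0 (PE, MP, EM, TS); the
                      * 16-bit source operand is reported in exit-qualification
                      * bits 31:16, hence the shift and the 0x0f mask.
                      */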
7305                 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
7306                 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
7307                 kvm_lmsw(vcpu, val);
7308
7309                 return kvm_skip_emulated_instruction(vcpu);
7310         default:
7311                 break;
7312         }
7313         vcpu->run->exit_reason = 0;
7314         vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
7315                (int)(exit_qualification >> 4) & 3, cr);
7316         return 0;
7317 }
7318
7319 static int handle_dr(struct kvm_vcpu *vcpu)
7320 {
7321         unsigned long exit_qualification;
7322         int dr, dr7, reg;
7323
7324         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7325         dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
7326
7327         /* First, if DR does not exist, trigger UD */
7328         if (!kvm_require_dr(vcpu, dr))
7329                 return 1;
7330
7331         /* Do not handle if CPL > 0; the queued #GP is delivered on re-entry */
7332         if (!kvm_require_cpl(vcpu, 0))
7333                 return 1;
7334         dr7 = vmcs_readl(GUEST_DR7);
7335         if (dr7 & DR7_GD) {
7336                 /*
7337                  * As the vm-exit takes precedence over the debug trap, we
7338                  * need to emulate the latter, either for the host or the
7339                  * guest debugging itself.
7340                  */
7341                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
7342                         vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
7343                         vcpu->run->debug.arch.dr7 = dr7;
7344                         vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
7345                         vcpu->run->debug.arch.exception = DB_VECTOR;
7346                         vcpu->run->exit_reason = KVM_EXIT_DEBUG;
7347                         return 0;
7348                 } else {
7349                         vcpu->arch.dr6 &= ~15;
7350                         vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
7351                         kvm_queue_exception(vcpu, DB_VECTOR);
7352                         return 1;
7353                 }
7354         }
7355
7356         if (vcpu->guest_debug == 0) {
7357                 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
7358                                 CPU_BASED_MOV_DR_EXITING);
7359
7360                 /*
7361                  * No more DR vmexits; force a reload of the debug registers
7362                  * and reenter on this instruction.  The next vmexit will
7363                  * retrieve the full state of the debug registers.
7364                  */
7365                 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
7366                 return 1;
7367         }
7368
7369         reg = DEBUG_REG_ACCESS_REG(exit_qualification);
7370         if (exit_qualification & TYPE_MOV_FROM_DR) {
7371                 unsigned long val;
7372
7373                 if (kvm_get_dr(vcpu, dr, &val))
7374                         return 1;
7375                 kvm_register_write(vcpu, reg, val);
7376         } else
7377                 if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
7378                         return 1;
7379
7380         return kvm_skip_emulated_instruction(vcpu);
7381 }
7382
7383 static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
7384 {
7385         return vcpu->arch.dr6;
7386 }
7387
7388 static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
7389 {
7390 }
7391
7392 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
7393 {
7394         get_debugreg(vcpu->arch.db[0], 0);
7395         get_debugreg(vcpu->arch.db[1], 1);
7396         get_debugreg(vcpu->arch.db[2], 2);
7397         get_debugreg(vcpu->arch.db[3], 3);
7398         get_debugreg(vcpu->arch.dr6, 6);
7399         vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
7400
7401         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
7402         vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
7403 }
7404
7405 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
7406 {
7407         vmcs_writel(GUEST_DR7, val);
7408 }
7409
7410 static int handle_cpuid(struct kvm_vcpu *vcpu)
7411 {
7412         return kvm_emulate_cpuid(vcpu);
7413 }
7414
7415 static int handle_rdmsr(struct kvm_vcpu *vcpu)
7416 {
7417         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
7418         struct msr_data msr_info;
7419
7420         msr_info.index = ecx;
7421         msr_info.host_initiated = false;
7422         if (vmx_get_msr(vcpu, &msr_info)) {
7423                 trace_kvm_msr_read_ex(ecx);
7424                 kvm_inject_gp(vcpu, 0);
7425                 return 1;
7426         }
7427
7428         trace_kvm_msr_read(ecx, msr_info.data);
7429
7430         /* FIXME: handling of bits 32:63 of rax, rdx */
7431         vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
7432         vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
7433         return kvm_skip_emulated_instruction(vcpu);
7434 }
7435
7436 static int handle_wrmsr(struct kvm_vcpu *vcpu)
7437 {
7438         struct msr_data msr;
7439         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
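             /*
              * WRMSR takes its 64-bit payload from EDX:EAX, so assemble it
              * from the low 32 bits of RAX and RDX; the upper halves are
              * ignored.
              */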
7440         u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
7441                 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
7442
7443         msr.data = data;
7444         msr.index = ecx;
7445         msr.host_initiated = false;
7446         if (kvm_set_msr(vcpu, &msr) != 0) {
7447                 trace_kvm_msr_write_ex(ecx, data);
7448                 kvm_inject_gp(vcpu, 0);
7449                 return 1;
7450         }
7451
7452         trace_kvm_msr_write(ecx, data);
7453         return kvm_skip_emulated_instruction(vcpu);
7454 }
7455
7456 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
7457 {
7458         kvm_apic_update_ppr(vcpu);
7459         return 1;
7460 }
7461
7462 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
7463 {
7464         vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
7465                         CPU_BASED_VIRTUAL_INTR_PENDING);
7466
7467         kvm_make_request(KVM_REQ_EVENT, vcpu);
7468
7469         ++vcpu->stat.irq_window_exits;
7470         return 1;
7471 }
7472
7473 static int handle_halt(struct kvm_vcpu *vcpu)
7474 {
7475         return kvm_emulate_halt(vcpu);
7476 }
7477
7478 static int handle_vmcall(struct kvm_vcpu *vcpu)
7479 {
7480         return kvm_emulate_hypercall(vcpu);
7481 }
7482
7483 static int handle_invd(struct kvm_vcpu *vcpu)
7484 {
7485         return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7486 }
7487
7488 static int handle_invlpg(struct kvm_vcpu *vcpu)
7489 {
7490         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7491
7492         kvm_mmu_invlpg(vcpu, exit_qualification);
7493         return kvm_skip_emulated_instruction(vcpu);
7494 }
7495
7496 static int handle_rdpmc(struct kvm_vcpu *vcpu)
7497 {
7498         int err;
7499
7500         err = kvm_rdpmc(vcpu);
7501         return kvm_complete_insn_gp(vcpu, err);
7502 }
7503
7504 static int handle_wbinvd(struct kvm_vcpu *vcpu)
7505 {
7506         return kvm_emulate_wbinvd(vcpu);
7507 }
7508
7509 static int handle_xsetbv(struct kvm_vcpu *vcpu)
7510 {
7511         u64 new_bv = kvm_read_edx_eax(vcpu);
7512         u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
7513
7514         if (kvm_set_xcr(vcpu, index, new_bv) == 0)
7515                 return kvm_skip_emulated_instruction(vcpu);
7516         return 1;
7517 }
7518
7519 static int handle_xsaves(struct kvm_vcpu *vcpu)
7520 {
7521         kvm_skip_emulated_instruction(vcpu);
7522         WARN(1, "this should never happen\n");
7523         return 1;
7524 }
7525
7526 static int handle_xrstors(struct kvm_vcpu *vcpu)
7527 {
7528         kvm_skip_emulated_instruction(vcpu);
7529         WARN(1, "this should never happen\n");
7530         return 1;
7531 }
7532
7533 static int handle_apic_access(struct kvm_vcpu *vcpu)
7534 {
7535         if (likely(fasteoi)) {
7536                 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7537                 int access_type, offset;
7538
7539                 access_type = exit_qualification & APIC_ACCESS_TYPE;
7540                 offset = exit_qualification & APIC_ACCESS_OFFSET;
7541                 /*
7542                  * A sane guest uses MOV to write EOI, and the written
7543                  * value does not matter, so short-circuit that case here
7544                  * and avoid heavy instruction emulation.
7545                  */
7546                 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
7547                     (offset == APIC_EOI)) {
7548                         kvm_lapic_set_eoi(vcpu);
7549                         return kvm_skip_emulated_instruction(vcpu);
7550                 }
7551         }
7552         return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7553 }
7554
7555 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
7556 {
7557         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7558         int vector = exit_qualification & 0xff;
7559
7560         /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
7561         kvm_apic_set_eoi_accelerated(vcpu, vector);
7562         return 1;
7563 }
7564
7565 static int handle_apic_write(struct kvm_vcpu *vcpu)
7566 {
7567         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7568         u32 offset = exit_qualification & 0xfff;
7569
7570         /* APIC-write VM exit is trap-like and thus no need to adjust IP */
7571         kvm_apic_write_nodecode(vcpu, offset);
7572         return 1;
7573 }
7574
7575 static int handle_task_switch(struct kvm_vcpu *vcpu)
7576 {
7577         struct vcpu_vmx *vmx = to_vmx(vcpu);
7578         unsigned long exit_qualification;
7579         bool has_error_code = false;
7580         u32 error_code = 0;
7581         u16 tss_selector;
7582         int reason, type, idt_v, idt_index;
7583
7584         idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
7585         idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
7586         type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
7587
7588         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7589
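             /*
              * Exit-qualification layout for task switches: bits 15:0 hold the
              * target TSS selector and bits 31:30 how the switch was initiated
              * (CALL, IRET, JMP or, for TASK_SWITCH_GATE, a task gate in the
              * IDT).
              */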
7590         reason = (u32)exit_qualification >> 30;
7591         if (reason == TASK_SWITCH_GATE && idt_v) {
7592                 switch (type) {
7593                 case INTR_TYPE_NMI_INTR:
7594                         vcpu->arch.nmi_injected = false;
7595                         vmx_set_nmi_mask(vcpu, true);
7596                         break;
7597                 case INTR_TYPE_EXT_INTR:
7598                 case INTR_TYPE_SOFT_INTR:
7599                         kvm_clear_interrupt_queue(vcpu);
7600                         break;
7601                 case INTR_TYPE_HARD_EXCEPTION:
7602                         if (vmx->idt_vectoring_info &
7603                             VECTORING_INFO_DELIVER_CODE_MASK) {
7604                                 has_error_code = true;
7605                                 error_code =
7606                                         vmcs_read32(IDT_VECTORING_ERROR_CODE);
7607                         }
7608                         /* fall through */
7609                 case INTR_TYPE_SOFT_EXCEPTION:
7610                         kvm_clear_exception_queue(vcpu);
7611                         break;
7612                 default:
7613                         break;
7614                 }
7615         }
7616         tss_selector = exit_qualification;
7617
7618         if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
7619                        type != INTR_TYPE_EXT_INTR &&
7620                        type != INTR_TYPE_NMI_INTR))
7621                 skip_emulated_instruction(vcpu);
7622
7623         if (kvm_task_switch(vcpu, tss_selector,
7624                             type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
7625                             has_error_code, error_code) == EMULATE_FAIL) {
7626                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7627                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
7628                 vcpu->run->internal.ndata = 0;
7629                 return 0;
7630         }
7631
7632         /*
7633          * TODO: What about debug traps on tss switch?
7634          *       Are we supposed to inject them and update dr6?
7635          */
7636
7637         return 1;
7638 }
7639
7640 static int handle_ept_violation(struct kvm_vcpu *vcpu)
7641 {
7642         unsigned long exit_qualification;
7643         gpa_t gpa;
7644         u64 error_code;
7645
7646         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7647
7648         /*
7649          * EPT violation happened while executing iret from NMI,
7650          * "blocked by NMI" bit has to be set before next VM entry.
7651          * There are errata that may cause this bit to not be set:
7652          * AAK134, BY25.
7653          */
7654         if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
7655                         enable_vnmi &&
7656                         (exit_qualification & INTR_INFO_UNBLOCK_NMI))
7657                 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
7658
7659         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
7660         trace_kvm_page_fault(gpa, exit_qualification);
7661
7662         /* Is it a read fault? */
7663         error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
7664                      ? PFERR_USER_MASK : 0;
7665         /* Is it a write fault? */
7666         error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
7667                       ? PFERR_WRITE_MASK : 0;
7668         /* Is it a fetch fault? */
7669         error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
7670                       ? PFERR_FETCH_MASK : 0;
7671         /* Is the EPT page-table entry present? */
7672         error_code |= (exit_qualification &
7673                        (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE |
7674                         EPT_VIOLATION_EXECUTABLE))
7675                       ? PFERR_PRESENT_MASK : 0;
7676
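             /*
              * Exit-qualification bit 8 is set when the violation hit the
              * final guest-physical translation rather than the guest's own
              * paging structures; map it onto the matching PFERR_GUEST_* bit.
              */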
7677         error_code |= (exit_qualification & 0x100) != 0 ?
7678                PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
7679
7680         vcpu->arch.exit_qualification = exit_qualification;
7681         return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
7682 }
7683
7684 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
7685 {
7686         gpa_t gpa;
7687
7688         /*
7689          * A nested guest cannot optimize MMIO vmexits, because we have an
7690          * nGPA here instead of the required GPA.
7691          */
7692         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
7693         if (!is_guest_mode(vcpu) &&
7694             !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
7695                 trace_kvm_fast_mmio(gpa);
7696                 /*
7697                  * Doing kvm_skip_emulated_instruction() relies on undefined
7698                  * behavior: Intel's manual doesn't mandate that
7699                  * VM_EXIT_INSTRUCTION_LEN be set in the VMCS when an EPT
7700                  * misconfig occurs. Real hardware has been observed to set
7701                  * it, but other hypervisors (namely Hyper-V) don't, so we
7702                  * could end up advancing the IP by some random value.
7703                  * Disable fast mmio when running nested and keep it for real
7704                  * hardware, in the hope that the field is always set correctly.
7705                  */
7706                 if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
7707                         return kvm_skip_emulated_instruction(vcpu);
7708                 else
7709                         return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
7710                                                                 EMULATE_DONE;
7711         }
7712
7713         return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
7714 }
7715
7716 static int handle_nmi_window(struct kvm_vcpu *vcpu)
7717 {
7718         WARN_ON_ONCE(!enable_vnmi);
7719         vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
7720                         CPU_BASED_VIRTUAL_NMI_PENDING);
7721         ++vcpu->stat.nmi_window_exits;
7722         kvm_make_request(KVM_REQ_EVENT, vcpu);
7723
7724         return 1;
7725 }
7726
7727 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
7728 {
7729         struct vcpu_vmx *vmx = to_vmx(vcpu);
7730         enum emulation_result err = EMULATE_DONE;
7731         int ret = 1;
7732         u32 cpu_exec_ctrl;
7733         bool intr_window_requested;
7734         unsigned count = 130;
7735
7736         /*
7737          * We should never reach the point where we are emulating L2
7738          * due to invalid guest state as that means we incorrectly
7739          * allowed a nested VMEntry with an invalid vmcs12.
7740          */
7741         WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);
7742
7743         cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
7744         intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
7745
7746         while (vmx->emulation_required && count-- != 0) {
7747                 if (intr_window_requested && vmx_interrupt_allowed(vcpu))
7748                         return handle_interrupt_window(&vmx->vcpu);
7749
7750                 if (kvm_test_request(KVM_REQ_EVENT, vcpu))
7751                         return 1;
7752
7753                 err = kvm_emulate_instruction(vcpu, 0);
7754
7755                 if (err == EMULATE_USER_EXIT) {
7756                         ++vcpu->stat.mmio_exits;
7757                         ret = 0;
7758                         goto out;
7759                 }
7760
7761                 if (err != EMULATE_DONE)
7762                         goto emulation_error;
7763
7764                 if (vmx->emulation_required && !vmx->rmode.vm86_active &&
7765                     vcpu->arch.exception.pending)
7766                         goto emulation_error;
7767
7768                 if (vcpu->arch.halt_request) {
7769                         vcpu->arch.halt_request = 0;
7770                         ret = kvm_vcpu_halt(vcpu);
7771                         goto out;
7772                 }
7773
7774                 if (signal_pending(current))
7775                         goto out;
7776                 if (need_resched())
7777                         schedule();
7778         }
7779
7780 out:
7781         return ret;
7782
7783 emulation_error:
7784         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7785         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
7786         vcpu->run->internal.ndata = 0;
7787         return 0;
7788 }
7789
7790 static void grow_ple_window(struct kvm_vcpu *vcpu)
7791 {
7792         struct vcpu_vmx *vmx = to_vmx(vcpu);
7793         int old = vmx->ple_window;
7794
7795         vmx->ple_window = __grow_ple_window(old, ple_window,
7796                                             ple_window_grow,
7797                                             ple_window_max);
7798
7799         if (vmx->ple_window != old)
7800                 vmx->ple_window_dirty = true;
7801
7802         trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old);
7803 }
7804
7805 static void shrink_ple_window(struct kvm_vcpu *vcpu)
7806 {
7807         struct vcpu_vmx *vmx = to_vmx(vcpu);
7808         int old = vmx->ple_window;
7809
7810         vmx->ple_window = __shrink_ple_window(old, ple_window,
7811                                               ple_window_shrink,
7812                                               ple_window);
7813
7814         if (vmx->ple_window != old)
7815                 vmx->ple_window_dirty = true;
7816
7817         trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old);
7818 }
7819
7820 /*
7821  * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
7822  */
7823 static void wakeup_handler(void)
7824 {
7825         struct kvm_vcpu *vcpu;
7826         int cpu = smp_processor_id();
7827
7828         spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
7829         list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
7830                         blocked_vcpu_list) {
7831                 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
7832
7833                 if (pi_test_on(pi_desc) == 1)
7834                         kvm_vcpu_kick(vcpu);
7835         }
7836         spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
7837 }
7838
7839 static void vmx_enable_tdp(void)
7840 {
7841         kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
7842                 enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
7843                 enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
7844                 0ull, VMX_EPT_EXECUTABLE_MASK,
7845                 cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
7846                 VMX_EPT_RWX_MASK, 0ull);
7847
7848         ept_set_mmio_spte_mask();
7849         kvm_enable_tdp();
7850 }
7851
7852 static __init int hardware_setup(void)
7853 {
7854         unsigned long host_bndcfgs;
7855         int r = -ENOMEM, i;
7856
7857         rdmsrl_safe(MSR_EFER, &host_efer);
7858
7859         for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
7860                 kvm_define_shared_msr(i, vmx_msr_index[i]);
7861
7862         for (i = 0; i < VMX_BITMAP_NR; i++) {
7863                 vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL);
7864                 if (!vmx_bitmap[i])
7865                         goto out;
7866         }
7867
7868         memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
7869         memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
7870
7871         if (setup_vmcs_config(&vmcs_config) < 0) {
7872                 r = -EIO;
7873                 goto out;
7874         }
7875
7876         if (boot_cpu_has(X86_FEATURE_NX))
7877                 kvm_enable_efer_bits(EFER_NX);
7878
7879         if (boot_cpu_has(X86_FEATURE_MPX)) {
7880                 rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
7881                 WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
7882         }
7883
7884         if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
7885                 !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
7886                 enable_vpid = 0;
7887
7888         if (!cpu_has_vmx_ept() ||
7889             !cpu_has_vmx_ept_4levels() ||
7890             !cpu_has_vmx_ept_mt_wb() ||
7891             !cpu_has_vmx_invept_global())
7892                 enable_ept = 0;
7893
7894         if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
7895                 enable_ept_ad_bits = 0;
7896
7897         if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
7898                 enable_unrestricted_guest = 0;
7899
7900         if (!cpu_has_vmx_flexpriority())
7901                 flexpriority_enabled = 0;
7902
7903         if (!cpu_has_virtual_nmis())
7904                 enable_vnmi = 0;
7905
7906         /*
7907          * set_apic_access_page_addr() is used to reload apic access
7908          * page upon invalidation.  No need to do anything if not
7909          * using the APIC_ACCESS_ADDR VMCS field.
7910          */
7911         if (!flexpriority_enabled)
7912                 kvm_x86_ops->set_apic_access_page_addr = NULL;
7913
7914         if (!cpu_has_vmx_tpr_shadow())
7915                 kvm_x86_ops->update_cr8_intercept = NULL;
7916
7917         if (enable_ept && !cpu_has_vmx_ept_2m_page())
7918                 kvm_disable_largepages();
7919
7920 #if IS_ENABLED(CONFIG_HYPERV)
7921         if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
7922             && enable_ept)
7923                 kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
7924 #endif
7925
7926         if (!cpu_has_vmx_ple()) {
7927                 ple_gap = 0;
7928                 ple_window = 0;
7929                 ple_window_grow = 0;
7930                 ple_window_max = 0;
7931                 ple_window_shrink = 0;
7932         }
7933
7934         if (!cpu_has_vmx_apicv()) {
7935                 enable_apicv = 0;
7936                 kvm_x86_ops->sync_pir_to_irr = NULL;
7937         }
7938
7939         if (cpu_has_vmx_tsc_scaling()) {
7940                 kvm_has_tsc_control = true;
7941                 kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
7942                 kvm_tsc_scaling_ratio_frac_bits = 48;
7943         }
7944
7945         set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
7946
7947         if (enable_ept)
7948                 vmx_enable_tdp();
7949         else
7950                 kvm_disable_tdp();
7951
7952         if (!nested) {
7953                 kvm_x86_ops->get_nested_state = NULL;
7954                 kvm_x86_ops->set_nested_state = NULL;
7955         }
7956
7957         /*
7958          * Only enable PML when hardware supports PML feature, and both EPT
7959          * and EPT A/D bit features are enabled -- PML depends on them to work.
7960          */
7961         if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
7962                 enable_pml = 0;
7963
7964         if (!enable_pml) {
7965                 kvm_x86_ops->slot_enable_log_dirty = NULL;
7966                 kvm_x86_ops->slot_disable_log_dirty = NULL;
7967                 kvm_x86_ops->flush_log_dirty = NULL;
7968                 kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
7969         }
7970
7971         if (!cpu_has_vmx_preemption_timer())
7972                 kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
7973
7974         if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
7975                 u64 vmx_msr;
7976
7977                 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
7978                 cpu_preemption_timer_multi =
7979                          vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
7980         } else {
7981                 kvm_x86_ops->set_hv_timer = NULL;
7982                 kvm_x86_ops->cancel_hv_timer = NULL;
7983         }
7984
7985         if (!cpu_has_vmx_shadow_vmcs())
7986                 enable_shadow_vmcs = 0;
7987         if (enable_shadow_vmcs)
7988                 init_vmcs_shadow_fields();
7989
7990         kvm_set_posted_intr_wakeup_handler(wakeup_handler);
7991         nested_vmx_setup_ctls_msrs(&vmcs_config.nested, enable_apicv);
7992
7993         kvm_mce_cap_supported |= MCG_LMCE_P;
7994
7995         return alloc_kvm_area();
7996
7997 out:
7998         for (i = 0; i < VMX_BITMAP_NR; i++)
7999                 free_page((unsigned long)vmx_bitmap[i]);
8000
8001         return r;
8002 }
8003
8004 static __exit void hardware_unsetup(void)
8005 {
8006         int i;
8007
8008         for (i = 0; i < VMX_BITMAP_NR; i++)
8009                 free_page((unsigned long)vmx_bitmap[i]);
8010
8011         free_kvm_area();
8012 }
8013
8014 /*
8015  * Indicate a vcpu busy-waiting on a spinlock. We do not enable plain PAUSE
8016  * exiting, so we only get here on CPUs with PAUSE-loop exiting.
8017  */
8018 static int handle_pause(struct kvm_vcpu *vcpu)
8019 {
8020         if (!kvm_pause_in_guest(vcpu->kvm))
8021                 grow_ple_window(vcpu);
8022
8023         /*
8024          * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting"
8025          * VM-execution control is ignored if CPL > 0. OTOH, KVM
8026          * never set PAUSE_EXITING and just set PLE if supported,
8027          * so the vcpu must be CPL=0 if it gets a PAUSE exit.
8028          */
8029         kvm_vcpu_on_spin(vcpu, true);
8030         return kvm_skip_emulated_instruction(vcpu);
8031 }
8032
8033 static int handle_nop(struct kvm_vcpu *vcpu)
8034 {
8035         return kvm_skip_emulated_instruction(vcpu);
8036 }
8037
8038 static int handle_mwait(struct kvm_vcpu *vcpu)
8039 {
8040         printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
8041         return handle_nop(vcpu);
8042 }
8043
8044 static int handle_invalid_op(struct kvm_vcpu *vcpu)
8045 {
8046         kvm_queue_exception(vcpu, UD_VECTOR);
8047         return 1;
8048 }
8049
8050 static int handle_monitor_trap(struct kvm_vcpu *vcpu)
8051 {
8052         return 1;
8053 }
8054
8055 static int handle_monitor(struct kvm_vcpu *vcpu)
8056 {
8057         printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
8058         return handle_nop(vcpu);
8059 }
8060
8061 /*
8062  * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
8063  * set the success or error code of an emulated VMX instruction (as specified
8064  * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
8065  * instruction.
8066  */
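     /*
      * Per those conventions: VMsucceed clears CF, PF, AF, ZF, SF and OF;
      * VMfailInvalid sets only CF; VMfailValid sets only ZF and stores the
      * error number in the current VMCS's VM_INSTRUCTION_ERROR field.
      */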
8067 static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
8068 {
8069         vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
8070                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
8071                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
8072         return kvm_skip_emulated_instruction(vcpu);
8073 }
8074
8075 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
8076 {
8077         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
8078                         & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
8079                             X86_EFLAGS_SF | X86_EFLAGS_OF))
8080                         | X86_EFLAGS_CF);
8081         return kvm_skip_emulated_instruction(vcpu);
8082 }
8083
8084 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
8085                                 u32 vm_instruction_error)
8086 {
8087         /*
8088          * failValid writes the error number to the current VMCS, which
8089          * can't be done if there isn't a current VMCS.
8090          */
8091         if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
8092                 return nested_vmx_failInvalid(vcpu);
8093
8094         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
8095                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
8096                             X86_EFLAGS_SF | X86_EFLAGS_OF))
8097                         | X86_EFLAGS_ZF);
8098         get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
8099         /*
8100          * We don't need to force a shadow sync because
8101          * VM_INSTRUCTION_ERROR is not shadowed
8102          */
8103         return kvm_skip_emulated_instruction(vcpu);
8104 }
8105
8106 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
8107 {
8108         /* TODO: don't just reset the guest here. */
8109         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
8110         pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
8111 }
8112
8113 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
8114 {
8115         struct vcpu_vmx *vmx =
8116                 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
8117
8118         vmx->nested.preemption_timer_expired = true;
8119         kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
8120         kvm_vcpu_kick(&vmx->vcpu);
8121
8122         return HRTIMER_NORESTART;
8123 }
8124
8125 /*
8126  * Decode the memory-address operand of a vmx instruction, as recorded on an
8127  * exit caused by such an instruction (run by a guest hypervisor).
8128  * On success, returns 0. When the operand is invalid, returns 1 and throws
8129  * On success, returns 0. When the operand is invalid, returns 1 and injects
8130  * #UD or #GP into the guest.
8131 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
8132                                  unsigned long exit_qualification,
8133                                  u32 vmx_instruction_info, bool wr, gva_t *ret)
8134 {
8135         gva_t off;
8136         bool exn;
8137         struct kvm_segment s;
8138
8139         /*
8140          * According to Vol. 3B, "Information for VM Exits Due to Instruction
8141          * Execution", on an exit, vmx_instruction_info holds most of the
8142          * addressing components of the operand. Only the displacement part
8143          * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
8144          * For how an actual address is calculated from all these components,
8145          * refer to Vol. 1, "Operand Addressing".
8146          */
8147         int  scaling = vmx_instruction_info & 3;
8148         int  addr_size = (vmx_instruction_info >> 7) & 7;
8149         bool is_reg = vmx_instruction_info & (1u << 10);
8150         int  seg_reg = (vmx_instruction_info >> 15) & 7;
8151         int  index_reg = (vmx_instruction_info >> 18) & 0xf;
8152         bool index_is_valid = !(vmx_instruction_info & (1u << 22));
8153         int  base_reg       = (vmx_instruction_info >> 23) & 0xf;
8154         bool base_is_valid  = !(vmx_instruction_info & (1u << 27));
8155
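             /*
              * Illustration: for a memory operand such as 0x10(%rax,%rbx,4),
              * base_reg selects RAX, index_reg selects RBX, scaling is 2 (a
              * shift count, i.e. a scale factor of 4) and the displacement
              * 0x10 arrives in exit_qualification.
              */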
8156         if (is_reg) {
8157                 kvm_queue_exception(vcpu, UD_VECTOR);
8158                 return 1;
8159         }
8160
8161         /* Addr = segment_base + offset */
8162         /* offset = base + [index * scale] + displacement */
8163         off = exit_qualification; /* holds the displacement */
8164         if (base_is_valid)
8165                 off += kvm_register_read(vcpu, base_reg);
8166         if (index_is_valid)
8167                 off += kvm_register_read(vcpu, index_reg)<<scaling;
8168         vmx_get_segment(vcpu, &s, seg_reg);
8169         *ret = s.base + off;
8170
8171         if (addr_size == 1) /* 32 bit */
8172                 *ret &= 0xffffffff;
8173
8174         /* Checks for #GP/#SS exceptions. */
8175         exn = false;
8176         if (is_long_mode(vcpu)) {
8177                 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
8178                  * non-canonical form. This is the only check on the memory
8179                  * destination for long mode!
8180                  */
8181                 exn = is_noncanonical_address(*ret, vcpu);
8182         } else if (is_protmode(vcpu)) {
8183                 /* Protected mode: apply checks for segment validity in the
8184                  * following order:
8185                  * - segment type check (#GP(0) may be thrown)
8186                  * - usability check (#GP(0)/#SS(0))
8187                  * - limit check (#GP(0)/#SS(0))
8188                  */
8189                 if (wr)
8190                         /* #GP(0) if the destination operand is located in a
8191                          * read-only data segment or any code segment.
8192                          */
8193                         exn = ((s.type & 0xa) == 0 || (s.type & 8));
8194                 else
8195                         /* #GP(0) if the source operand is located in an
8196                          * execute-only code segment
8197                          */
8198                         exn = ((s.type & 0xa) == 8);
8199                 if (exn) {
8200                         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
8201                         return 1;
8202                 }
8203                 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
8204                  */
8205                 exn = (s.unusable != 0);
8206                 /* Protected mode: #GP(0)/#SS(0) if the memory
8207                  * operand is outside the segment limit.
8208                  */
8209                 exn = exn || (off + sizeof(u64) > s.limit);
8210         }
8211         if (exn) {
8212                 kvm_queue_exception_e(vcpu,
8213                                       seg_reg == VCPU_SREG_SS ?
8214                                                 SS_VECTOR : GP_VECTOR,
8215                                       0);
8216                 return 1;
8217         }
8218
8219         return 0;
8220 }
8221
8222 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
8223 {
8224         gva_t gva;
8225         struct x86_exception e;
8226
8227         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
8228                         vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
8229                 return 1;
8230
8231         if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
8232                 kvm_inject_page_fault(vcpu, &e);
8233                 return 1;
8234         }
8235
8236         return 0;
8237 }
8238
8239 /*
8240  * Allocate a shadow VMCS and associate it with the currently loaded
8241  * VMCS, unless such a shadow VMCS already exists. The newly allocated
8242  * VMCS is also VMCLEARed, so that it is ready for use.
8243  */
8244 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
8245 {
8246         struct vcpu_vmx *vmx = to_vmx(vcpu);
8247         struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
8248
8249         /*
8250          * We should allocate a shadow vmcs for vmcs01 only when L1
8251          * executes VMXON and free it when L1 executes VMXOFF.
8252          * As it is invalid to execute VMXON twice, we shouldn't reach
8253          * here when vmcs01 already has an allocated shadow vmcs.
8254          */
8255         WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
8256
8257         if (!loaded_vmcs->shadow_vmcs) {
8258                 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
8259                 if (loaded_vmcs->shadow_vmcs)
8260                         vmcs_clear(loaded_vmcs->shadow_vmcs);
8261         }
8262         return loaded_vmcs->shadow_vmcs;
8263 }
8264
8265 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
8266 {
8267         struct vcpu_vmx *vmx = to_vmx(vcpu);
8268         int r;
8269
8270         r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
8271         if (r < 0)
8272                 goto out_vmcs02;
8273
8274         vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
8275         if (!vmx->nested.cached_vmcs12)
8276                 goto out_cached_vmcs12;
8277
8278         vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
8279         if (!vmx->nested.cached_shadow_vmcs12)
8280                 goto out_cached_shadow_vmcs12;
8281
8282         if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
8283                 goto out_shadow_vmcs;
8284
8285         hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
8286                      HRTIMER_MODE_REL_PINNED);
8287         vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
8288
8289         vmx->nested.vpid02 = allocate_vpid();
8290
8291         vmx->nested.vmcs02_initialized = false;
8292         vmx->nested.vmxon = true;
8293         return 0;
8294
8295 out_shadow_vmcs:
8296         kfree(vmx->nested.cached_shadow_vmcs12);
8297
8298 out_cached_shadow_vmcs12:
8299         kfree(vmx->nested.cached_vmcs12);
8300
8301 out_cached_vmcs12:
8302         free_loaded_vmcs(&vmx->nested.vmcs02);
8303
8304 out_vmcs02:
8305         return -ENOMEM;
8306 }
8307
8308 /*
8309  * Emulate the VMXON instruction.
8310  * Currently, we just remember that VMX is active, and do not save or even
8311  * inspect the argument to VMXON (the so-called "VMXON pointer") because we
8312  * do not currently need to store anything in that guest-allocated memory
8313  * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
8314  * argument is different from the VMXON pointer (which the spec says they do).
8315  */
8316 static int handle_vmon(struct kvm_vcpu *vcpu)
8317 {
8318         int ret;
8319         gpa_t vmptr;
8320         struct page *page;
8321         struct vcpu_vmx *vmx = to_vmx(vcpu);
8322         const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
8323                 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
8324
8325         /*
8326          * The Intel VMX Instruction Reference lists a bunch of bits that are
8327          * prerequisite to running VMXON, most notably cr4.VMXE must be set to
8328          * 1 (see vmx_set_cr4() for when we allow the guest to set this).
8329          * Otherwise, we should fail with #UD.  But most faulting conditions
8330          * have already been checked by hardware, prior to the VM-exit for
8331          * VMXON.  We do test guest cr4.VMXE because processor CR4 always has
8332          * that bit set to 1 in non-root mode.
8333          */
8334         if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
8335                 kvm_queue_exception(vcpu, UD_VECTOR);
8336                 return 1;
8337         }
8338
8339         /* CPL=0 must be checked manually. */
8340         if (vmx_get_cpl(vcpu)) {
8341                 kvm_inject_gp(vcpu, 0);
8342                 return 1;
8343         }
8344
8345         if (vmx->nested.vmxon)
8346                 return nested_vmx_failValid(vcpu,
8347                         VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
8348
8349         if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
8350                         != VMXON_NEEDED_FEATURES) {
8351                 kvm_inject_gp(vcpu, 0);
8352                 return 1;
8353         }
8354
8355         if (nested_vmx_get_vmptr(vcpu, &vmptr))
8356                 return 1;
8357
8358         /*
8359          * SDM 3: 24.11.5
8360          * The first 4 bytes of VMXON region contain the supported
8361          * VMCS revision identifier
8362          *
8363          * Note - IA32_VMX_BASIC[48], which would cap the physical address
8364          * width at 32 bits, is never 1 for the nested case.
8365          */
8366         if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
8367                 return nested_vmx_failInvalid(vcpu);
8368
8369         page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
8370         if (is_error_page(page))
8371                 return nested_vmx_failInvalid(vcpu);
8372
8373         if (*(u32 *)kmap(page) != VMCS12_REVISION) {
8374                 kunmap(page);
8375                 kvm_release_page_clean(page);
8376                 return nested_vmx_failInvalid(vcpu);
8377         }
8378         kunmap(page);
8379         kvm_release_page_clean(page);
8380
8381         vmx->nested.vmxon_ptr = vmptr;
8382         ret = enter_vmx_operation(vcpu);
8383         if (ret)
8384                 return ret;
8385
8386         return nested_vmx_succeed(vcpu);
8387 }
8388
8389 /*
8390  * Intel's VMX Instruction Reference specifies a common set of prerequisites
8391  * for running VMX instructions (except VMXON, whose prerequisites are
8392  * slightly different). It also specifies what exception to inject otherwise.
8393  * Note that many of these exceptions have priority over VM exits, so they
8394  * don't have to be checked again here.
8395  */
8396 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
8397 {
8398         if (!to_vmx(vcpu)->nested.vmxon) {
8399                 kvm_queue_exception(vcpu, UD_VECTOR);
8400                 return 0;
8401         }
8402
8403         if (vmx_get_cpl(vcpu)) {
8404                 kvm_inject_gp(vcpu, 0);
8405                 return 0;
8406         }
8407
8408         return 1;
8409 }
8410
8411 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
8412 {
8413         vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
8414         vmcs_write64(VMCS_LINK_POINTER, -1ull);
8415 }
8416
8417 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
8418 {
8419         if (vmx->nested.current_vmptr == -1ull)
8420                 return;
8421
8422         if (enable_shadow_vmcs) {
8423                 /* copy to memory all shadowed fields in case
8424                    they were modified */
8425                 copy_shadow_to_vmcs12(vmx);
8426                 vmx->nested.sync_shadow_vmcs = false;
8427                 vmx_disable_shadow_vmcs(vmx);
8428         }
8429         vmx->nested.posted_intr_nv = -1;
8430
8431         /* Flush VMCS12 to guest memory */
8432         kvm_vcpu_write_guest_page(&vmx->vcpu,
8433                                   vmx->nested.current_vmptr >> PAGE_SHIFT,
8434                                   vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
8435
8436         vmx->nested.current_vmptr = -1ull;
8437 }
8438
8439 /*
8440  * Free whatever needs to be freed from vmx->nested when L1 goes down, or
8441  * just stops using VMX.
8442  */
8443 static void free_nested(struct vcpu_vmx *vmx)
8444 {
8445         if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
8446                 return;
8447
8448         vmx->nested.vmxon = false;
8449         vmx->nested.smm.vmxon = false;
8450         free_vpid(vmx->nested.vpid02);
8451         vmx->nested.posted_intr_nv = -1;
8452         vmx->nested.current_vmptr = -1ull;
8453         if (enable_shadow_vmcs) {
8454                 vmx_disable_shadow_vmcs(vmx);
8455                 vmcs_clear(vmx->vmcs01.shadow_vmcs);
8456                 free_vmcs(vmx->vmcs01.shadow_vmcs);
8457                 vmx->vmcs01.shadow_vmcs = NULL;
8458         }
8459         kfree(vmx->nested.cached_vmcs12);
8460         kfree(vmx->nested.cached_shadow_vmcs12);
8461         /* Unpin physical memory we referred to in the vmcs02 */
8462         if (vmx->nested.apic_access_page) {
8463                 kvm_release_page_dirty(vmx->nested.apic_access_page);
8464                 vmx->nested.apic_access_page = NULL;
8465         }
8466         if (vmx->nested.virtual_apic_page) {
8467                 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
8468                 vmx->nested.virtual_apic_page = NULL;
8469         }
8470         if (vmx->nested.pi_desc_page) {
8471                 kunmap(vmx->nested.pi_desc_page);
8472                 kvm_release_page_dirty(vmx->nested.pi_desc_page);
8473                 vmx->nested.pi_desc_page = NULL;
8474                 vmx->nested.pi_desc = NULL;
8475         }
8476
8477         free_loaded_vmcs(&vmx->nested.vmcs02);
8478 }
8479
8480 /* Emulate the VMXOFF instruction */
8481 static int handle_vmoff(struct kvm_vcpu *vcpu)
8482 {
8483         if (!nested_vmx_check_permission(vcpu))
8484                 return 1;
8485         free_nested(to_vmx(vcpu));
8486         return nested_vmx_succeed(vcpu);
8487 }
8488
8489 /* Emulate the VMCLEAR instruction */
8490 static int handle_vmclear(struct kvm_vcpu *vcpu)
8491 {
8492         struct vcpu_vmx *vmx = to_vmx(vcpu);
8493         u32 zero = 0;
8494         gpa_t vmptr;
8495
8496         if (!nested_vmx_check_permission(vcpu))
8497                 return 1;
8498
8499         if (nested_vmx_get_vmptr(vcpu, &vmptr))
8500                 return 1;
8501
8502         if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
8503                 return nested_vmx_failValid(vcpu,
8504                         VMXERR_VMCLEAR_INVALID_ADDRESS);
8505
8506         if (vmptr == vmx->nested.vmxon_ptr)
8507                 return nested_vmx_failValid(vcpu,
8508                         VMXERR_VMCLEAR_VMXON_POINTER);
8509
8510         if (vmptr == vmx->nested.current_vmptr)
8511                 nested_release_vmcs12(vmx);
8512
8513         kvm_vcpu_write_guest(vcpu,
8514                         vmptr + offsetof(struct vmcs12, launch_state),
8515                         &zero, sizeof(zero));
8516
8517         return nested_vmx_succeed(vcpu);
8518 }
8519
8520 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
8521
8522 /* Emulate the VMLAUNCH instruction */
8523 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
8524 {
8525         return nested_vmx_run(vcpu, true);
8526 }
8527
8528 /* Emulate the VMRESUME instruction */
8529 static int handle_vmresume(struct kvm_vcpu *vcpu)
8530 {
8531
8532         return nested_vmx_run(vcpu, false);
8533 }
8534
8535 /*
8536  * Read a vmcs12 field. Since these can have varying lengths and we return
8537  * one type, we chose the biggest type (u64) and zero-extend the return value
8538  * to that size. Note that the caller, handle_vmread, might need to use only
8539  * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
8540  * 64-bit fields are to be returned).
8541  */
8542 static inline int vmcs12_read_any(struct vmcs12 *vmcs12,
8543                                   unsigned long field, u64 *ret)
8544 {
8545         short offset = vmcs_field_to_offset(field);
8546         char *p;
8547
8548         if (offset < 0)
8549                 return offset;
8550
8551         p = (char *)vmcs12 + offset;
8552
8553         switch (vmcs_field_width(field)) {
8554         case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
8555                 *ret = *((natural_width *)p);
8556                 return 0;
8557         case VMCS_FIELD_WIDTH_U16:
8558                 *ret = *((u16 *)p);
8559                 return 0;
8560         case VMCS_FIELD_WIDTH_U32:
8561                 *ret = *((u32 *)p);
8562                 return 0;
8563         case VMCS_FIELD_WIDTH_U64:
8564                 *ret = *((u64 *)p);
8565                 return 0;
8566         default:
8567                 WARN_ON(1);
8568                 return -ENOENT;
8569         }
8570 }
8571
8572
8573 static inline int vmcs12_write_any(struct vmcs12 *vmcs12,
8574                                    unsigned long field, u64 field_value){
8575         short offset = vmcs_field_to_offset(field);
8576         char *p = (char *)vmcs12 + offset;
8577         if (offset < 0)
8578                 return offset;
8579
8580         switch (vmcs_field_width(field)) {
8581         case VMCS_FIELD_WIDTH_U16:
8582                 *(u16 *)p = field_value;
8583                 return 0;
8584         case VMCS_FIELD_WIDTH_U32:
8585                 *(u32 *)p = field_value;
8586                 return 0;
8587         case VMCS_FIELD_WIDTH_U64:
8588                 *(u64 *)p = field_value;
8589                 return 0;
8590         case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
8591                 *(natural_width *)p = field_value;
8592                 return 0;
8593         default:
8594                 WARN_ON(1);
8595                 return -ENOENT;
8596         }
8597
8598 }
8599
8600 /*
8601  * Copy the writable VMCS shadow fields back to the VMCS12, in case
8602  * they have been modified by the L1 guest. Note that the "read-only"
8603  * VM-exit information fields are actually writable if the vCPU is
8604  * configured to support "VMWRITE to any supported field in the VMCS."
8605  */
8606 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
8607 {
8608         const u16 *fields[] = {
8609                 shadow_read_write_fields,
8610                 shadow_read_only_fields
8611         };
8612         const int max_fields[] = {
8613                 max_shadow_read_write_fields,
8614                 max_shadow_read_only_fields
8615         };
8616         int i, q;
8617         unsigned long field;
8618         u64 field_value;
8619         struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
8620
8621         preempt_disable();
8622
8623         vmcs_load(shadow_vmcs);
8624
8625         for (q = 0; q < ARRAY_SIZE(fields); q++) {
8626                 for (i = 0; i < max_fields[q]; i++) {
8627                         field = fields[q][i];
8628                         field_value = __vmcs_readl(field);
8629                         vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
8630                 }
8631                 /*
8632                  * Skip the VM-exit information fields if they are read-only.
8633                  */
8634                 if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
8635                         break;
8636         }
8637
8638         vmcs_clear(shadow_vmcs);
8639         vmcs_load(vmx->loaded_vmcs->vmcs);
8640
8641         preempt_enable();
8642 }
8643
8644 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
8645 {
8646         const u16 *fields[] = {
8647                 shadow_read_write_fields,
8648                 shadow_read_only_fields
8649         };
8650         const int max_fields[] = {
8651                 max_shadow_read_write_fields,
8652                 max_shadow_read_only_fields
8653         };
8654         int i, q;
8655         unsigned long field;
8656         u64 field_value = 0;
8657         struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
8658
8659         vmcs_load(shadow_vmcs);
8660
8661         for (q = 0; q < ARRAY_SIZE(fields); q++) {
8662                 for (i = 0; i < max_fields[q]; i++) {
8663                         field = fields[q][i];
8664                         vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
8665                         __vmcs_writel(field, field_value);
8666                 }
8667         }
8668
8669         vmcs_clear(shadow_vmcs);
8670         vmcs_load(vmx->loaded_vmcs->vmcs);
8671 }
8672
8673 static int handle_vmread(struct kvm_vcpu *vcpu)
8674 {
8675         unsigned long field;
8676         u64 field_value;
8677         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
8678         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8679         gva_t gva = 0;
8680         struct vmcs12 *vmcs12;
8681
8682         if (!nested_vmx_check_permission(vcpu))
8683                 return 1;
8684
8685         if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
8686                 return nested_vmx_failInvalid(vcpu);
8687
8688         if (!is_guest_mode(vcpu))
8689                 vmcs12 = get_vmcs12(vcpu);
8690         else {
8691                 /*
8692          * When vmcs12->vmcs_link_pointer is -1ull, any VMREAD
8693          * of a shadowed field sets the ALU flags for VMfailInvalid.
8694                  */
8695                 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
8696                         return nested_vmx_failInvalid(vcpu);
8697                 vmcs12 = get_shadow_vmcs12(vcpu);
8698         }
8699
8700         /* Decode instruction info and find the field to read */
8701         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
8702         /* Read the field, zero-extended to a u64 field_value */
8703         if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
8704                 return nested_vmx_failValid(vcpu,
8705                         VMXERR_UNSUPPORTED_VMCS_COMPONENT);
8706
8707         /*
8708          * Now copy part of this value to register or memory, as requested.
8709          * Note that the number of bits actually copied is 32 or 64 depending
8710          * on the guest's mode (32 or 64 bit), not on the given field's length.
8711          */
8712         if (vmx_instruction_info & (1u << 10)) {
8713                 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
8714                         field_value);
8715         } else {
8716                 if (get_vmx_mem_address(vcpu, exit_qualification,
8717                                 vmx_instruction_info, true, &gva))
8718                         return 1;
8719                 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
8720                 kvm_write_guest_virt_system(vcpu, gva, &field_value,
8721                                             (is_long_mode(vcpu) ? 8 : 4), NULL);
8722         }
8723
8724         return nested_vmx_succeed(vcpu);
8725 }
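
/*
 * Illustrative sketch (not part of KVM): how handle_vmread() and
 * handle_vmwrite() above pick apart the VM-exit instruction-information
 * word.  As used above, bits 31:28 name the GPR holding the VMCS field
 * encoding, bit 10 says whether the other operand lives in a register, and
 * bits 6:3 name that register.  The struct and helper names are hypothetical.
 */
struct demo_vmx_insn_info {
	u8 field_reg;		/* GPR holding the VMCS field encoding */
	bool reg_operand;	/* true: value operand is a GPR, false: memory */
	u8 operand_reg;		/* GPR used when reg_operand is true */
};

static inline struct demo_vmx_insn_info demo_decode_insn_info(u32 info)
{
	struct demo_vmx_insn_info d = {
		.field_reg   = (info >> 28) & 0xf,
		.reg_operand = (info >> 10) & 1,
		.operand_reg = (info >> 3) & 0xf,
	};

	return d;
}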
8726
8727
8728 static int handle_vmwrite(struct kvm_vcpu *vcpu)
8729 {
8730         unsigned long field;
8731         gva_t gva;
8732         struct vcpu_vmx *vmx = to_vmx(vcpu);
8733         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
8734         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8735
8736         /* The value to write might be 32 or 64 bits, depending on L1's long
8737          * mode, and eventually we need to write that into a field of several
8738          * possible lengths. The code below first zero-extends the value to 64
8739          * bit (field_value), and then copies only the appropriate number of
8740          * bits (field_value), and then copies only the appropriate number of
8741          */
8742         u64 field_value = 0;
8743         struct x86_exception e;
8744         struct vmcs12 *vmcs12;
8745
8746         if (!nested_vmx_check_permission(vcpu))
8747                 return 1;
8748
8749         if (vmx->nested.current_vmptr == -1ull)
8750                 return nested_vmx_failInvalid(vcpu);
8751
8752         if (vmx_instruction_info & (1u << 10))
8753                 field_value = kvm_register_readl(vcpu,
8754                         (((vmx_instruction_info) >> 3) & 0xf));
8755         else {
8756                 if (get_vmx_mem_address(vcpu, exit_qualification,
8757                                 vmx_instruction_info, false, &gva))
8758                         return 1;
8759                 if (kvm_read_guest_virt(vcpu, gva, &field_value,
8760                                         (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
8761                         kvm_inject_page_fault(vcpu, &e);
8762                         return 1;
8763                 }
8764         }
8765
8766
8767         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
8768         /*
8769          * If the vCPU supports "VMWRITE to any supported field in the
8770          * VMCS," then the "read-only" fields are actually read/write.
8771          */
8772         if (vmcs_field_readonly(field) &&
8773             !nested_cpu_has_vmwrite_any_field(vcpu))
8774                 return nested_vmx_failValid(vcpu,
8775                         VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
8776
8777         if (!is_guest_mode(vcpu))
8778                 vmcs12 = get_vmcs12(vcpu);
8779         else {
8780                 /*
8781          * When vmcs12->vmcs_link_pointer is -1ull, any VMWRITE
8782          * of a shadowed field sets the ALU flags for VMfailInvalid.
8783                  */
8784                 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
8785                         return nested_vmx_failInvalid(vcpu);
8786                 vmcs12 = get_shadow_vmcs12(vcpu);
8787         }
8788
8789         if (vmcs12_write_any(vmcs12, field, field_value) < 0)
8790                 return nested_vmx_failValid(vcpu,
8791                         VMXERR_UNSUPPORTED_VMCS_COMPONENT);
8792
8793         /*
8794          * Do not track vmcs12 dirty-state if in guest-mode
8795          * as we actually dirty shadow vmcs12 instead of vmcs12.
8796          */
8797         if (!is_guest_mode(vcpu)) {
8798                 switch (field) {
8799 #define SHADOW_FIELD_RW(x) case x:
8800 #include "vmx_shadow_fields.h"
8801                         /*
8802                          * The fields that can be updated by L1 without a vmexit are
8803                          * always updated in the vmcs02; the others go down the slow
8804                          * path of prepare_vmcs02.
8805                          */
8806                         break;
8807                 default:
8808                         vmx->nested.dirty_vmcs12 = true;
8809                         break;
8810                 }
8811         }
8812
8813         return nested_vmx_succeed(vcpu);
8814 }
8815
8816 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
8817 {
8818         vmx->nested.current_vmptr = vmptr;
8819         if (enable_shadow_vmcs) {
8820                 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
8821                               SECONDARY_EXEC_SHADOW_VMCS);
8822                 vmcs_write64(VMCS_LINK_POINTER,
8823                              __pa(vmx->vmcs01.shadow_vmcs));
8824                 vmx->nested.sync_shadow_vmcs = true;
8825         }
8826         vmx->nested.dirty_vmcs12 = true;
8827 }
8828
8829 /* Emulate the VMPTRLD instruction */
8830 static int handle_vmptrld(struct kvm_vcpu *vcpu)
8831 {
8832         struct vcpu_vmx *vmx = to_vmx(vcpu);
8833         gpa_t vmptr;
8834
8835         if (!nested_vmx_check_permission(vcpu))
8836                 return 1;
8837
8838         if (nested_vmx_get_vmptr(vcpu, &vmptr))
8839                 return 1;
8840
8841         if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
8842                 return nested_vmx_failValid(vcpu,
8843                         VMXERR_VMPTRLD_INVALID_ADDRESS);
8844
8845         if (vmptr == vmx->nested.vmxon_ptr)
8846                 return nested_vmx_failValid(vcpu,
8847                         VMXERR_VMPTRLD_VMXON_POINTER);
8848
8849         if (vmx->nested.current_vmptr != vmptr) {
8850                 struct vmcs12 *new_vmcs12;
8851                 struct page *page;
8852                 page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
8853                 if (is_error_page(page))
8854                         return nested_vmx_failInvalid(vcpu);
8855
8856                 new_vmcs12 = kmap(page);
8857                 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
8858                     (new_vmcs12->hdr.shadow_vmcs &&
8859                      !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
8860                         kunmap(page);
8861                         kvm_release_page_clean(page);
8862                         return nested_vmx_failValid(vcpu,
8863                                 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
8864                 }
8865
8866                 nested_release_vmcs12(vmx);
8867                 /*
8868                  * Load VMCS12 from guest memory since it is not already
8869                  * cached.
8870                  */
8871                 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
8872                 kunmap(page);
8873                 kvm_release_page_clean(page);
8874
8875                 set_current_vmptr(vmx, vmptr);
8876         }
8877
8878         return nested_vmx_succeed(vcpu);
8879 }
8880
8881 /* Emulate the VMPTRST instruction */
8882 static int handle_vmptrst(struct kvm_vcpu *vcpu)
8883 {
8884         unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
8885         u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8886         gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
8887         struct x86_exception e;
8888         gva_t gva;
8889
8890         if (!nested_vmx_check_permission(vcpu))
8891                 return 1;
8892
8893         if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
8894                 return 1;
8895         /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
8896         if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
8897                                         sizeof(gpa_t), &e)) {
8898                 kvm_inject_page_fault(vcpu, &e);
8899                 return 1;
8900         }
8901         return nested_vmx_succeed(vcpu);
8902 }
8903
8904 /* Emulate the INVEPT instruction */
8905 static int handle_invept(struct kvm_vcpu *vcpu)
8906 {
8907         struct vcpu_vmx *vmx = to_vmx(vcpu);
8908         u32 vmx_instruction_info, types;
8909         unsigned long type;
8910         gva_t gva;
8911         struct x86_exception e;
8912         struct {
8913                 u64 eptp, gpa;
8914         } operand;
8915
8916         if (!(vmx->nested.msrs.secondary_ctls_high &
8917               SECONDARY_EXEC_ENABLE_EPT) ||
8918             !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
8919                 kvm_queue_exception(vcpu, UD_VECTOR);
8920                 return 1;
8921         }
8922
8923         if (!nested_vmx_check_permission(vcpu))
8924                 return 1;
8925
8926         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8927         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
8928
8929         types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
8930
8931         if (type >= 32 || !(types & (1 << type)))
8932                 return nested_vmx_failValid(vcpu,
8933                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
8934
8935         /* According to the Intel VMX instruction reference, the memory
8936          * operand is read even if it isn't needed (e.g., for type==global)
8937          */
8938         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
8939                         vmx_instruction_info, false, &gva))
8940                 return 1;
8941         if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
8942                 kvm_inject_page_fault(vcpu, &e);
8943                 return 1;
8944         }
8945
8946         switch (type) {
8947         case VMX_EPT_EXTENT_GLOBAL:
8948         /*
8949          * TODO: track mappings and invalidate
8950          * single context requests appropriately
8951          */
8952         case VMX_EPT_EXTENT_CONTEXT:
8953                 kvm_mmu_sync_roots(vcpu);
8954                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
8955                 break;
8956         default:
8957                 BUG_ON(1);
8958                 break;
8959         }
8960
8961         return nested_vmx_succeed(vcpu);
8962 }
8963
8964 static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
8965 {
8966         struct vcpu_vmx *vmx = to_vmx(vcpu);
8967
8968         return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
8969 }
8970
8971 static int handle_invvpid(struct kvm_vcpu *vcpu)
8972 {
8973         struct vcpu_vmx *vmx = to_vmx(vcpu);
8974         u32 vmx_instruction_info;
8975         unsigned long type, types;
8976         gva_t gva;
8977         struct x86_exception e;
8978         struct {
8979                 u64 vpid;
8980                 u64 gla;
8981         } operand;
8982         u16 vpid02;
8983
8984         if (!(vmx->nested.msrs.secondary_ctls_high &
8985               SECONDARY_EXEC_ENABLE_VPID) ||
8986                         !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
8987                 kvm_queue_exception(vcpu, UD_VECTOR);
8988                 return 1;
8989         }
8990
8991         if (!nested_vmx_check_permission(vcpu))
8992                 return 1;
8993
8994         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8995         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
8996
8997         types = (vmx->nested.msrs.vpid_caps &
8998                         VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
8999
9000         if (type >= 32 || !(types & (1 << type)))
9001                 return nested_vmx_failValid(vcpu,
9002                         VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
9003
9004         /* According to the Intel VMX instruction reference, the memory
9005          * operand is read even if it isn't needed (e.g., for type==global)
9006          */
9007         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
9008                         vmx_instruction_info, false, &gva))
9009                 return 1;
9010         if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
9011                 kvm_inject_page_fault(vcpu, &e);
9012                 return 1;
9013         }
9014         if (operand.vpid >> 16)
9015                 return nested_vmx_failValid(vcpu,
9016                         VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
9017
9018         vpid02 = nested_get_vpid02(vcpu);
9019         switch (type) {
9020         case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
9021                 if (!operand.vpid ||
9022                     is_noncanonical_address(operand.gla, vcpu))
9023                         return nested_vmx_failValid(vcpu,
9024                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
9025                 if (cpu_has_vmx_invvpid_individual_addr()) {
9026                         __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
9027                                 vpid02, operand.gla);
9028                 } else
9029                         __vmx_flush_tlb(vcpu, vpid02, false);
9030                 break;
9031         case VMX_VPID_EXTENT_SINGLE_CONTEXT:
9032         case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
9033                 if (!operand.vpid)
9034                         return nested_vmx_failValid(vcpu,
9035                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
9036                 __vmx_flush_tlb(vcpu, vpid02, false);
9037                 break;
9038         case VMX_VPID_EXTENT_ALL_CONTEXT:
9039                 __vmx_flush_tlb(vcpu, vpid02, false);
9040                 break;
9041         default:
9042                 WARN_ON_ONCE(1);
9043                 return kvm_skip_emulated_instruction(vcpu);
9044         }
9045
9046         return nested_vmx_succeed(vcpu);
9047 }
9048
9049 static int handle_invpcid(struct kvm_vcpu *vcpu)
9050 {
9051         u32 vmx_instruction_info;
9052         unsigned long type;
9053         bool pcid_enabled;
9054         gva_t gva;
9055         struct x86_exception e;
9056         unsigned i;
9057         unsigned long roots_to_free = 0;
9058         struct {
9059                 u64 pcid;
9060                 u64 gla;
9061         } operand;
9062
9063         if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
9064                 kvm_queue_exception(vcpu, UD_VECTOR);
9065                 return 1;
9066         }
9067
9068         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
9069         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
9070
9071         if (type > 3) {
9072                 kvm_inject_gp(vcpu, 0);
9073                 return 1;
9074         }
9075
9076         /* According to the Intel instruction reference, the memory operand
9077          * is read even if it isn't needed (e.g., for type==all)
9078          */
9079         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
9080                                 vmx_instruction_info, false, &gva))
9081                 return 1;
9082
9083         if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
9084                 kvm_inject_page_fault(vcpu, &e);
9085                 return 1;
9086         }
9087
9088         if (operand.pcid >> 12 != 0) {
9089                 kvm_inject_gp(vcpu, 0);
9090                 return 1;
9091         }
9092
9093         pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
9094
9095         switch (type) {
9096         case INVPCID_TYPE_INDIV_ADDR:
9097                 if ((!pcid_enabled && (operand.pcid != 0)) ||
9098                     is_noncanonical_address(operand.gla, vcpu)) {
9099                         kvm_inject_gp(vcpu, 0);
9100                         return 1;
9101                 }
9102                 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
9103                 return kvm_skip_emulated_instruction(vcpu);
9104
9105         case INVPCID_TYPE_SINGLE_CTXT:
9106                 if (!pcid_enabled && (operand.pcid != 0)) {
9107                         kvm_inject_gp(vcpu, 0);
9108                         return 1;
9109                 }
9110
9111                 if (kvm_get_active_pcid(vcpu) == operand.pcid) {
9112                         kvm_mmu_sync_roots(vcpu);
9113                         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
9114                 }
9115
9116                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
9117                         if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_roots[i].cr3)
9118                             == operand.pcid)
9119                                 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
9120
9121                 kvm_mmu_free_roots(vcpu, roots_to_free);
9122                 /*
9123                  * If neither the current cr3 nor any of the prev_roots use the
9124                  * given PCID, then nothing needs to be done here because a
9125                  * resync will happen anyway before switching to any other CR3.
9126                  */
9127
9128                 return kvm_skip_emulated_instruction(vcpu);
9129
9130         case INVPCID_TYPE_ALL_NON_GLOBAL:
9131                 /*
9132                  * Currently, KVM doesn't mark global entries in the shadow
9133                  * page tables, so a non-global flush just degenerates to a
9134                  * global flush. If needed, we could optimize this later by
9135                  * keeping track of global entries in shadow page tables.
9136                  */
9137
9138                 /* fall-through */
9139         case INVPCID_TYPE_ALL_INCL_GLOBAL:
9140                 kvm_mmu_unload(vcpu);
9141                 return kvm_skip_emulated_instruction(vcpu);
9142
9143         default:
9144                 BUG(); /* We have already checked above that type <= 3 */
9145         }
9146 }
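
/*
 * Illustrative sketch (not part of KVM): the INVPCID memory operand read
 * above is a 16-byte descriptor, a 64-bit PCID followed by a 64-bit linear
 * address.  PCIDs are only 12 bits wide, so bits 63:12 of the first quadword
 * are reserved; the "operand.pcid >> 12" test above injects #GP when any of
 * them is set.  The helper name below is hypothetical.
 */
static inline bool demo_invpcid_pcid_valid(u64 pcid_qword)
{
	return (pcid_qword >> 12) == 0;	/* reserved bits 63:12 must be clear */
}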
9147
9148 static int handle_pml_full(struct kvm_vcpu *vcpu)
9149 {
9150         unsigned long exit_qualification;
9151
9152         trace_kvm_pml_full(vcpu->vcpu_id);
9153
9154         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
9155
9156         /*
9157          * If the PML buffer filled up while executing IRET from an NMI,
9158          * the "blocked by NMI" bit has to be set before the next VM entry.
9159          */
9160         if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
9161                         enable_vnmi &&
9162                         (exit_qualification & INTR_INFO_UNBLOCK_NMI))
9163                 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
9164                                 GUEST_INTR_STATE_NMI);
9165
9166         /*
9167          * PML buffer already flushed at beginning of VMEXIT. Nothing to do
9168          * here, and there's no userspace involvement needed for PML.
9169          */
9170         return 1;
9171 }
9172
9173 static int handle_preemption_timer(struct kvm_vcpu *vcpu)
9174 {
9175         if (!to_vmx(vcpu)->req_immediate_exit)
9176                 kvm_lapic_expired_hv_timer(vcpu);
9177         return 1;
9178 }
9179
9180 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
9181 {
9182         struct vcpu_vmx *vmx = to_vmx(vcpu);
9183         int maxphyaddr = cpuid_maxphyaddr(vcpu);
9184
9185         /* Check for memory type validity */
9186         switch (address & VMX_EPTP_MT_MASK) {
9187         case VMX_EPTP_MT_UC:
9188                 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
9189                         return false;
9190                 break;
9191         case VMX_EPTP_MT_WB:
9192                 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
9193                         return false;
9194                 break;
9195         default:
9196                 return false;
9197         }
9198
9199         /* Only a page-walk length of 4 is valid */
9200         if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
9201                 return false;
9202
9203         /* Reserved bits should not be set */
9204         if (address >> maxphyaddr || ((address >> 7) & 0x1f))
9205                 return false;
9206
9207         /* AD, if set, should be supported */
9208         if (address & VMX_EPTP_AD_ENABLE_BIT) {
9209                 if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
9210                         return false;
9211         }
9212
9213         return true;
9214 }
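
/*
 * Illustrative sketch (not part of KVM): the EPTP checks above decompose the
 * pointer as follows: bits 2:0 hold the memory type (0 = UC, 6 = WB),
 * bits 5:3 hold "page-walk length minus one" (so 3 means the required
 * 4-level walk), bit 6 enables accessed/dirty tracking, bits 11:7 are
 * reserved, and the remaining bits up to MAXPHYADDR hold the page-aligned
 * physical address of the top-level EPT table.  The helper is hypothetical.
 */
static inline void demo_decode_eptp(u64 eptp)
{
	u64 memtype   = eptp & 0x7;
	u64 walk_len  = ((eptp >> 3) & 0x7) + 1;
	bool ad_bits  = eptp & (1ULL << 6);
	u64 table_pfn = eptp >> 12;		/* page frame of the EPT PML4 */

	(void)memtype; (void)walk_len; (void)ad_bits; (void)table_pfn;
}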
9215
9216 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
9217                                      struct vmcs12 *vmcs12)
9218 {
9219         u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
9220         u64 address;
9221         bool accessed_dirty;
9222         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
9223
9224         if (!nested_cpu_has_eptp_switching(vmcs12) ||
9225             !nested_cpu_has_ept(vmcs12))
9226                 return 1;
9227
9228         if (index >= VMFUNC_EPTP_ENTRIES)
9229                 return 1;
9230
9231
9232         if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
9233                                      &address, index * 8, 8))
9234                 return 1;
9235
9236         accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
9237
9238         /*
9239          * If the (L2) guest does a vmfunc to the currently
9240          * active EPT pointer, we don't have to do anything else
9241          */
9242         if (vmcs12->ept_pointer != address) {
9243                 if (!valid_ept_address(vcpu, address))
9244                         return 1;
9245
9246                 kvm_mmu_unload(vcpu);
9247                 mmu->ept_ad = accessed_dirty;
9248                 mmu->base_role.ad_disabled = !accessed_dirty;
9249                 vmcs12->ept_pointer = address;
9250                 /*
9251                  * TODO: Check what's the correct approach in case
9252                  * mmu reload fails. Currently, we just let the next
9253                  * reload potentially fail
9254                  */
9255                 kvm_mmu_reload(vcpu);
9256         }
9257
9258         return 0;
9259 }
9260
9261 static int handle_vmfunc(struct kvm_vcpu *vcpu)
9262 {
9263         struct vcpu_vmx *vmx = to_vmx(vcpu);
9264         struct vmcs12 *vmcs12;
9265         u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
9266
9267         /*
9268          * VMFUNC is only supported for nested guests, but we always enable the
9269          * secondary control for simplicity; for non-nested mode, fake that we
9270          * didn't by injecting #UD.
9271          */
9272         if (!is_guest_mode(vcpu)) {
9273                 kvm_queue_exception(vcpu, UD_VECTOR);
9274                 return 1;
9275         }
9276
9277         vmcs12 = get_vmcs12(vcpu);
9278         if ((vmcs12->vm_function_control & (1 << function)) == 0)
9279                 goto fail;
9280
9281         switch (function) {
9282         case 0:
9283                 if (nested_vmx_eptp_switching(vcpu, vmcs12))
9284                         goto fail;
9285                 break;
9286         default:
9287                 goto fail;
9288         }
9289         return kvm_skip_emulated_instruction(vcpu);
9290
9291 fail:
9292         nested_vmx_vmexit(vcpu, vmx->exit_reason,
9293                           vmcs_read32(VM_EXIT_INTR_INFO),
9294                           vmcs_readl(EXIT_QUALIFICATION));
9295         return 1;
9296 }
9297
9298 static int handle_encls(struct kvm_vcpu *vcpu)
9299 {
9300         /*
9301          * SGX virtualization is not yet supported.  There is no software
9302          * enable bit for SGX, so we have to trap ENCLS and inject a #UD
9303          * to prevent the guest from executing ENCLS.
9304          */
9305         kvm_queue_exception(vcpu, UD_VECTOR);
9306         return 1;
9307 }
9308
9309 /*
9310  * The exit handlers return 1 if the exit was handled fully and guest execution
9311  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
9312  * to be done to userspace and return 0.
9313  */
9314 static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
9315         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
9316         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
9317         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
9318         [EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
9319         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
9320         [EXIT_REASON_CR_ACCESS]               = handle_cr,
9321         [EXIT_REASON_DR_ACCESS]               = handle_dr,
9322         [EXIT_REASON_CPUID]                   = handle_cpuid,
9323         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
9324         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
9325         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
9326         [EXIT_REASON_HLT]                     = handle_halt,
9327         [EXIT_REASON_INVD]                    = handle_invd,
9328         [EXIT_REASON_INVLPG]                  = handle_invlpg,
9329         [EXIT_REASON_RDPMC]                   = handle_rdpmc,
9330         [EXIT_REASON_VMCALL]                  = handle_vmcall,
9331         [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
9332         [EXIT_REASON_VMLAUNCH]                = handle_vmlaunch,
9333         [EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
9334         [EXIT_REASON_VMPTRST]                 = handle_vmptrst,
9335         [EXIT_REASON_VMREAD]                  = handle_vmread,
9336         [EXIT_REASON_VMRESUME]                = handle_vmresume,
9337         [EXIT_REASON_VMWRITE]                 = handle_vmwrite,
9338         [EXIT_REASON_VMOFF]                   = handle_vmoff,
9339         [EXIT_REASON_VMON]                    = handle_vmon,
9340         [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
9341         [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
9342         [EXIT_REASON_APIC_WRITE]              = handle_apic_write,
9343         [EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
9344         [EXIT_REASON_WBINVD]                  = handle_wbinvd,
9345         [EXIT_REASON_XSETBV]                  = handle_xsetbv,
9346         [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
9347         [EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
9348         [EXIT_REASON_GDTR_IDTR]               = handle_desc,
9349         [EXIT_REASON_LDTR_TR]                 = handle_desc,
9350         [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
9351         [EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
9352         [EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
9353         [EXIT_REASON_MWAIT_INSTRUCTION]       = handle_mwait,
9354         [EXIT_REASON_MONITOR_TRAP_FLAG]       = handle_monitor_trap,
9355         [EXIT_REASON_MONITOR_INSTRUCTION]     = handle_monitor,
9356         [EXIT_REASON_INVEPT]                  = handle_invept,
9357         [EXIT_REASON_INVVPID]                 = handle_invvpid,
9358         [EXIT_REASON_RDRAND]                  = handle_invalid_op,
9359         [EXIT_REASON_RDSEED]                  = handle_invalid_op,
9360         [EXIT_REASON_XSAVES]                  = handle_xsaves,
9361         [EXIT_REASON_XRSTORS]                 = handle_xrstors,
9362         [EXIT_REASON_PML_FULL]                = handle_pml_full,
9363         [EXIT_REASON_INVPCID]                 = handle_invpcid,
9364         [EXIT_REASON_VMFUNC]                  = handle_vmfunc,
9365         [EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
9366         [EXIT_REASON_ENCLS]                   = handle_encls,
9367 };
9368
9369 static const int kvm_vmx_max_exit_handlers =
9370         ARRAY_SIZE(kvm_vmx_exit_handlers);
9371
9372 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
9373                                        struct vmcs12 *vmcs12)
9374 {
9375         unsigned long exit_qualification;
9376         gpa_t bitmap, last_bitmap;
9377         unsigned int port;
9378         int size;
9379         u8 b;
9380
9381         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
9382                 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
9383
9384         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
9385
9386         port = exit_qualification >> 16;
9387         size = (exit_qualification & 7) + 1;
9388
9389         last_bitmap = (gpa_t)-1;
9390         b = -1;
9391
9392         while (size > 0) {
9393                 if (port < 0x8000)
9394                         bitmap = vmcs12->io_bitmap_a;
9395                 else if (port < 0x10000)
9396                         bitmap = vmcs12->io_bitmap_b;
9397                 else
9398                         return true;
9399                 bitmap += (port & 0x7fff) / 8;
9400
9401                 if (last_bitmap != bitmap)
9402                         if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
9403                                 return true;
9404                 if (b & (1 << (port & 7)))
9405                         return true;
9406
9407                 port++;
9408                 size--;
9409                 last_bitmap = bitmap;
9410         }
9411
9412         return false;
9413 }
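
/*
 * Illustrative sketch (not part of KVM): the I/O-bitmap walk above in plain
 * form.  Bitmap A covers ports 0x0000-0x7fff and bitmap B covers
 * 0x8000-0xffff, one bit per port; an access that spans several ports must
 * exit if any of the touched bits is set.  Here the bitmaps are plain local
 * buffers instead of guest memory, so the names and signature are
 * hypothetical.
 */
static inline bool demo_io_access_intercepted(const u8 *bitmap_a,
					      const u8 *bitmap_b,
					      unsigned int port, int size)
{
	while (size-- > 0) {
		const u8 *bm;

		if (port > 0xffff)
			return true;	/* past the last I/O port */
		bm = (port < 0x8000) ? bitmap_a : bitmap_b;
		if (bm[(port & 0x7fff) / 8] & (1 << (port & 7)))
			return true;
		port++;
	}

	return false;
}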
9414
9415 /*
9416  * Return 1 if we should exit from L2 to L1 to handle an MSR access,
9417  * rather than handle it ourselves in L0. I.e., check whether L1 wants to
9418  * intercept the current event (a read or write of a specific MSR) via its
9419  * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
9420  */
9421 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
9422         struct vmcs12 *vmcs12, u32 exit_reason)
9423 {
9424         u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
9425         gpa_t bitmap;
9426
9427         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
9428                 return true;
9429
9430         /*
9431          * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
9432          * for the four combinations of read/write and low/high MSR numbers.
9433          * First we need to figure out which of the four to use:
9434          */
9435         bitmap = vmcs12->msr_bitmap;
9436         if (exit_reason == EXIT_REASON_MSR_WRITE)
9437                 bitmap += 2048;
9438         if (msr_index >= 0xc0000000) {
9439                 msr_index -= 0xc0000000;
9440                 bitmap += 1024;
9441         }
9442
9443         /* Then read the msr_index'th bit from this bitmap: */
9444         if (msr_index < 1024*8) {
9445                 unsigned char b;
9446                 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
9447                         return true;
9448                 return 1 & (b >> (msr_index & 7));
9449         } else
9450                 return true; /* let L1 handle the wrong parameter */
9451 }
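
/*
 * Illustrative sketch (not part of KVM): the byte offset into the 4K MSR
 * bitmap page for a given access, matching the arithmetic above.  Offsets
 * 0, 1024, 2048 and 3072 hold the read-low, read-high, write-low and
 * write-high bitmaps respectively; each covers 8192 MSRs, one bit per MSR.
 * The helper name and its -1 convention are hypothetical.
 */
static inline long demo_msr_bitmap_offset(u32 msr, bool write)
{
	u32 base = write ? 2048 : 0;

	if (msr >= 0xc0000000) {
		msr -= 0xc0000000;
		base += 1024;
	}
	if (msr >= 1024 * 8)
		return -1;	/* outside both ranges; the caller exits to L1 */

	return base + msr / 8;
}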
9452
9453 /*
9454  * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
9455  * rather than handle it ourselves in L0. I.e., check if L1 wanted to
9456  * intercept (via guest_host_mask etc.) the current event.
9457  */
9458 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
9459         struct vmcs12 *vmcs12)
9460 {
9461         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
9462         int cr = exit_qualification & 15;
9463         int reg;
9464         unsigned long val;
9465
9466         switch ((exit_qualification >> 4) & 3) {
9467         case 0: /* mov to cr */
9468                 reg = (exit_qualification >> 8) & 15;
9469                 val = kvm_register_readl(vcpu, reg);
9470                 switch (cr) {
9471                 case 0:
9472                         if (vmcs12->cr0_guest_host_mask &
9473                             (val ^ vmcs12->cr0_read_shadow))
9474                                 return true;
9475                         break;
9476                 case 3:
9477                         if ((vmcs12->cr3_target_count >= 1 &&
9478                                         vmcs12->cr3_target_value0 == val) ||
9479                                 (vmcs12->cr3_target_count >= 2 &&
9480                                         vmcs12->cr3_target_value1 == val) ||
9481                                 (vmcs12->cr3_target_count >= 3 &&
9482                                         vmcs12->cr3_target_value2 == val) ||
9483                                 (vmcs12->cr3_target_count >= 4 &&
9484                                         vmcs12->cr3_target_value3 == val))
9485                                 return false;
9486                         if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
9487                                 return true;
9488                         break;
9489                 case 4:
9490                         if (vmcs12->cr4_guest_host_mask &
9491                             (vmcs12->cr4_read_shadow ^ val))
9492                                 return true;
9493                         break;
9494                 case 8:
9495                         if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
9496                                 return true;
9497                         break;
9498                 }
9499                 break;
9500         case 2: /* clts */
9501                 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
9502                     (vmcs12->cr0_read_shadow & X86_CR0_TS))
9503                         return true;
9504                 break;
9505         case 1: /* mov from cr */
9506                 switch (cr) {
9507                 case 3:
9508                         if (vmcs12->cpu_based_vm_exec_control &
9509                             CPU_BASED_CR3_STORE_EXITING)
9510                                 return true;
9511                         break;
9512                 case 8:
9513                         if (vmcs12->cpu_based_vm_exec_control &
9514                             CPU_BASED_CR8_STORE_EXITING)
9515                                 return true;
9516                         break;
9517                 }
9518                 break;
9519         case 3: /* lmsw */
9520                 /*
9521                  * lmsw can change bits 1..3 of cr0, and only set bit 0 of
9522                  * cr0. Other attempted changes are ignored, with no exit.
9523                  */
9524                 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
9525                 if (vmcs12->cr0_guest_host_mask & 0xe &
9526                     (val ^ vmcs12->cr0_read_shadow))
9527                         return true;
9528                 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
9529                     !(vmcs12->cr0_read_shadow & 0x1) &&
9530                     (val & 0x1))
9531                         return true;
9532                 break;
9533         }
9534         return false;
9535 }
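
/*
 * Illustrative sketch (not part of KVM): the core CR0/CR4 test used above.
 * A "mov to cr" only needs to reflect to L1 when the new value differs from
 * L1's read shadow in at least one bit position that L1 claimed ownership
 * of via its guest/host mask.  The helper name is hypothetical.
 */
static inline bool demo_cr_write_needs_exit(unsigned long guest_host_mask,
					    unsigned long read_shadow,
					    unsigned long new_val)
{
	return guest_host_mask & (new_val ^ read_shadow);
}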
9536
9537 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
9538         struct vmcs12 *vmcs12, gpa_t bitmap)
9539 {
9540         u32 vmx_instruction_info;
9541         unsigned long field;
9542         u8 b;
9543
9544         if (!nested_cpu_has_shadow_vmcs(vmcs12))
9545                 return true;
9546
9547         /* Decode instruction info and find the field to access */
9548         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
9549         field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
9550
9551         /* Out-of-range fields always cause a VM exit from L2 to L1 */
9552         if (field >> 15)
9553                 return true;
9554
9555         if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
9556                 return true;
9557
9558         return 1 & (b >> (field & 7));
9559 }
9560
9561 /*
9562  * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
9563  * should handle it ourselves in L0 (and then continue L2). Only call this
9564  * when in is_guest_mode (L2).
9565  */
9566 static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
9567 {
9568         u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
9569         struct vcpu_vmx *vmx = to_vmx(vcpu);
9570         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
9571
9572         if (vmx->nested.nested_run_pending)
9573                 return false;
9574
9575         if (unlikely(vmx->fail)) {
9576                 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
9577                                     vmcs_read32(VM_INSTRUCTION_ERROR));
9578                 return true;
9579         }
9580
9581         /*
9582          * The host physical addresses of some pages of guest memory
9583          * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
9584          * Page). The CPU may write to these pages via their host
9585          * physical address while L2 is running, bypassing any
9586          * address-translation-based dirty tracking (e.g. EPT write
9587          * protection).
9588          *
9589          * Mark them dirty on every exit from L2 to prevent them from
9590          * getting out of sync with dirty tracking.
9591          */
9592         nested_mark_vmcs12_pages_dirty(vcpu);
9593
9594         trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
9595                                 vmcs_readl(EXIT_QUALIFICATION),
9596                                 vmx->idt_vectoring_info,
9597                                 intr_info,
9598                                 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
9599                                 KVM_ISA_VMX);
9600
9601         switch (exit_reason) {
9602         case EXIT_REASON_EXCEPTION_NMI:
9603                 if (is_nmi(intr_info))
9604                         return false;
9605                 else if (is_page_fault(intr_info))
9606                         return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
9607                 else if (is_debug(intr_info) &&
9608                          vcpu->guest_debug &
9609                          (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
9610                         return false;
9611                 else if (is_breakpoint(intr_info) &&
9612                          vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
9613                         return false;
9614                 return vmcs12->exception_bitmap &
9615                                 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
9616         case EXIT_REASON_EXTERNAL_INTERRUPT:
9617                 return false;
9618         case EXIT_REASON_TRIPLE_FAULT:
9619                 return true;
9620         case EXIT_REASON_PENDING_INTERRUPT:
9621                 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
9622         case EXIT_REASON_NMI_WINDOW:
9623                 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
9624         case EXIT_REASON_TASK_SWITCH:
9625                 return true;
9626         case EXIT_REASON_CPUID:
9627                 return true;
9628         case EXIT_REASON_HLT:
9629                 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
9630         case EXIT_REASON_INVD:
9631                 return true;
9632         case EXIT_REASON_INVLPG:
9633                 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
9634         case EXIT_REASON_RDPMC:
9635                 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
9636         case EXIT_REASON_RDRAND:
9637                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
9638         case EXIT_REASON_RDSEED:
9639                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
9640         case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
9641                 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
9642         case EXIT_REASON_VMREAD:
9643                 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
9644                         vmcs12->vmread_bitmap);
9645         case EXIT_REASON_VMWRITE:
9646                 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
9647                         vmcs12->vmwrite_bitmap);
9648         case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
9649         case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
9650         case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
9651         case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
9652         case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
9653                 /*
9654                  * VMX instructions trap unconditionally. This allows L1 to
9655                  * emulate them for its L2 guest, i.e., allows 3-level nesting!
9656                  */
9657                 return true;
9658         case EXIT_REASON_CR_ACCESS:
9659                 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
9660         case EXIT_REASON_DR_ACCESS:
9661                 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
9662         case EXIT_REASON_IO_INSTRUCTION:
9663                 return nested_vmx_exit_handled_io(vcpu, vmcs12);
9664         case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
9665                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
9666         case EXIT_REASON_MSR_READ:
9667         case EXIT_REASON_MSR_WRITE:
9668                 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
9669         case EXIT_REASON_INVALID_STATE:
9670                 return true;
9671         case EXIT_REASON_MWAIT_INSTRUCTION:
9672                 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
9673         case EXIT_REASON_MONITOR_TRAP_FLAG:
9674                 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
9675         case EXIT_REASON_MONITOR_INSTRUCTION:
9676                 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
9677         case EXIT_REASON_PAUSE_INSTRUCTION:
9678                 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
9679                         nested_cpu_has2(vmcs12,
9680                                 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
9681         case EXIT_REASON_MCE_DURING_VMENTRY:
9682                 return false;
9683         case EXIT_REASON_TPR_BELOW_THRESHOLD:
9684                 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
9685         case EXIT_REASON_APIC_ACCESS:
9686         case EXIT_REASON_APIC_WRITE:
9687         case EXIT_REASON_EOI_INDUCED:
9688                 /*
9689                  * The controls for "virtualize APIC accesses," "APIC-
9690                  * register virtualization," and "virtual-interrupt
9691                  * delivery" only come from vmcs12.
9692                  */
9693                 return true;
9694         case EXIT_REASON_EPT_VIOLATION:
9695                 /*
9696                  * L0 always deals with the EPT violation. If nested EPT is
9697                  * used, and the nested mmu code discovers that the address is
9698                  * missing in the guest EPT table (EPT12), the EPT violation
9699                  * will be injected with nested_ept_inject_page_fault()
9700                  */
9701                 return false;
9702         case EXIT_REASON_EPT_MISCONFIG:
9703                 /*
9704          * L2 never uses L1's EPT directly, but rather L0's own EPT
9705                  * table (shadow on EPT) or a merged EPT table that L0 built
9706                  * (EPT on EPT). So any problems with the structure of the
9707                  * table is L0's fault.
9708          * table are L0's fault.
9709                 return false;
9710         case EXIT_REASON_INVPCID:
9711                 return
9712                         nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
9713                         nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
9714         case EXIT_REASON_WBINVD:
9715                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
9716         case EXIT_REASON_XSETBV:
9717                 return true;
9718         case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
9719                 /*
9720                  * This should never happen, since it is not possible to
9721                  * set XSS to a non-zero value---neither in L1 nor in L2.
9722          * If it were, XSS would have to be checked against
9723                  * the XSS exit bitmap in vmcs12.
9724                  */
9725                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
9726         case EXIT_REASON_PREEMPTION_TIMER:
9727                 return false;
9728         case EXIT_REASON_PML_FULL:
9729                 /* We emulate PML support to L1. */
9730                 return false;
9731         case EXIT_REASON_VMFUNC:
9732                 /* VM functions are emulated through L2->L0 vmexits. */
9733                 return false;
9734         case EXIT_REASON_ENCLS:
9735                 /* SGX is never exposed to L1 */
9736                 return false;
9737         default:
9738                 return true;
9739         }
9740 }
9741
9742 static int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason)
9743 {
9744         u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
9745
9746         /*
9747          * At this point, the exit interruption info in exit_intr_info
9748          * is only valid for EXCEPTION_NMI exits.  For EXTERNAL_INTERRUPT
9749          * we need to query the in-kernel LAPIC.
9750          */
9751         WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
9752         if ((exit_intr_info &
9753              (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
9754             (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
9755                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
9756                 vmcs12->vm_exit_intr_error_code =
9757                         vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
9758         }
9759
9760         nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
9761                           vmcs_readl(EXIT_QUALIFICATION));
9762         return 1;
9763 }
9764
9765 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
9766 {
9767         *info1 = vmcs_readl(EXIT_QUALIFICATION);
9768         *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
9769 }
9770
9771 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
9772 {
9773         if (vmx->pml_pg) {
9774                 __free_page(vmx->pml_pg);
9775                 vmx->pml_pg = NULL;
9776         }
9777 }
9778
9779 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
9780 {
9781         struct vcpu_vmx *vmx = to_vmx(vcpu);
9782         u64 *pml_buf;
9783         u16 pml_idx;
9784
9785         pml_idx = vmcs_read16(GUEST_PML_INDEX);
9786
9787         /* Do nothing if PML buffer is empty */
9788         if (pml_idx == (PML_ENTITY_NUM - 1))
9789                 return;
9790
9791         /* PML index always points to next available PML buffer entity */
9792         if (pml_idx >= PML_ENTITY_NUM)
9793                 pml_idx = 0;
9794         else
9795                 pml_idx++;
9796
9797         pml_buf = page_address(vmx->pml_pg);
9798         for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
9799                 u64 gpa;
9800
9801                 gpa = pml_buf[pml_idx];
9802                 WARN_ON(gpa & (PAGE_SIZE - 1));
9803                 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
9804         }
9805
9806         /* reset PML index */
9807         vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
9808 }
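
/*
 * Illustrative sketch (not part of KVM): how the PML index read above is
 * interpreted.  The CPU fills the buffer from the highest entry downwards
 * and leaves GUEST_PML_INDEX pointing at the next free slot, so the logged
 * GPAs live in entries [index + 1, PML_ENTITY_NUM - 1]; an index of
 * PML_ENTITY_NUM - 1 means "empty" and an index that wrapped below zero
 * means "completely full".  The helper name is hypothetical.
 */
static inline void demo_pml_valid_range(u16 pml_idx, u16 *first, u16 *count)
{
	if (pml_idx == PML_ENTITY_NUM - 1) {		/* nothing logged */
		*first = PML_ENTITY_NUM;
		*count = 0;
	} else if (pml_idx >= PML_ENTITY_NUM) {		/* wrapped: full */
		*first = 0;
		*count = PML_ENTITY_NUM;
	} else {
		*first = pml_idx + 1;
		*count = PML_ENTITY_NUM - (pml_idx + 1);
	}
}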
9809
9810 /*
9811  * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap.
9812  * Called before reporting dirty_bitmap to userspace.
9813  */
9814 static void kvm_flush_pml_buffers(struct kvm *kvm)
9815 {
9816         int i;
9817         struct kvm_vcpu *vcpu;
9818         /*
9819          * We only need to kick each vcpu out of guest mode here, as the PML
9820          * buffer is flushed at the beginning of every VMEXIT, so only vcpus
9821          * currently running in guest mode can have unflushed GPAs in their
9822          * PML buffer.
9823          */
9824         kvm_for_each_vcpu(i, vcpu, kvm)
9825                 kvm_vcpu_kick(vcpu);
9826 }
9827
9828 static void vmx_dump_sel(char *name, uint32_t sel)
9829 {
9830         pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
9831                name, vmcs_read16(sel),
9832                vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
9833                vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
9834                vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
9835 }
9836
9837 static void vmx_dump_dtsel(char *name, uint32_t limit)
9838 {
9839         pr_err("%s                           limit=0x%08x, base=0x%016lx\n",
9840                name, vmcs_read32(limit),
9841                vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
9842 }
9843
9844 static void dump_vmcs(void)
9845 {
9846         u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
9847         u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
9848         u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
9849         u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
9850         u32 secondary_exec_control = 0;
9851         unsigned long cr4 = vmcs_readl(GUEST_CR4);
9852         u64 efer = vmcs_read64(GUEST_IA32_EFER);
9853         int i, n;
9854
9855         if (cpu_has_secondary_exec_ctrls())
9856                 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
9857
9858         pr_err("*** Guest State ***\n");
9859         pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
9860                vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
9861                vmcs_readl(CR0_GUEST_HOST_MASK));
9862         pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
9863                cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
9864         pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
9865         if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
9866             (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
9867         {
9868                 pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
9869                        vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
9870                 pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
9871                        vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
9872         }
9873         pr_err("RSP = 0x%016lx  RIP = 0x%016lx\n",
9874                vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
9875         pr_err("RFLAGS=0x%08lx         DR7 = 0x%016lx\n",
9876                vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
9877         pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
9878                vmcs_readl(GUEST_SYSENTER_ESP),
9879                vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
9880         vmx_dump_sel("CS:  ", GUEST_CS_SELECTOR);
9881         vmx_dump_sel("DS:  ", GUEST_DS_SELECTOR);
9882         vmx_dump_sel("SS:  ", GUEST_SS_SELECTOR);
9883         vmx_dump_sel("ES:  ", GUEST_ES_SELECTOR);
9884         vmx_dump_sel("FS:  ", GUEST_FS_SELECTOR);
9885         vmx_dump_sel("GS:  ", GUEST_GS_SELECTOR);
9886         vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
9887         vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
9888         vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
9889         vmx_dump_sel("TR:  ", GUEST_TR_SELECTOR);
9890         if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
9891             (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
9892                 pr_err("EFER =     0x%016llx  PAT = 0x%016llx\n",
9893                        efer, vmcs_read64(GUEST_IA32_PAT));
9894         pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
9895                vmcs_read64(GUEST_IA32_DEBUGCTL),
9896                vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
9897         if (cpu_has_load_perf_global_ctrl &&
9898             vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
9899                 pr_err("PerfGlobCtl = 0x%016llx\n",
9900                        vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
9901         if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
9902                 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
9903         pr_err("Interruptibility = %08x  ActivityState = %08x\n",
9904                vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
9905                vmcs_read32(GUEST_ACTIVITY_STATE));
9906         if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
9907                 pr_err("InterruptStatus = %04x\n",
9908                        vmcs_read16(GUEST_INTR_STATUS));
9909
9910         pr_err("*** Host State ***\n");
9911         pr_err("RIP = 0x%016lx  RSP = 0x%016lx\n",
9912                vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
9913         pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
9914                vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
9915                vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
9916                vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
9917                vmcs_read16(HOST_TR_SELECTOR));
9918         pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
9919                vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
9920                vmcs_readl(HOST_TR_BASE));
9921         pr_err("GDTBase=%016lx IDTBase=%016lx\n",
9922                vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
9923         pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
9924                vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
9925                vmcs_readl(HOST_CR4));
9926         pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
9927                vmcs_readl(HOST_IA32_SYSENTER_ESP),
9928                vmcs_read32(HOST_IA32_SYSENTER_CS),
9929                vmcs_readl(HOST_IA32_SYSENTER_EIP));
9930         if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
9931                 pr_err("EFER = 0x%016llx  PAT = 0x%016llx\n",
9932                        vmcs_read64(HOST_IA32_EFER),
9933                        vmcs_read64(HOST_IA32_PAT));
9934         if (cpu_has_load_perf_global_ctrl &&
9935             vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
9936                 pr_err("PerfGlobCtl = 0x%016llx\n",
9937                        vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
9938
9939         pr_err("*** Control State ***\n");
9940         pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
9941                pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
9942         pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
9943         pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
9944                vmcs_read32(EXCEPTION_BITMAP),
9945                vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
9946                vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
9947         pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
9948                vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
9949                vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
9950                vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
9951         pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
9952                vmcs_read32(VM_EXIT_INTR_INFO),
9953                vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
9954                vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
9955         pr_err("        reason=%08x qualification=%016lx\n",
9956                vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
9957         pr_err("IDTVectoring: info=%08x errcode=%08x\n",
9958                vmcs_read32(IDT_VECTORING_INFO_FIELD),
9959                vmcs_read32(IDT_VECTORING_ERROR_CODE));
9960         pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
9961         if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
9962                 pr_err("TSC Multiplier = 0x%016llx\n",
9963                        vmcs_read64(TSC_MULTIPLIER));
9964         if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
9965                 pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
9966         if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
9967                 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
9968         if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
9969                 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
9970         n = vmcs_read32(CR3_TARGET_COUNT);
9971         for (i = 0; i + 1 < n; i += 2)
9972                 pr_err("CR3 target%u=%016lx target%u=%016lx\n",
9973                        i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
9974                        i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
9975         if (i < n)
9976                 pr_err("CR3 target%u=%016lx\n",
9977                        i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
9978         if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
9979                 pr_err("PLE Gap=%08x Window=%08x\n",
9980                        vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
9981         if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
9982                 pr_err("Virtual processor ID = 0x%04x\n",
9983                        vmcs_read16(VIRTUAL_PROCESSOR_ID));
9984 }
9985
9986 /*
9987  * The guest has exited.  See if we can fix it or if we need userspace
9988  * assistance.
9989  */
9990 static int vmx_handle_exit(struct kvm_vcpu *vcpu)
9991 {
9992         struct vcpu_vmx *vmx = to_vmx(vcpu);
9993         u32 exit_reason = vmx->exit_reason;
9994         u32 vectoring_info = vmx->idt_vectoring_info;
9995
9996         trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
9997
9998         /*
9999          * Flush the PML buffer of logged GPAs so that dirty_bitmap is as up to
10000          * date as possible. Another benefit: in kvm_vm_ioctl_get_dirty_log,
10001          * before querying dirty_bitmap, we only need to kick all vcpus out of
10002          * guest mode, because once a vcpu is back in root mode its PML buffer
10003          * has already been flushed.
10004          */
10005         if (enable_pml)
10006                 vmx_flush_pml_buffer(vcpu);
10007
10008         /* If guest state is invalid, start emulating */
10009         if (vmx->emulation_required)
10010                 return handle_invalid_guest_state(vcpu);
10011
10012         if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
10013                 return nested_vmx_reflect_vmexit(vcpu, exit_reason);
10014
10015         if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
10016                 dump_vmcs();
10017                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
10018                 vcpu->run->fail_entry.hardware_entry_failure_reason
10019                         = exit_reason;
10020                 return 0;
10021         }
10022
10023         if (unlikely(vmx->fail)) {
10024                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
10025                 vcpu->run->fail_entry.hardware_entry_failure_reason
10026                         = vmcs_read32(VM_INSTRUCTION_ERROR);
10027                 return 0;
10028         }
10029
10030         /*
10031          * Note:
10032          * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by an
10033          * event delivery, since that indicates the guest is accessing MMIO.
10034          * The VM exit would be triggered again after returning to the guest,
10035          * causing an infinite loop.
10036          */
10037         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
10038                         (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
10039                         exit_reason != EXIT_REASON_EPT_VIOLATION &&
10040                         exit_reason != EXIT_REASON_PML_FULL &&
10041                         exit_reason != EXIT_REASON_TASK_SWITCH)) {
10042                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
10043                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
10044                 vcpu->run->internal.ndata = 3;
10045                 vcpu->run->internal.data[0] = vectoring_info;
10046                 vcpu->run->internal.data[1] = exit_reason;
10047                 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
10048                 if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
10049                         vcpu->run->internal.ndata++;
10050                         vcpu->run->internal.data[3] =
10051                                 vmcs_read64(GUEST_PHYSICAL_ADDRESS);
10052                 }
10053                 return 0;
10054         }
10055
10056         if (unlikely(!enable_vnmi &&
10057                      vmx->loaded_vmcs->soft_vnmi_blocked)) {
10058                 if (vmx_interrupt_allowed(vcpu)) {
10059                         vmx->loaded_vmcs->soft_vnmi_blocked = 0;
10060                 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
10061                            vcpu->arch.nmi_pending) {
10062                         /*
10063                          * This CPU doesn't help us find the end of an
10064                          * NMI-blocked window if the guest runs with IRQs
10065                          * disabled. So we pull the trigger after 1 s of
10066                          * futile waiting, but inform the user about this.
10067                          */
10068                         printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
10069                                "state on VCPU %d after 1 s timeout\n",
10070                                __func__, vcpu->vcpu_id);
10071                         vmx->loaded_vmcs->soft_vnmi_blocked = 0;
10072                 }
10073         }
10074
10075         if (exit_reason < kvm_vmx_max_exit_handlers
10076             && kvm_vmx_exit_handlers[exit_reason])
10077                 return kvm_vmx_exit_handlers[exit_reason](vcpu);
10078         else {
10079                 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
10080                                 exit_reason);
10081                 kvm_queue_exception(vcpu, UD_VECTOR);
10082                 return 1;
10083         }
10084 }
10085
10086 /*
10087  * Software based L1D cache flush which is used when microcode providing
10088  * the cache control MSR is not loaded.
10089  *
10090  * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
10091  * flushing it requires reading 64 KiB because the replacement algorithm
10092  * is not exactly LRU. This could be sized at runtime via topology
10093  * information, but as all relevant affected CPUs have a 32 KiB L1D cache
10094  * there is no point in doing so.
10095  */
10096 static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
10097 {
10098         int size = PAGE_SIZE << L1D_CACHE_ORDER;
10099
10100         /*
10101          * This code is only executed when the flush mode is 'cond' or
10102          * 'always'.
10103          */
10104         if (static_branch_likely(&vmx_l1d_flush_cond)) {
10105                 bool flush_l1d;
10106
10107                 /*
10108                  * Clear the per-vcpu flush bit; it gets set again
10109                  * either from vcpu_run() or from one of the unsafe
10110                  * VMEXIT handlers.
10111                  */
10112                 flush_l1d = vcpu->arch.l1tf_flush_l1d;
10113                 vcpu->arch.l1tf_flush_l1d = false;
10114
10115                 /*
10116                  * Clear the per-cpu flush bit; it gets set again from
10117                  * the interrupt handlers.
10118                  */
10119                 flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
10120                 kvm_clear_cpu_l1tf_flush_l1d();
10121
10122                 if (!flush_l1d)
10123                         return;
10124         }
10125
10126         vcpu->stat.l1d_flush++;
10127
10128         if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
10129                 wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
10130                 return;
10131         }
10132
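              /*
               * No hardware assist (MSR_IA32_FLUSH_CMD not available): displace
               * the L1D contents by reading the flush buffer, first in page-sized
               * strides to prime the TLB, then in cache-line (64 byte) strides,
               * with a serializing CPUID in between.
               */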
10133         asm volatile(
10134                 /* First ensure the pages are in the TLB */
10135                 "xorl   %%eax, %%eax\n"
10136                 ".Lpopulate_tlb:\n\t"
10137                 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
10138                 "addl   $4096, %%eax\n\t"
10139                 "cmpl   %%eax, %[size]\n\t"
10140                 "jne    .Lpopulate_tlb\n\t"
10141                 "xorl   %%eax, %%eax\n\t"
10142                 "cpuid\n\t"
10143                 /* Now fill the cache */
10144                 "xorl   %%eax, %%eax\n"
10145                 ".Lfill_cache:\n"
10146                 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
10147                 "addl   $64, %%eax\n\t"
10148                 "cmpl   %%eax, %[size]\n\t"
10149                 "jne    .Lfill_cache\n\t"
10150                 "lfence\n"
10151                 :: [flush_pages] "r" (vmx_l1d_flush_pages),
10152                     [size] "r" (size)
10153                 : "eax", "ebx", "ecx", "edx");
10154 }
10155
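      /*
       * With the TPR shadow in use, program the TPR threshold: if the highest
       * pending interrupt (irr) is currently blocked by the guest's TPR, request
       * a VM exit when the guest lowers its TPR below that priority so the
       * interrupt can be injected; otherwise no TPR-induced exits are needed.
       */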
10156 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
10157 {
10158         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
10159
10160         if (is_guest_mode(vcpu) &&
10161                 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
10162                 return;
10163
10164         if (irr == -1 || tpr < irr) {
10165                 vmcs_write32(TPR_THRESHOLD, 0);
10166                 return;
10167         }
10168
10169         vmcs_write32(TPR_THRESHOLD, irr);
10170 }
10171
10172 static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
10173 {
10174         u32 sec_exec_control;
10175
10176         if (!lapic_in_kernel(vcpu))
10177                 return;
10178
10179         if (!flexpriority_enabled &&
10180             !cpu_has_vmx_virtualize_x2apic_mode())
10181                 return;
10182
10183         /* Postpone execution until vmcs01 is the current VMCS. */
10184         if (is_guest_mode(vcpu)) {
10185                 to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
10186                 return;
10187         }
10188
10189         sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
10190         sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
10191                               SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
10192
10193         switch (kvm_get_apic_mode(vcpu)) {
10194         case LAPIC_MODE_INVALID:
10195                 WARN_ONCE(true, "Invalid local APIC state");
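                      /* fall through */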
10196         case LAPIC_MODE_DISABLED:
10197                 break;
10198         case LAPIC_MODE_XAPIC:
10199                 if (flexpriority_enabled) {
10200                         sec_exec_control |=
10201                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
10202                         vmx_flush_tlb(vcpu, true);
10203                 }
10204                 break;
10205         case LAPIC_MODE_X2APIC:
10206                 if (cpu_has_vmx_virtualize_x2apic_mode())
10207                         sec_exec_control |=
10208                                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
10209                 break;
10210         }
10211         vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
10212
10213         vmx_update_msr_bitmap(vcpu);
10214 }
10215
10216 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
10217 {
10218         if (!is_guest_mode(vcpu)) {
10219                 vmcs_write64(APIC_ACCESS_ADDR, hpa);
10220                 vmx_flush_tlb(vcpu, true);
10221         }
10222 }
10223
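      /*
       * Mirror the highest in-service vector into SVI, the high byte of the
       * guest interrupt status field.
       */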
10224 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
10225 {
10226         u16 status;
10227         u8 old;
10228
10229         if (max_isr == -1)
10230                 max_isr = 0;
10231
10232         status = vmcs_read16(GUEST_INTR_STATUS);
10233         old = status >> 8;
10234         if (max_isr != old) {
10235                 status &= 0xff;
10236                 status |= max_isr << 8;
10237                 vmcs_write16(GUEST_INTR_STATUS, status);
10238         }
10239 }
10240
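      /* Update RVI, the low byte of the guest interrupt status field. */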
10241 static void vmx_set_rvi(int vector)
10242 {
10243         u16 status;
10244         u8 old;
10245
10246         if (vector == -1)
10247                 vector = 0;
10248
10249         status = vmcs_read16(GUEST_INTR_STATUS);
10250         old = (u8)status & 0xff;
10251         if ((u8)vector != old) {
10252                 status &= ~0xff;
10253                 status |= (u8)vector;
10254                 vmcs_write16(GUEST_INTR_STATUS, status);
10255         }
10256 }
10257
10258 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
10259 {
10260         /*
10261          * When running L2, updating RVI is only relevant if
10262          * vmcs12 has virtual-interrupt-delivery enabled.
10263          * However, that can only be enabled when L1 also
10264          * intercepts external interrupts, and in that case
10265          * we should not update vmcs02's RVI but intercept the
10266          * interrupt instead. Therefore, do nothing when running L2.
10267          */
10268         if (!is_guest_mode(vcpu))
10269                 vmx_set_rvi(max_irr);
10270 }
10271
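      /*
       * Merge interrupts posted in the PIR into the vAPIC IRR and return the
       * highest pending vector.
       */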
10272 static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
10273 {
10274         struct vcpu_vmx *vmx = to_vmx(vcpu);
10275         int max_irr;
10276         bool max_irr_updated;
10277
10278         WARN_ON(!vcpu->arch.apicv_active);
10279         if (pi_test_on(&vmx->pi_desc)) {
10280                 pi_clear_on(&vmx->pi_desc);
10281                 /*
10282                  * IOMMU can write to PIR.ON, so the barrier matters even on UP.
10283                  * But on x86 this is just a compiler barrier anyway.
10284                  */
10285                 smp_mb__after_atomic();
10286                 max_irr_updated =
10287                         kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
10288
10289                 /*
10290                  * If we are running L2 and L1 has a new pending interrupt
10291                  * which can be injected, we should re-evaluate
10292                  * what should be done with this new L1 interrupt.
10293                  * If L1 intercepts external-interrupts, we should
10294                  * exit from L2 to L1. Otherwise, interrupt should be
10295                  * exit from L2 to L1. Otherwise, the interrupt should be
10296                  */
10297                 if (is_guest_mode(vcpu) && max_irr_updated) {
10298                         if (nested_exit_on_intr(vcpu))
10299                                 kvm_vcpu_exiting_guest_mode(vcpu);
10300                         else
10301                                 kvm_make_request(KVM_REQ_EVENT, vcpu);
10302                 }
10303         } else {
10304                 max_irr = kvm_lapic_find_highest_irr(vcpu);
10305         }
10306         vmx_hwapic_irr_update(vcpu, max_irr);
10307         return max_irr;
10308 }
10309
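      /*
       * A virtual interrupt is deliverable when the priority class (upper
       * nibble) of RVI is higher than that of the guest's PPR.
       */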
10310 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
10311 {
10312         u8 rvi = vmx_get_rvi();
10313         u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
10314
10315         return ((rvi & 0xf0) > (vppr & 0xf0));
10316 }
10317
10318 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
10319 {
10320         if (!kvm_vcpu_apicv_active(vcpu))
10321                 return;
10322
10323         vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
10324         vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
10325         vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
10326         vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
10327 }
10328
10329 static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
10330 {
10331         struct vcpu_vmx *vmx = to_vmx(vcpu);
10332
10333         pi_clear_on(&vmx->pi_desc);
10334         memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
10335 }
10336
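      /*
       * Handle exit events that must be processed while still in atomic
       * context: async page faults, machine checks and NMIs.
       */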
10337 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
10338 {
10339         u32 exit_intr_info = 0;
10340         u16 basic_exit_reason = (u16)vmx->exit_reason;
10341
10342         if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
10343               || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
10344                 return;
10345
10346         if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
10347                 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
10348         vmx->exit_intr_info = exit_intr_info;
10349
10350         /* If the exit was due to a page fault, check for an async page fault */
10351         if (is_page_fault(exit_intr_info))
10352                 vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
10353
10354         /* Handle machine checks before interrupts are enabled */
10355         if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
10356             is_machine_check(exit_intr_info))
10357                 kvm_machine_check();
10358
10359         /* We need to handle NMIs before interrupts are enabled */
10360         if (is_nmi(exit_intr_info)) {
10361                 kvm_before_interrupt(&vmx->vcpu);
10362                 asm("int $2");
10363                 kvm_after_interrupt(&vmx->vcpu);
10364         }
10365 }
10366
10367 static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
10368 {
10369         u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
10370
10371         if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
10372                         == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
10373                 unsigned int vector;
10374                 unsigned long entry;
10375                 gate_desc *desc;
10376                 struct vcpu_vmx *vmx = to_vmx(vcpu);
10377 #ifdef CONFIG_X86_64
10378                 unsigned long tmp;
10379 #endif
10380
10381                 vector =  exit_intr_info & INTR_INFO_VECTOR_MASK;
10382                 desc = (gate_desc *)vmx->host_idt_base + vector;
10383                 entry = gate_offset(desc);
10384                 asm volatile(
10385 #ifdef CONFIG_X86_64
10386                         "mov %%" _ASM_SP ", %[sp]\n\t"
10387                         "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
10388                         "push $%c[ss]\n\t"
10389                         "push %[sp]\n\t"
10390 #endif
10391                         "pushf\n\t"
10392                         __ASM_SIZE(push) " $%c[cs]\n\t"
10393                         CALL_NOSPEC
10394                         :
10395 #ifdef CONFIG_X86_64
10396                         [sp]"=&r"(tmp),
10397 #endif
10398                         ASM_CALL_CONSTRAINT
10399                         :
10400                         THUNK_TARGET(entry),
10401                         [ss]"i"(__KERNEL_DS),
10402                         [cs]"i"(__KERNEL_CS)
10403                         );
10404         }
10405 }
10406 STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
10407
10408 static bool vmx_has_emulated_msr(int index)
10409 {
10410         switch (index) {
10411         case MSR_IA32_SMBASE:
10412                 /*
10413                  * We cannot do SMM unless we can run the guest in big
10414                  * real mode.
10415                  */
10416                 return enable_unrestricted_guest || emulate_invalid_guest_state;
10417         case MSR_AMD64_VIRT_SPEC_CTRL:
10418                 /* This is AMD only.  */
10419                 return false;
10420         default:
10421                 return true;
10422         }
10423 }
10424
10425 static bool vmx_mpx_supported(void)
10426 {
10427         return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
10428                 (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
10429 }
10430
10431 static bool vmx_xsaves_supported(void)
10432 {
10433         return vmcs_config.cpu_based_2nd_exec_ctrl &
10434                 SECONDARY_EXEC_XSAVES;
10435 }
10436
10437 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
10438 {
10439         u32 exit_intr_info;
10440         bool unblock_nmi;
10441         u8 vector;
10442         bool idtv_info_valid;
10443
10444         idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
10445
10446         if (enable_vnmi) {
10447                 if (vmx->loaded_vmcs->nmi_known_unmasked)
10448                         return;
10449                 /*
10450                  * Can't use vmx->exit_intr_info since we're not sure what
10451                  * the exit reason is.
10452                  */
10453                 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
10454                 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
10455                 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
10456                 /*
10457                  * SDM 3: 27.7.1.2 (September 2008)
10458                  * Re-set bit "block by NMI" before VM entry if vmexit caused by
10459                  * a guest IRET fault.
10460                  * SDM 3: 23.2.2 (September 2008)
10461                  * Bit 12 is undefined in any of the following cases:
10462                  *  If the VM exit sets the valid bit in the IDT-vectoring
10463                  *   information field.
10464                  *  If the VM exit is due to a double fault.
10465                  */
10466                 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
10467                     vector != DF_VECTOR && !idtv_info_valid)
10468                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
10469                                       GUEST_INTR_STATE_NMI);
10470                 else
10471                         vmx->loaded_vmcs->nmi_known_unmasked =
10472                                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
10473                                   & GUEST_INTR_STATE_NMI);
10474         } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
10475                 vmx->loaded_vmcs->vnmi_blocked_time +=
10476                         ktime_to_ns(ktime_sub(ktime_get(),
10477                                               vmx->loaded_vmcs->entry_time));
10478 }
10479
10480 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
10481                                       u32 idt_vectoring_info,
10482                                       int instr_len_field,
10483                                       int error_code_field)
10484 {
10485         u8 vector;
10486         int type;
10487         bool idtv_info_valid;
10488
10489         idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
10490
10491         vcpu->arch.nmi_injected = false;
10492         kvm_clear_exception_queue(vcpu);
10493         kvm_clear_interrupt_queue(vcpu);
10494
10495         if (!idtv_info_valid)
10496                 return;
10497
10498         kvm_make_request(KVM_REQ_EVENT, vcpu);
10499
10500         vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
10501         type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
10502
10503         switch (type) {
10504         case INTR_TYPE_NMI_INTR:
10505                 vcpu->arch.nmi_injected = true;
10506                 /*
10507                  * SDM 3: 27.7.1.2 (September 2008)
10508                  * Clear bit "block by NMI" before VM entry if an NMI
10509                  * delivery faulted.
10510                  */
10511                 vmx_set_nmi_mask(vcpu, false);
10512                 break;
10513         case INTR_TYPE_SOFT_EXCEPTION:
10514                 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
10515                 /* fall through */
10516         case INTR_TYPE_HARD_EXCEPTION:
10517                 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
10518                         u32 err = vmcs_read32(error_code_field);
10519                         kvm_requeue_exception_e(vcpu, vector, err);
10520                 } else
10521                         kvm_requeue_exception(vcpu, vector);
10522                 break;
10523         case INTR_TYPE_SOFT_INTR:
10524                 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
10525                 /* fall through */
10526         case INTR_TYPE_EXT_INTR:
10527                 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
10528                 break;
10529         default:
10530                 break;
10531         }
10532 }
10533
10534 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
10535 {
10536         __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
10537                                   VM_EXIT_INSTRUCTION_LEN,
10538                                   IDT_VECTORING_ERROR_CODE);
10539 }
10540
10541 static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
10542 {
10543         __vmx_complete_interrupts(vcpu,
10544                                   vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
10545                                   VM_ENTRY_INSTRUCTION_LEN,
10546                                   VM_ENTRY_EXCEPTION_ERROR_CODE);
10547
10548         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
10549 }
10550
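      /*
       * Put the perf MSRs whose host and guest values differ on the VMCS
       * atomic switch lists; drop entries whose values already match.
       */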
10551 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
10552 {
10553         int i, nr_msrs;
10554         struct perf_guest_switch_msr *msrs;
10555
10556         msrs = perf_guest_get_msrs(&nr_msrs);
10557
10558         if (!msrs)
10559                 return;
10560
10561         for (i = 0; i < nr_msrs; i++)
10562                 if (msrs[i].host == msrs[i].guest)
10563                         clear_atomic_switch_msr(vmx, msrs[i].msr);
10564                 else
10565                         add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
10566                                         msrs[i].host, false);
10567 }
10568
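      /*
       * Program the VMX preemption timer with 'val' and make sure it is
       * enabled in the pin-based VM-execution controls.
       */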
10569 static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
10570 {
10571         vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
10572         if (!vmx->loaded_vmcs->hv_timer_armed)
10573                 vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
10574                               PIN_BASED_VMX_PREEMPTION_TIMER);
10575         vmx->loaded_vmcs->hv_timer_armed = true;
10576 }
10577
10578 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
10579 {
10580         struct vcpu_vmx *vmx = to_vmx(vcpu);
10581         u64 tscl;
10582         u32 delta_tsc;
10583
10584         if (vmx->req_immediate_exit) {
10585                 vmx_arm_hv_timer(vmx, 0);
10586                 return;
10587         }
10588
10589         if (vmx->hv_deadline_tsc != -1) {
10590                 tscl = rdtsc();
10591                 if (vmx->hv_deadline_tsc > tscl)
10592                         /* set_hv_timer ensures the delta fits in 32-bits */
10593                         delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
10594                                 cpu_preemption_timer_multi);
10595                 else
10596                         delta_tsc = 0;
10597
10598                 vmx_arm_hv_timer(vmx, delta_tsc);
10599                 return;
10600         }
10601
10602         if (vmx->loaded_vmcs->hv_timer_armed)
10603                 vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
10604                                 PIN_BASED_VMX_PREEMPTION_TIMER);
10605         vmx->loaded_vmcs->hv_timer_armed = false;
10606 }
10607
10608 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
10609 {
10610         struct vcpu_vmx *vmx = to_vmx(vcpu);
10611         unsigned long cr3, cr4, evmcs_rsp;
10612
10613         /* Record the guest's net vcpu time for enforced NMI injections. */
10614         if (unlikely(!enable_vnmi &&
10615                      vmx->loaded_vmcs->soft_vnmi_blocked))
10616                 vmx->loaded_vmcs->entry_time = ktime_get();
10617
10618         /* Don't enter VMX if guest state is invalid, let the exit handler
10619            start emulation until we arrive back at a valid state */
10620         if (vmx->emulation_required)
10621                 return;
10622
10623         if (vmx->ple_window_dirty) {
10624                 vmx->ple_window_dirty = false;
10625                 vmcs_write32(PLE_WINDOW, vmx->ple_window);
10626         }
10627
10628         if (vmx->nested.sync_shadow_vmcs) {
10629                 copy_vmcs12_to_shadow(vmx);
10630                 vmx->nested.sync_shadow_vmcs = false;
10631         }
10632
10633         if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
10634                 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
10635         if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
10636                 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
10637
10638         cr3 = __get_current_cr3_fast();
10639         if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
10640                 vmcs_writel(HOST_CR3, cr3);
10641                 vmx->loaded_vmcs->host_state.cr3 = cr3;
10642         }
10643
10644         cr4 = cr4_read_shadow();
10645         if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
10646                 vmcs_writel(HOST_CR4, cr4);
10647                 vmx->loaded_vmcs->host_state.cr4 = cr4;
10648         }
10649
10650         /* When single-stepping over STI and MOV SS, we must clear the
10651          * corresponding interruptibility bits in the guest state. Otherwise
10652          * vmentry fails, as it then expects bit 14 (BS) of the pending debug
10653          * exceptions field to be set, but that's not correct for the guest
10654          * debugging case. */
10655         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
10656                 vmx_set_interrupt_shadow(vcpu, 0);
10657
10658         if (static_cpu_has(X86_FEATURE_PKU) &&
10659             kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
10660             vcpu->arch.pkru != vmx->host_pkru)
10661                 __write_pkru(vcpu->arch.pkru);
10662
10663         atomic_switch_perf_msrs(vmx);
10664
10665         vmx_update_hv_timer(vcpu);
10666
10667         /*
10668          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
10669          * it's non-zero. Since vmentry is serialising on affected CPUs, there
10670          * is no need to worry about the conditional branch over the wrmsr
10671          * being speculatively taken.
10672          */
10673         x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
10674
10675         vmx->__launched = vmx->loaded_vmcs->launched;
10676
10677         evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
10678                 (unsigned long)&current_evmcs->host_rsp : 0;
10679
10680         if (static_branch_unlikely(&vmx_l1d_should_flush))
10681                 vmx_l1d_flush(vcpu);
10682
10683         asm(
10684                 /* Store host registers */
10685                 "push %%" _ASM_DX "; push %%" _ASM_BP ";"
10686                 "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
10687                 "push %%" _ASM_CX " \n\t"
10688                 "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
10689                 "je 1f \n\t"
10690                 "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
10691                 /* Avoid VMWRITE when Enlightened VMCS is in use */
10692                 "test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
10693                 "jz 2f \n\t"
10694                 "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
10695                 "jmp 1f \n\t"
10696                 "2: \n\t"
10697                 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
10698                 "1: \n\t"
10699                 /* Reload cr2 if changed */
10700                 "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
10701                 "mov %%cr2, %%" _ASM_DX " \n\t"
10702                 "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
10703                 "je 3f \n\t"
10704                 "mov %%" _ASM_AX", %%cr2 \n\t"
10705                 "3: \n\t"
10706                 /* Check if vmlaunch or vmresume is needed */
10707                 "cmpl $0, %c[launched](%0) \n\t"
10708                 /* Load guest registers.  Don't clobber flags. */
10709                 "mov %c[rax](%0), %%" _ASM_AX " \n\t"
10710                 "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
10711                 "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
10712                 "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
10713                 "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
10714                 "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
10715 #ifdef CONFIG_X86_64
10716                 "mov %c[r8](%0),  %%r8  \n\t"
10717                 "mov %c[r9](%0),  %%r9  \n\t"
10718                 "mov %c[r10](%0), %%r10 \n\t"
10719                 "mov %c[r11](%0), %%r11 \n\t"
10720                 "mov %c[r12](%0), %%r12 \n\t"
10721                 "mov %c[r13](%0), %%r13 \n\t"
10722                 "mov %c[r14](%0), %%r14 \n\t"
10723                 "mov %c[r15](%0), %%r15 \n\t"
10724 #endif
10725                 "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
10726
10727                 /* Enter guest mode */
10728                 "jne 1f \n\t"
10729                 __ex(ASM_VMX_VMLAUNCH) "\n\t"
10730                 "jmp 2f \n\t"
10731                 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
10732                 "2: "
10733                 /* Save guest registers, load host registers, keep flags */
10734                 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
10735                 "pop %0 \n\t"
10736                 "setbe %c[fail](%0)\n\t"
10737                 "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
10738                 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
10739                 __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
10740                 "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
10741                 "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
10742                 "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
10743                 "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
10744 #ifdef CONFIG_X86_64
10745                 "mov %%r8,  %c[r8](%0) \n\t"
10746                 "mov %%r9,  %c[r9](%0) \n\t"
10747                 "mov %%r10, %c[r10](%0) \n\t"
10748                 "mov %%r11, %c[r11](%0) \n\t"
10749                 "mov %%r12, %c[r12](%0) \n\t"
10750                 "mov %%r13, %c[r13](%0) \n\t"
10751                 "mov %%r14, %c[r14](%0) \n\t"
10752                 "mov %%r15, %c[r15](%0) \n\t"
10753                 "xor %%r8d,  %%r8d \n\t"
10754                 "xor %%r9d,  %%r9d \n\t"
10755                 "xor %%r10d, %%r10d \n\t"
10756                 "xor %%r11d, %%r11d \n\t"
10757                 "xor %%r12d, %%r12d \n\t"
10758                 "xor %%r13d, %%r13d \n\t"
10759                 "xor %%r14d, %%r14d \n\t"
10760                 "xor %%r15d, %%r15d \n\t"
10761 #endif
10762                 "mov %%cr2, %%" _ASM_AX "   \n\t"
10763                 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
10764
10765                 "xor %%eax, %%eax \n\t"
10766                 "xor %%ebx, %%ebx \n\t"
10767                 "xor %%esi, %%esi \n\t"
10768                 "xor %%edi, %%edi \n\t"
10769                 "pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
10770                 ".pushsection .rodata \n\t"
10771                 ".global vmx_return \n\t"
10772                 "vmx_return: " _ASM_PTR " 2b \n\t"
10773                 ".popsection"
10774               : : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
10775                 [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
10776                 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
10777                 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
10778                 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
10779                 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
10780                 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
10781                 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
10782                 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
10783                 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
10784                 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
10785 #ifdef CONFIG_X86_64
10786                 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
10787                 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
10788                 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
10789                 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
10790                 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
10791                 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
10792                 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
10793                 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
10794 #endif
10795                 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
10796                 [wordsize]"i"(sizeof(ulong))
10797               : "cc", "memory"
10798 #ifdef CONFIG_X86_64
10799                 , "rax", "rbx", "rdi"
10800                 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
10801 #else
10802                 , "eax", "ebx", "edi"
10803 #endif
10804               );
10805
10806         /*
10807          * We do not use IBRS in the kernel. If this vCPU has used the
10808          * SPEC_CTRL MSR it may have left it on; save the value and
10809          * turn it off. This is much more efficient than blindly adding
10810          * it to the atomic save/restore list. Especially as the former
10811          * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
10812          *
10813          * For non-nested case:
10814          * If the L01 MSR bitmap does not intercept the MSR, then we need to
10815          * save it.
10816          *
10817          * For nested case:
10818          * If the L02 MSR bitmap does not intercept the MSR, then we need to
10819          * save it.
10820          */
10821         if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
10822                 vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
10823
10824         x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
10825
10826         /* Eliminate branch target predictions from guest mode */
10827         vmexit_fill_RSB();
10828
10829         /* All fields are clean at this point */
10830         if (static_branch_unlikely(&enable_evmcs))
10831                 current_evmcs->hv_clean_fields |=
10832                         HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
10833
10834         /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
10835         if (vmx->host_debugctlmsr)
10836                 update_debugctlmsr(vmx->host_debugctlmsr);
10837
10838 #ifndef CONFIG_X86_64
10839         /*
10840          * The sysexit path does not restore ds/es, so we must set them to
10841          * a reasonable value ourselves.
10842          *
10843          * We can't defer this to vmx_prepare_switch_to_host() since that
10844          * function may be executed in interrupt context, which saves and
10845          * restores segments around it, nullifying its effect.
10846          */
10847         loadsegment(ds, __USER_DS);
10848         loadsegment(es, __USER_DS);
10849 #endif
10850
10851         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
10852                                   | (1 << VCPU_EXREG_RFLAGS)
10853                                   | (1 << VCPU_EXREG_PDPTR)
10854                                   | (1 << VCPU_EXREG_SEGMENTS)
10855                                   | (1 << VCPU_EXREG_CR3));
10856         vcpu->arch.regs_dirty = 0;
10857
10858         /*
10859          * Eager FPU is enabled if PKEY is supported, and CR4 has been switched
10860          * back to the host value, so it is safe to read the guest PKRU from
10861          * the current XSAVE area.
10862          */
10863         if (static_cpu_has(X86_FEATURE_PKU) &&
10864             kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
10865                 vcpu->arch.pkru = __read_pkru();
10866                 if (vcpu->arch.pkru != vmx->host_pkru)
10867                         __write_pkru(vmx->host_pkru);
10868         }
10869
10870         vmx->nested.nested_run_pending = 0;
10871         vmx->idt_vectoring_info = 0;
10872
10873         vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
10874         if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
10875                 return;
10876
10877         vmx->loaded_vmcs->launched = 1;
10878         vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
10879
10880         vmx_complete_atomic_exit(vmx);
10881         vmx_recover_nmi_blocking(vmx);
10882         vmx_complete_interrupts(vmx);
10883 }
10884 STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
10885
10886 static struct kvm *vmx_vm_alloc(void)
10887 {
10888         struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
10889         return &kvm_vmx->kvm;
10890 }
10891
10892 static void vmx_vm_free(struct kvm *kvm)
10893 {
10894         vfree(to_kvm_vmx(kvm));
10895 }
10896
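      /*
       * Make 'vmcs' the vcpu's active loaded_vmcs: reload it on the current
       * CPU and invalidate the cached entry/exit controls and segment cache.
       */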
10897 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
10898 {
10899         struct vcpu_vmx *vmx = to_vmx(vcpu);
10900         int cpu;
10901
10902         if (vmx->loaded_vmcs == vmcs)
10903                 return;
10904
10905         cpu = get_cpu();
10906         vmx_vcpu_put(vcpu);
10907         vmx->loaded_vmcs = vmcs;
10908         vmx_vcpu_load(vcpu, cpu);
10909         put_cpu();
10910
10911         vm_entry_controls_reset_shadow(vmx);
10912         vm_exit_controls_reset_shadow(vmx);
10913         vmx_segment_cache_clear(vmx);
10914 }
10915
10916 /*
10917  * Ensure that the current vmcs of the logical processor is the
10918  * vmcs01 of the vcpu before calling free_nested().
10919  */
10920 static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
10921 {
10922        struct vcpu_vmx *vmx = to_vmx(vcpu);
10923
10924        vcpu_load(vcpu);
10925        vmx_switch_vmcs(vcpu, &vmx->vmcs01);
10926        free_nested(vmx);
10927        vcpu_put(vcpu);
10928 }
10929
10930 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
10931 {
10932         struct vcpu_vmx *vmx = to_vmx(vcpu);
10933
10934         if (enable_pml)
10935                 vmx_destroy_pml_buffer(vmx);
10936         free_vpid(vmx->vpid);
10937         leave_guest_mode(vcpu);
10938         vmx_free_vcpu_nested(vcpu);
10939         free_loaded_vmcs(vmx->loaded_vmcs);
10940         kfree(vmx->guest_msrs);
10941         kvm_vcpu_uninit(vcpu);
10942         kmem_cache_free(kvm_vcpu_cache, vmx);
10943 }
10944
10945 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
10946 {
10947         int err;
10948         struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
10949         unsigned long *msr_bitmap;
10950         int cpu;
10951
10952         if (!vmx)
10953                 return ERR_PTR(-ENOMEM);
10954
10955         vmx->vpid = allocate_vpid();
10956
10957         err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
10958         if (err)
10959                 goto free_vcpu;
10960
10961         err = -ENOMEM;
10962
10963         /*
10964          * If PML is turned on, failure to enable PML just results in failure
10965          * to create the vcpu. This lets us simplify the PML logic by avoiding
10966          * corner cases such as PML being enabled on only some of the guest's
10967          * vcpus.
10968          */
10969         if (enable_pml) {
10970                 vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
10971                 if (!vmx->pml_pg)
10972                         goto uninit_vcpu;
10973         }
10974
10975         vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
10976         BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
10977                      > PAGE_SIZE);
10978
10979         if (!vmx->guest_msrs)
10980                 goto free_pml;
10981
10982         err = alloc_loaded_vmcs(&vmx->vmcs01);
10983         if (err < 0)
10984                 goto free_msrs;
10985
10986         msr_bitmap = vmx->vmcs01.msr_bitmap;
10987         vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
10988         vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
10989         vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
10990         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
10991         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
10992         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
10993         vmx->msr_bitmap_mode = 0;
10994
10995         vmx->loaded_vmcs = &vmx->vmcs01;
10996         cpu = get_cpu();
10997         vmx_vcpu_load(&vmx->vcpu, cpu);
10998         vmx->vcpu.cpu = cpu;
10999         vmx_vcpu_setup(vmx);
11000         vmx_vcpu_put(&vmx->vcpu);
11001         put_cpu();
11002         if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
11003                 err = alloc_apic_access_page(kvm);
11004                 if (err)
11005                         goto free_vmcs;
11006         }
11007
11008         if (enable_ept && !enable_unrestricted_guest) {
11009                 err = init_rmode_identity_map(kvm);
11010                 if (err)
11011                         goto free_vmcs;
11012         }
11013
11014         if (nested)
11015                 nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
11016                                            kvm_vcpu_apicv_active(&vmx->vcpu));
11017
11018         vmx->nested.posted_intr_nv = -1;
11019         vmx->nested.current_vmptr = -1ull;
11020
11021         vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
11022
11023         /*
11024          * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
11025          * or POSTED_INTR_WAKEUP_VECTOR.
11026          */
11027         vmx->pi_desc.nv = POSTED_INTR_VECTOR;
11028         vmx->pi_desc.sn = 1;
11029
11030         return &vmx->vcpu;
11031
11032 free_vmcs:
11033         free_loaded_vmcs(vmx->loaded_vmcs);
11034 free_msrs:
11035         kfree(vmx->guest_msrs);
11036 free_pml:
11037         vmx_destroy_pml_buffer(vmx);
11038 uninit_vcpu:
11039         kvm_vcpu_uninit(&vmx->vcpu);
11040 free_vcpu:
11041         free_vpid(vmx->vpid);
11042         kmem_cache_free(kvm_vcpu_cache, vmx);
11043         return ERR_PTR(err);
11044 }
11045
11046 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
11047 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
11048
11049 static int vmx_vm_init(struct kvm *kvm)
11050 {
11051         spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock);
11052
11053         if (!ple_gap)
11054                 kvm->arch.pause_in_guest = true;
11055
11056         if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
11057                 switch (l1tf_mitigation) {
11058                 case L1TF_MITIGATION_OFF:
11059                 case L1TF_MITIGATION_FLUSH_NOWARN:
11060                         /* 'I explicitly don't care' is set */
11061                         break;
11062                 case L1TF_MITIGATION_FLUSH:
11063                 case L1TF_MITIGATION_FLUSH_NOSMT:
11064                 case L1TF_MITIGATION_FULL:
11065                         /*
11066                          * Warn upon starting the first VM in a potentially
11067                          * insecure environment.
11068                          */
11069                         if (cpu_smt_control == CPU_SMT_ENABLED)
11070                                 pr_warn_once(L1TF_MSG_SMT);
11071                         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
11072                                 pr_warn_once(L1TF_MSG_L1D);
11073                         break;
11074                 case L1TF_MITIGATION_FULL_FORCE:
11075                         /* Flush is enforced */
11076                         break;
11077                 }
11078         }
11079         return 0;
11080 }
11081
11082 static void __init vmx_check_processor_compat(void *rtn)
11083 {
11084         struct vmcs_config vmcs_conf;
11085
11086         *(int *)rtn = 0;
11087         if (setup_vmcs_config(&vmcs_conf) < 0)
11088                 *(int *)rtn = -EIO;
11089         nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, enable_apicv);
11090         if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
11091                 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
11092                                 smp_processor_id());
11093                 *(int *)rtn = -EIO;
11094         }
11095 }
11096
11097 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
11098 {
11099         u8 cache;
11100         u64 ipat = 0;
11101
11102         /* For the VT-d and EPT combination:
11103          * 1. MMIO: always map as UC.
11104          * 2. EPT with VT-d:
11105          *   a. VT-d without the snooping control feature: cache correctness
11106          *      can't be guaranteed, so try to trust the guest's type.
11107          *   b. VT-d with the snooping control feature: the VT-d engine's
11108          *      snooping control guarantees cache correctness. Just set it
11109          *      to WB to stay consistent with the host, i.e. the same as item 3.
11110          * 3. EPT without VT-d: always map as WB and set IPAT=1 to stay
11111          *    consistent with the host MTRRs.
11112          */
11113         if (is_mmio) {
11114                 cache = MTRR_TYPE_UNCACHABLE;
11115                 goto exit;
11116         }
11117
11118         if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
11119                 ipat = VMX_EPT_IPAT_BIT;
11120                 cache = MTRR_TYPE_WRBACK;
11121                 goto exit;
11122         }
11123
11124         if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
11125                 ipat = VMX_EPT_IPAT_BIT;
11126                 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
11127                         cache = MTRR_TYPE_WRBACK;
11128                 else
11129                         cache = MTRR_TYPE_UNCACHABLE;
11130                 goto exit;
11131         }
11132
11133         cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
11134
11135 exit:
11136         return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
11137 }
11138
11139 static int vmx_get_lpage_level(void)
11140 {
11141         if (enable_ept && !cpu_has_vmx_ept_1g_page())
11142                 return PT_DIRECTORY_LEVEL;
11143         else
11144                 /* Both shadow paging and EPT support 1GB pages */
11145                 return PT_PDPE_LEVEL;
11146 }
11147
11148 static void vmcs_set_secondary_exec_control(u32 new_ctl)
11149 {
11150         /*
11151          * These bits in the secondary execution controls field
11152          * are dynamic; the others are mostly based on the hypervisor
11153          * architecture and the guest's CPUID.  Do not touch the
11154          * dynamic bits.
11155          */
11156         u32 mask =
11157                 SECONDARY_EXEC_SHADOW_VMCS |
11158                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
11159                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
11160                 SECONDARY_EXEC_DESC;
11161
11162         u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
11163
11164         vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
11165                      (new_ctl & ~mask) | (cur_ctl & mask));
11166 }
11167
11168 /*
11169  * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
11170  * (indicating "allowed-1") if they are supported in the guest's CPUID.
11171  */
11172 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
11173 {
11174         struct vcpu_vmx *vmx = to_vmx(vcpu);
11175         struct kvm_cpuid_entry2 *entry;
11176
11177         vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
11178         vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
11179
11180 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do {            \
11181         if (entry && (entry->_reg & (_cpuid_mask)))                     \
11182                 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask);     \
11183 } while (0)
11184
11185         entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
11186         cr4_fixed1_update(X86_CR4_VME,        edx, bit(X86_FEATURE_VME));
11187         cr4_fixed1_update(X86_CR4_PVI,        edx, bit(X86_FEATURE_VME));
11188         cr4_fixed1_update(X86_CR4_TSD,        edx, bit(X86_FEATURE_TSC));
11189         cr4_fixed1_update(X86_CR4_DE,         edx, bit(X86_FEATURE_DE));
11190         cr4_fixed1_update(X86_CR4_PSE,        edx, bit(X86_FEATURE_PSE));
11191         cr4_fixed1_update(X86_CR4_PAE,        edx, bit(X86_FEATURE_PAE));
11192         cr4_fixed1_update(X86_CR4_MCE,        edx, bit(X86_FEATURE_MCE));
11193         cr4_fixed1_update(X86_CR4_PGE,        edx, bit(X86_FEATURE_PGE));
11194         cr4_fixed1_update(X86_CR4_OSFXSR,     edx, bit(X86_FEATURE_FXSR));
11195         cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM));
11196         cr4_fixed1_update(X86_CR4_VMXE,       ecx, bit(X86_FEATURE_VMX));
11197         cr4_fixed1_update(X86_CR4_SMXE,       ecx, bit(X86_FEATURE_SMX));
11198         cr4_fixed1_update(X86_CR4_PCIDE,      ecx, bit(X86_FEATURE_PCID));
11199         cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, bit(X86_FEATURE_XSAVE));
11200
11201         entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
11202         cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, bit(X86_FEATURE_FSGSBASE));
11203         cr4_fixed1_update(X86_CR4_SMEP,       ebx, bit(X86_FEATURE_SMEP));
11204         cr4_fixed1_update(X86_CR4_SMAP,       ebx, bit(X86_FEATURE_SMAP));
11205         cr4_fixed1_update(X86_CR4_PKE,        ecx, bit(X86_FEATURE_PKU));
11206         cr4_fixed1_update(X86_CR4_UMIP,       ecx, bit(X86_FEATURE_UMIP));
11207
11208 #undef cr4_fixed1_update
11209 }
11210
11211 static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
11212 {
11213         struct vcpu_vmx *vmx = to_vmx(vcpu);
11214
11215         if (kvm_mpx_supported()) {
11216                 bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
11217
11218                 if (mpx_enabled) {
11219                         vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
11220                         vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
11221                 } else {
11222                         vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
11223                         vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
11224                 }
11225         }
11226 }
11227
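/*
 * Recompute state that depends on the guest's CPUID: the secondary
 * execution controls, the valid bits of MSR_IA32_FEATURE_CONTROL, and the
 * nested VMX capability MSRs.
 */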
11228 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
11229 {
11230         struct vcpu_vmx *vmx = to_vmx(vcpu);
11231
11232         if (cpu_has_secondary_exec_ctrls()) {
11233                 vmx_compute_secondary_exec_control(vmx);
11234                 vmcs_set_secondary_exec_control(vmx->secondary_exec_control);
11235         }
11236
11237         if (nested_vmx_allowed(vcpu))
11238                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
11239                         FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
11240         else
11241                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
11242                         ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
11243
11244         if (nested_vmx_allowed(vcpu)) {
11245                 nested_vmx_cr_fixed1_bits_update(vcpu);
11246                 nested_vmx_entry_exit_ctls_update(vcpu);
11247         }
11248 }
11249
11250 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
11251 {
11252         if (func == 1 && nested)
11253                 entry->ecx |= bit(X86_FEATURE_VMX);
11254 }
11255
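/*
 * Reflect an EPT violation, EPT misconfig or PML-full event hit while
 * running L2 back to L1 as a nested VM-exit, recording the faulting guest
 * physical address in vmcs12.
 */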
11256 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
11257                 struct x86_exception *fault)
11258 {
11259         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
11260         struct vcpu_vmx *vmx = to_vmx(vcpu);
11261         u32 exit_reason;
11262         unsigned long exit_qualification = vcpu->arch.exit_qualification;
11263
11264         if (vmx->nested.pml_full) {
11265                 exit_reason = EXIT_REASON_PML_FULL;
11266                 vmx->nested.pml_full = false;
11267                 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
11268         } else if (fault->error_code & PFERR_RSVD_MASK)
11269                 exit_reason = EXIT_REASON_EPT_MISCONFIG;
11270         else
11271                 exit_reason = EXIT_REASON_EPT_VIOLATION;
11272
11273         nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
11274         vmcs12->guest_physical_address = fault->address;
11275 }
11276
11277 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
11278 {
11279         return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
11280 }
11281
11282 /* Callbacks for nested_ept_init_mmu_context: */
11283
11284 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
11285 {
11286         /* return the page table to be shadowed - in our case, EPT12 */
11287         return get_vmcs12(vcpu)->ept_pointer;
11288 }
11289
11290 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
11291 {
11292         WARN_ON(mmu_is_nested(vcpu));
11293
11294         kvm_init_shadow_ept_mmu(vcpu,
11295                         to_vmx(vcpu)->nested.msrs.ept_caps &
11296                         VMX_EPT_EXECUTE_ONLY_BIT,
11297                         nested_ept_ad_enabled(vcpu),
11298                         nested_ept_get_cr3(vcpu));
11299         vcpu->arch.mmu.set_cr3           = vmx_set_cr3;
11300         vcpu->arch.mmu.get_cr3           = nested_ept_get_cr3;
11301         vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
11302
11303         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
11304 }
11305
11306 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
11307 {
11308         vcpu->arch.walk_mmu = &vcpu->arch.mmu;
11309 }
11310
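/*
 * Decide whether a page fault with the given error code must cause a
 * VM-exit from L2 to L1, based on vmcs12's exception bitmap and the
 * page-fault error-code mask/match fields.
 */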
11311 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
11312                                             u16 error_code)
11313 {
11314         bool inequality, bit;
11315
11316         bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
11317         inequality =
11318                 (error_code & vmcs12->page_fault_error_code_mask) !=
11319                  vmcs12->page_fault_error_code_match;
11320         return inequality ^ bit;
11321 }
11322
11323 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
11324                 struct x86_exception *fault)
11325 {
11326         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
11327
11328         WARN_ON(!is_guest_mode(vcpu));
11329
11330         if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
11331                 !to_vmx(vcpu)->nested.nested_run_pending) {
11332                 vmcs12->vm_exit_intr_error_code = fault->error_code;
11333                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
11334                                   PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
11335                                   INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
11336                                   fault->address);
11337         } else {
11338                 kvm_inject_page_fault(vcpu, fault);
11339         }
11340 }
11341
11342 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
11343                                                  struct vmcs12 *vmcs12);
11344
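/*
 * Map the guest pages referenced by vmcs12 (APIC-access page, virtual-APIC
 * page, posted-interrupt descriptor) into vmcs02, and enable the vmcs02
 * MSR bitmap only if the L0 and L1 bitmaps could be merged.
 */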
11345 static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
11346 {
11347         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
11348         struct vcpu_vmx *vmx = to_vmx(vcpu);
11349         struct page *page;
11350         u64 hpa;
11351
11352         if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
11353                 /*
11354                  * Translate L1 physical address to host physical
11355                  * address for vmcs02. Keep the page pinned, so this
11356                  * physical address remains valid. We keep a reference
11357                  * to it so we can release it later.
11358                  */
11359                 if (vmx->nested.apic_access_page) { /* shouldn't happen */
11360                         kvm_release_page_dirty(vmx->nested.apic_access_page);
11361                         vmx->nested.apic_access_page = NULL;
11362                 }
11363                 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
11364                 /*
11365                  * If the translation failed, it does not matter: this
11366                  * feature asks to exit when the given address is accessed,
11367                  * and if the address can never be accessed, the feature
11368                  * won't do anything anyway.
11369                  */
11370                 if (!is_error_page(page)) {
11371                         vmx->nested.apic_access_page = page;
11372                         hpa = page_to_phys(vmx->nested.apic_access_page);
11373                         vmcs_write64(APIC_ACCESS_ADDR, hpa);
11374                 } else {
11375                         vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
11376                                         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
11377                 }
11378         }
11379
11380         if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
11381                 if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
11382                         kvm_release_page_dirty(vmx->nested.virtual_apic_page);
11383                         vmx->nested.virtual_apic_page = NULL;
11384                 }
11385                 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
11386
11387                 /*
11388                  * If translation failed, VM entry will fail because
11389                  * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
11390                  * Failing the VM entry is _not_ what the processor
11391                  * does, but it's basically the only possibility we
11392                  * have.  We could still enter the guest if CR8 load
11393                  * exits are enabled, CR8 store exits are enabled, and
11394                  * virtualize APIC access is disabled; in this case
11395                  * the processor would never use the TPR shadow and we
11396                  * could simply clear the bit from the execution
11397                  * control.  But such a configuration is useless, so
11398                  * let's keep the code simple.
11399                  */
11400                 if (!is_error_page(page)) {
11401                         vmx->nested.virtual_apic_page = page;
11402                         hpa = page_to_phys(vmx->nested.virtual_apic_page);
11403                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
11404                 }
11405         }
11406
11407         if (nested_cpu_has_posted_intr(vmcs12)) {
11408                 if (vmx->nested.pi_desc_page) { /* shouldn't happen */
11409                         kunmap(vmx->nested.pi_desc_page);
11410                         kvm_release_page_dirty(vmx->nested.pi_desc_page);
11411                         vmx->nested.pi_desc_page = NULL;
11412                 }
11413                 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
11414                 if (is_error_page(page))
11415                         return;
11416                 vmx->nested.pi_desc_page = page;
11417                 vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
11418                 vmx->nested.pi_desc =
11419                         (struct pi_desc *)((void *)vmx->nested.pi_desc +
11420                         (unsigned long)(vmcs12->posted_intr_desc_addr &
11421                         (PAGE_SIZE - 1)));
11422                 vmcs_write64(POSTED_INTR_DESC_ADDR,
11423                         page_to_phys(vmx->nested.pi_desc_page) +
11424                         (unsigned long)(vmcs12->posted_intr_desc_addr &
11425                         (PAGE_SIZE - 1)));
11426         }
11427         if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
11428                 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
11429                               CPU_BASED_USE_MSR_BITMAPS);
11430         else
11431                 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
11432                                 CPU_BASED_USE_MSR_BITMAPS);
11433 }
11434
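/*
 * Emulate the VMX-preemption timer for L2 with an hrtimer: scale the vmcs12
 * timer value by the emulated rate and the vCPU's TSC frequency.
 */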
11435 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
11436 {
11437         u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
11438         struct vcpu_vmx *vmx = to_vmx(vcpu);
11439
11440         /*
11441          * A timer value of zero is architecturally guaranteed to cause
11442          * a VMExit prior to executing any instructions in the guest.
11443          */
11444         if (preemption_timeout == 0) {
11445                 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
11446                 return;
11447         }
11448
11449         if (vcpu->arch.virtual_tsc_khz == 0)
11450                 return;
11451
11452         preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
11453         preemption_timeout *= 1000000;
11454         do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
11455         hrtimer_start(&vmx->nested.preemption_timer,
11456                       ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
11457 }
11458
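/*
 * The nested_vmx_check_*_controls() helpers below validate individual
 * vmcs12 control fields; each returns 0 if the field is acceptable and
 * -EINVAL otherwise.
 */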
11459 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
11460                                                struct vmcs12 *vmcs12)
11461 {
11462         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
11463                 return 0;
11464
11465         if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
11466             !page_address_valid(vcpu, vmcs12->io_bitmap_b))
11467                 return -EINVAL;
11468
11469         return 0;
11470 }
11471
11472 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
11473                                                 struct vmcs12 *vmcs12)
11474 {
11475         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
11476                 return 0;
11477
11478         if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
11479                 return -EINVAL;
11480
11481         return 0;
11482 }
11483
11484 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
11485                                                 struct vmcs12 *vmcs12)
11486 {
11487         if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
11488                 return 0;
11489
11490         if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
11491                 return -EINVAL;
11492
11493         return 0;
11494 }
11495
11496 /*
11497  * Merge L0's and L1's MSR bitmaps; return false to indicate that
11498  * we do not use the hardware MSR bitmap.
11499  */
11500 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
11501                                                  struct vmcs12 *vmcs12)
11502 {
11503         int msr;
11504         struct page *page;
11505         unsigned long *msr_bitmap_l1;
11506         unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
11507         /*
11508          * pred_cmd & spec_ctrl are trying to verify two things:
11509          *
11510          * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
11511          *    ensures that we do not accidentally generate an L02 MSR bitmap
11512          *    from the L12 MSR bitmap that is too permissive.
11513          * 2. That L1 or L2s have actually used the MSR. This avoids
11514          *    unnecessary merging of the bitmap if the MSR is unused. This
11515          *    works properly because we only update the L01 MSR bitmap lazily.
11516          *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
11517          *    updated to reflect this when L1 (or its L2s) actually write to
11518          *    the MSR.
11519          */
11520         bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
11521         bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
11522
11523         /* Nothing to do if the MSR bitmap is not in use.  */
11524         if (!cpu_has_vmx_msr_bitmap() ||
11525             !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
11526                 return false;
11527
11528         if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
11529             !pred_cmd && !spec_ctrl)
11530                 return false;
11531
11532         page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
11533         if (is_error_page(page))
11534                 return false;
11535
11536         msr_bitmap_l1 = (unsigned long *)kmap(page);
11537         if (nested_cpu_has_apic_reg_virt(vmcs12)) {
11538                 /*
11539                  * L0 need not intercept reads for MSRs between 0x800 and 0x8ff; it
11540                  * just lets the processor take the value from the virtual-APIC page,
11541                  * so take those 256 bits directly from the L1 bitmap.
11542                  */
11543                 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
11544                         unsigned word = msr / BITS_PER_LONG;
11545                         msr_bitmap_l0[word] = msr_bitmap_l1[word];
11546                         msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
11547                 }
11548         } else {
11549                 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
11550                         unsigned word = msr / BITS_PER_LONG;
11551                         msr_bitmap_l0[word] = ~0;
11552                         msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
11553                 }
11554         }
11555
11556         nested_vmx_disable_intercept_for_msr(
11557                 msr_bitmap_l1, msr_bitmap_l0,
11558                 X2APIC_MSR(APIC_TASKPRI),
11559                 MSR_TYPE_W);
11560
11561         if (nested_cpu_has_vid(vmcs12)) {
11562                 nested_vmx_disable_intercept_for_msr(
11563                         msr_bitmap_l1, msr_bitmap_l0,
11564                         X2APIC_MSR(APIC_EOI),
11565                         MSR_TYPE_W);
11566                 nested_vmx_disable_intercept_for_msr(
11567                         msr_bitmap_l1, msr_bitmap_l0,
11568                         X2APIC_MSR(APIC_SELF_IPI),
11569                         MSR_TYPE_W);
11570         }
11571
11572         if (spec_ctrl)
11573                 nested_vmx_disable_intercept_for_msr(
11574                                         msr_bitmap_l1, msr_bitmap_l0,
11575                                         MSR_IA32_SPEC_CTRL,
11576                                         MSR_TYPE_R | MSR_TYPE_W);
11577
11578         if (pred_cmd)
11579                 nested_vmx_disable_intercept_for_msr(
11580                                         msr_bitmap_l1, msr_bitmap_l0,
11581                                         MSR_IA32_PRED_CMD,
11582                                         MSR_TYPE_W);
11583
11584         kunmap(page);
11585         kvm_release_page_clean(page);
11586
11587         return true;
11588 }
11589
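/*
 * Copy the shadow vmcs12 from the guest page referenced by the VMCS link
 * pointer into the per-vCPU cache.
 */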
11590 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
11591                                        struct vmcs12 *vmcs12)
11592 {
11593         struct vmcs12 *shadow;
11594         struct page *page;
11595
11596         if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
11597             vmcs12->vmcs_link_pointer == -1ull)
11598                 return;
11599
11600         shadow = get_shadow_vmcs12(vcpu);
11601         page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
11602
11603         memcpy(shadow, kmap(page), VMCS12_SIZE);
11604
11605         kunmap(page);
11606         kvm_release_page_clean(page);
11607 }
11608
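/*
 * Write the cached shadow vmcs12 back to the guest page referenced by the
 * VMCS link pointer.
 */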
11609 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
11610                                               struct vmcs12 *vmcs12)
11611 {
11612         struct vcpu_vmx *vmx = to_vmx(vcpu);
11613
11614         if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
11615             vmcs12->vmcs_link_pointer == -1ull)
11616                 return;
11617
11618         kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
11619                         get_shadow_vmcs12(vcpu), VMCS12_SIZE);
11620 }
11621
11622 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
11623                                           struct vmcs12 *vmcs12)
11624 {
11625         if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
11626             !page_address_valid(vcpu, vmcs12->apic_access_addr))
11627                 return -EINVAL;
11628         else
11629                 return 0;
11630 }
11631
11632 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
11633                                            struct vmcs12 *vmcs12)
11634 {
11635         if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
11636             !nested_cpu_has_apic_reg_virt(vmcs12) &&
11637             !nested_cpu_has_vid(vmcs12) &&
11638             !nested_cpu_has_posted_intr(vmcs12))
11639                 return 0;
11640
11641         /*
11642          * If virtualize x2apic mode is enabled,
11643          * virtualize apic access must be disabled.
11644          */
11645         if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
11646             nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
11647                 return -EINVAL;
11648
11649         /*
11650          * If virtual interrupt delivery is enabled,
11651          * we must exit on external interrupts.
11652          */
11653         if (nested_cpu_has_vid(vmcs12) &&
11654            !nested_exit_on_intr(vcpu))
11655                 return -EINVAL;
11656
11657         /*
11658          * Bits 15:8 must be zero in posted_intr_nv;
11659          * the descriptor address has already been checked
11660          * in nested_get_vmcs12_pages.
11661          *
11662          * Bits 5:0 of posted_intr_desc_addr must be zero.
11663          */
11664         if (nested_cpu_has_posted_intr(vmcs12) &&
11665            (!nested_cpu_has_vid(vmcs12) ||
11666             !nested_exit_intr_ack_set(vcpu) ||
11667             (vmcs12->posted_intr_nv & 0xff00) ||
11668             (vmcs12->posted_intr_desc_addr & 0x3f) ||
11669             (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
11670                 return -EINVAL;
11671
11672         /* tpr shadow is needed by all apicv features. */
11673         if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
11674                 return -EINVAL;
11675
11676         return 0;
11677 }
11678
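/*
 * Validate one MSR-switch area: the address must be 16-byte aligned and the
 * whole array of vmx_msr_entry structures must fit within the guest's
 * physical address width. A count of zero is always valid.
 */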
11679 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
11680                                        unsigned long count_field,
11681                                        unsigned long addr_field)
11682 {
11683         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
11684         int maxphyaddr;
11685         u64 count, addr;
11686
11687         if (vmcs12_read_any(vmcs12, count_field, &count) ||
11688             vmcs12_read_any(vmcs12, addr_field, &addr)) {
11689                 WARN_ON(1);
11690                 return -EINVAL;
11691         }
11692         if (count == 0)
11693                 return 0;
11694         maxphyaddr = cpuid_maxphyaddr(vcpu);
11695         if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
11696             (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
11697                 pr_debug_ratelimited(
11698                         "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
11699                         addr_field, maxphyaddr, count, addr);
11700                 return -EINVAL;
11701         }
11702         return 0;
11703 }
11704
11705 static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
11706                                                 struct vmcs12 *vmcs12)
11707 {
11708         if (vmcs12->vm_exit_msr_load_count == 0 &&
11709             vmcs12->vm_exit_msr_store_count == 0 &&
11710             vmcs12->vm_entry_msr_load_count == 0)
11711                 return 0; /* Fast path */
11712         if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
11713                                         VM_EXIT_MSR_LOAD_ADDR) ||
11714             nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
11715                                         VM_EXIT_MSR_STORE_ADDR) ||
11716             nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
11717                                         VM_ENTRY_MSR_LOAD_ADDR))
11718                 return -EINVAL;
11719         return 0;
11720 }
11721
11722 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
11723                                          struct vmcs12 *vmcs12)
11724 {
11725         if (!nested_cpu_has_pml(vmcs12))
11726                 return 0;
11727
11728         if (!nested_cpu_has_ept(vmcs12) ||
11729             !page_address_valid(vcpu, vmcs12->pml_address))
11730                 return -EINVAL;
11731
11732         return 0;
11733 }
11734
11735 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
11736                                                  struct vmcs12 *vmcs12)
11737 {
11738         if (!nested_cpu_has_shadow_vmcs(vmcs12))
11739                 return 0;
11740
11741         if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
11742             !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
11743                 return -EINVAL;
11744
11745         return 0;
11746 }
11747
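/*
 * Checks common to the VM-entry MSR-load and VM-exit MSR-store lists:
 * reject x2APIC MSRs while x2APIC is enabled, the microcode MSRs, and any
 * entry with reserved bits set.
 */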
11748 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
11749                                        struct vmx_msr_entry *e)
11750 {
11751         /* x2APIC MSR accesses are not allowed */
11752         if ((vcpu->arch.apic_base & X2APIC_ENABLE) && (e->index >> 8) == 0x8)
11753                 return -EINVAL;
11754         if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
11755             e->index == MSR_IA32_UCODE_REV)
11756                 return -EINVAL;
11757         if (e->reserved != 0)
11758                 return -EINVAL;
11759         return 0;
11760 }
11761
11762 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
11763                                      struct vmx_msr_entry *e)
11764 {
11765         if (e->index == MSR_FS_BASE ||
11766             e->index == MSR_GS_BASE ||
11767             e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
11768             nested_vmx_msr_check_common(vcpu, e))
11769                 return -EINVAL;
11770         return 0;
11771 }
11772
11773 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
11774                                       struct vmx_msr_entry *e)
11775 {
11776         if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
11777             nested_vmx_msr_check_common(vcpu, e))
11778                 return -EINVAL;
11779         return 0;
11780 }
11781
11782 /*
11783  * Load the guest's/host's MSRs from the given list at nested entry/exit.
11784  * Return 0 on success, or the 1-based index of the failing entry on failure.
11785  */
11786 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
11787 {
11788         u32 i;
11789         struct vmx_msr_entry e;
11790         struct msr_data msr;
11791
11792         msr.host_initiated = false;
11793         for (i = 0; i < count; i++) {
11794                 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
11795                                         &e, sizeof(e))) {
11796                         pr_debug_ratelimited(
11797                                 "%s cannot read MSR entry (%u, 0x%08llx)\n",
11798                                 __func__, i, gpa + i * sizeof(e));
11799                         goto fail;
11800                 }
11801                 if (nested_vmx_load_msr_check(vcpu, &e)) {
11802                         pr_debug_ratelimited(
11803                                 "%s check failed (%u, 0x%x, 0x%x)\n",
11804                                 __func__, i, e.index, e.reserved);
11805                         goto fail;
11806                 }
11807                 msr.index = e.index;
11808                 msr.data = e.value;
11809                 if (kvm_set_msr(vcpu, &msr)) {
11810                         pr_debug_ratelimited(
11811                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
11812                                 __func__, i, e.index, e.value);
11813                         goto fail;
11814                 }
11815         }
11816         return 0;
11817 fail:
11818         return i + 1;
11819 }
11820
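/*
 * Store the current values of the guest's MSRs into the VM-exit MSR-store
 * area at the given guest physical address. Returns 0 on success, -EINVAL
 * if an entry cannot be read, validated or written back.
 */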
11821 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
11822 {
11823         u32 i;
11824         struct vmx_msr_entry e;
11825
11826         for (i = 0; i < count; i++) {
11827                 struct msr_data msr_info;
11828                 if (kvm_vcpu_read_guest(vcpu,
11829                                         gpa + i * sizeof(e),
11830                                         &e, 2 * sizeof(u32))) {
11831                         pr_debug_ratelimited(
11832                                 "%s cannot read MSR entry (%u, 0x%08llx)\n",
11833                                 __func__, i, gpa + i * sizeof(e));
11834                         return -EINVAL;
11835                 }
11836                 if (nested_vmx_store_msr_check(vcpu, &e)) {
11837                         pr_debug_ratelimited(
11838                                 "%s check failed (%u, 0x%x, 0x%x)\n",
11839                                 __func__, i, e.index, e.reserved);
11840                         return -EINVAL;
11841                 }
11842                 msr_info.host_initiated = false;
11843                 msr_info.index = e.index;
11844                 if (kvm_get_msr(vcpu, &msr_info)) {
11845                         pr_debug_ratelimited(
11846                                 "%s cannot read MSR (%u, 0x%x)\n",
11847                                 __func__, i, e.index);
11848                         return -EINVAL;
11849                 }
11850                 if (kvm_vcpu_write_guest(vcpu,
11851                                          gpa + i * sizeof(e) +
11852                                              offsetof(struct vmx_msr_entry, value),
11853                                          &msr_info.data, sizeof(msr_info.data))) {
11854                         pr_debug_ratelimited(
11855                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
11856                                 __func__, i, e.index, msr_info.data);
11857                         return -EINVAL;
11858                 }
11859         }
11860         return 0;
11861 }
11862
11863 static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
11864 {
11865         unsigned long invalid_mask;
11866
11867         invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
11868         return (val & invalid_mask) == 0;
11869 }
11870
11871 /*
11872  * Load the guest's/host's CR3 at nested entry/exit. nested_ept is true if we
11873  * are emulating VM entry into a guest with EPT enabled.
11874  * Returns 0 on success, 1 on failure. On failure, the invalid-state exit
11875  * qualification code is stored in *entry_failure_code.
11876  */
11877 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
11878                                u32 *entry_failure_code)
11879 {
11880         if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
11881                 if (!nested_cr3_valid(vcpu, cr3)) {
11882                         *entry_failure_code = ENTRY_FAIL_DEFAULT;
11883                         return 1;
11884                 }
11885
11886                 /*
11887                  * If PAE paging and EPT are both on, CR3 is not used by the CPU and
11888                  * must not be dereferenced.
11889                  */
11890                 if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
11891                     !nested_ept) {
11892                         if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
11893                                 *entry_failure_code = ENTRY_FAIL_PDPTE;
11894                                 return 1;
11895                         }
11896                 }
11897         }
11898
11899         if (!nested_ept)
11900                 kvm_mmu_new_cr3(vcpu, cr3, false);
11901
11902         vcpu->arch.cr3 = cr3;
11903         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
11904
11905         kvm_init_mmu(vcpu, false);
11906
11907         return 0;
11908 }
11909
11910 /*
11911  * Returns true if KVM is able to configure the CPU to tag TLB entries
11912  * populated by L2 differently than TLB entries populated
11913  * by L1.
11914  *
11915  * If L1 uses EPT, then TLB entries are tagged with different EPTP.
11916  *
11917  * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
11918  * with different VPID (L1 entries are tagged with vmx->vpid
11919  * while L2 entries are tagged with vmx->nested.vpid02).
11920  */
11921 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
11922 {
11923         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
11924
11925         return nested_cpu_has_ept(vmcs12) ||
11926                (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
11927 }
11928
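/*
 * Compute the EFER value L2 will run with: either the value loaded from
 * vmcs12 (when the "load IA32_EFER" entry control is used), or L1's EFER
 * with LMA/LME forced to match the "IA-32e mode guest" entry control.
 */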
11929 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
11930 {
11931         if (vmx->nested.nested_run_pending &&
11932             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
11933                 return vmcs12->guest_ia32_efer;
11934         else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
11935                 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
11936         else
11937                 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
11938 }
11939
11940 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
11941 {
11942         /*
11943          * If vmcs02 hasn't been initialized, set the constant vmcs02 state
11944          * according to L0's settings (vmcs12 is irrelevant here).  Host
11945          * fields that come from L0 and are not constant, e.g. HOST_CR3,
11946          * will be set as needed prior to VMLAUNCH/VMRESUME.
11947          */
11948         if (vmx->nested.vmcs02_initialized)
11949                 return;
11950         vmx->nested.vmcs02_initialized = true;
11951
11952         /* All VMFUNCs are currently emulated through L0 vmexits.  */
11953         if (cpu_has_vmx_vmfunc())
11954                 vmcs_write64(VM_FUNCTION_CONTROL, 0);
11955
11956         if (cpu_has_vmx_posted_intr())
11957                 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
11958
11959         if (cpu_has_vmx_msr_bitmap())
11960                 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
11961
11962         if (enable_pml)
11963                 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
11964
11965         /*
11966          * Set the MSR load/store lists to match L0's settings.  Only the
11967          * addresses are constant (for vmcs02); the counts can change based
11968          * on L2's behavior, e.g. switching to/from long mode.
11969          */
11970         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
11971         vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
11972         vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
11973
11974         vmx_set_constant_host_state(vmx);
11975 }
11976
11977 static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx,
11978                                       struct vmcs12 *vmcs12)
11979 {
11980         prepare_vmcs02_constant_state(vmx);
11981
11982         vmcs_write64(VMCS_LINK_POINTER, -1ull);
11983
11984         if (enable_vpid) {
11985                 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
11986                         vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
11987                 else
11988                         vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
11989         }
11990 }
11991
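/*
 * Set up the vmcs02 control fields (pin-based, primary and secondary
 * execution controls, VM-entry/VM-exit controls and the event-injection
 * fields) by combining L0's configuration with L1's requests from vmcs12.
 */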
11992 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
11993 {
11994         u32 exec_control, vmcs12_exec_ctrl;
11995         u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
11996
11997         if (vmx->nested.dirty_vmcs12)
11998                 prepare_vmcs02_early_full(vmx, vmcs12);
11999
12000         /*
12001          * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
12002          * entry, but only if the current (host) sp changed from the value
12003          * we wrote last (vmx->host_rsp).  This cache is no longer relevant
12004          * if we switch vmcs, and rather than hold a separate cache per vmcs,
12005          * here we just force the write to happen on entry.
12006          */
12007         vmx->host_rsp = 0;
12008
12009         /*
12010          * PIN CONTROLS
12011          */
12012         exec_control = vmcs12->pin_based_vm_exec_control;
12013
12014         /* Preemption timer setting is computed directly in vmx_vcpu_run.  */
12015         exec_control |= vmcs_config.pin_based_exec_ctrl;
12016         exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
12017         vmx->loaded_vmcs->hv_timer_armed = false;
12018
12019         /* Posted interrupts setting is only taken from vmcs12.  */
12020         if (nested_cpu_has_posted_intr(vmcs12)) {
12021                 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
12022                 vmx->nested.pi_pending = false;
12023         } else {
12024                 exec_control &= ~PIN_BASED_POSTED_INTR;
12025         }
12026         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
12027
12028         /*
12029          * EXEC CONTROLS
12030          */
12031         exec_control = vmx_exec_control(vmx); /* L0's desires */
12032         exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
12033         exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
12034         exec_control &= ~CPU_BASED_TPR_SHADOW;
12035         exec_control |= vmcs12->cpu_based_vm_exec_control;
12036
12037         /*
12038          * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
12039          * nested_get_vmcs12_pages can't fix it up, the illegal value
12040          * will result in a VM entry failure.
12041          */
12042         if (exec_control & CPU_BASED_TPR_SHADOW) {
12043                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
12044                 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
12045         } else {
12046 #ifdef CONFIG_X86_64
12047                 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
12048                                 CPU_BASED_CR8_STORE_EXITING;
12049 #endif
12050         }
12051
12052         /*
12053          * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
12054          * for I/O port accesses.
12055          */
12056         exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
12057         exec_control |= CPU_BASED_UNCOND_IO_EXITING;
12058         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
12059
12060         /*
12061          * SECONDARY EXEC CONTROLS
12062          */
12063         if (cpu_has_secondary_exec_ctrls()) {
12064                 exec_control = vmx->secondary_exec_control;
12065
12066                 /* Take the following fields only from vmcs12 */
12067                 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
12068                                   SECONDARY_EXEC_ENABLE_INVPCID |
12069                                   SECONDARY_EXEC_RDTSCP |
12070                                   SECONDARY_EXEC_XSAVES |
12071                                   SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
12072                                   SECONDARY_EXEC_APIC_REGISTER_VIRT |
12073                                   SECONDARY_EXEC_ENABLE_VMFUNC);
12074                 if (nested_cpu_has(vmcs12,
12075                                    CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
12076                         vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
12077                                 ~SECONDARY_EXEC_ENABLE_PML;
12078                         exec_control |= vmcs12_exec_ctrl;
12079                 }
12080
12081                 /* VMCS shadowing for L2 is emulated for now */
12082                 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
12083
12084                 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
12085                         vmcs_write16(GUEST_INTR_STATUS,
12086                                 vmcs12->guest_intr_status);
12087
12088                 /*
12089                  * Write an illegal value to APIC_ACCESS_ADDR. Later,
12090                  * nested_get_vmcs12_pages will either fix it up or
12091                  * remove the VM execution control.
12092                  */
12093                 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
12094                         vmcs_write64(APIC_ACCESS_ADDR, -1ull);
12095
12096                 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
12097                         vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
12098
12099                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
12100         }
12101
12102         /*
12103          * ENTRY CONTROLS
12104          *
12105          * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
12106          * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
12107          * on the related bits (if supported by the CPU) in the hope that
12108          * we can avoid VMWrites during vmx_set_efer().
12109          */
12110         exec_control = (vmcs12->vm_entry_controls | vmcs_config.vmentry_ctrl) &
12111                         ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
12112         if (cpu_has_load_ia32_efer) {
12113                 if (guest_efer & EFER_LMA)
12114                         exec_control |= VM_ENTRY_IA32E_MODE;
12115                 if (guest_efer != host_efer)
12116                         exec_control |= VM_ENTRY_LOAD_IA32_EFER;
12117         }
12118         vm_entry_controls_init(vmx, exec_control);
12119
12120         /*
12121          * EXIT CONTROLS
12122          *
12123          * L2->L1 exit controls are emulated - the hardware exit is to L0 so
12124          * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
12125          * bits may be modified by vmx_set_efer() in prepare_vmcs02().
12126          */
12127         exec_control = vmcs_config.vmexit_ctrl;
12128         if (cpu_has_load_ia32_efer && guest_efer != host_efer)
12129                 exec_control |= VM_EXIT_LOAD_IA32_EFER;
12130         vm_exit_controls_init(vmx, exec_control);
12131
12132         /*
12133          * Conceptually we want to copy the PML address and index from
12134          * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
12135          * since we always flush the log on each vmexit and never change
12136          * the PML address (once set), this happens to be equivalent to
12137          * simply resetting the index in vmcs02.
12138          */
12139         if (enable_pml)
12140                 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
12141
12142         /*
12143          * Interrupt/Exception Fields
12144          */
12145         if (vmx->nested.nested_run_pending) {
12146                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
12147                              vmcs12->vm_entry_intr_info_field);
12148                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
12149                              vmcs12->vm_entry_exception_error_code);
12150                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
12151                              vmcs12->vm_entry_instruction_len);
12152                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
12153                              vmcs12->guest_interruptibility_info);
12154                 vmx->loaded_vmcs->nmi_known_unmasked =
12155                         !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
12156         } else {
12157                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
12158         }
12159 }
12160
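/*
 * Copy into vmcs02 the bulk of the vmcs12 guest-state fields that do not
 * need to be refreshed on every nested entry; called only when vmcs12 may
 * have been modified (dirty_vmcs12).
 */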
12161 static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
12162 {
12163         vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
12164         vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
12165         vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
12166         vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
12167         vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
12168         vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
12169         vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
12170         vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
12171         vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
12172         vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
12173         vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
12174         vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
12175         vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
12176         vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
12177         vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
12178         vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
12179         vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
12180         vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
12181         vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
12182         vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
12183         vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
12184         vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
12185         vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
12186         vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
12187         vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
12188         vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
12189         vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
12190         vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
12191         vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
12192         vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
12193         vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
12194
12195         vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
12196         vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
12197                 vmcs12->guest_pending_dbg_exceptions);
12198         vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
12199         vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
12200
12201         if (nested_cpu_has_xsaves(vmcs12))
12202                 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
12203
12204         /*
12205          * Whether page-faults are trapped is determined by a combination of
12206          * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
12207          * If enable_ept, L0 doesn't care about page faults and we should
12208          * set all of these to L1's desires. However, if !enable_ept, L0 does
12209          * care about (at least some) page faults, and because it is not easy
12210          * (if at all possible?) to merge L0 and L1's desires, we simply ask
12211          * to exit on each and every L2 page fault. This is done by setting
12212          * MASK=MATCH=0 and (see below) EB.PF=1.
12213          * Note that below we don't need special code to set EB.PF beyond the
12214          * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
12215          * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
12216          * !enable_ept, EB.PF is 1, so the "or" will always be 1.
12217          */
12218         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
12219                 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
12220         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
12221                 enable_ept ? vmcs12->page_fault_error_code_match : 0);
12222
12223         if (cpu_has_vmx_apicv()) {
12224                 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
12225                 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
12226                 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
12227                 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
12228         }
12229
12230         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
12231         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
12232
12233         set_cr4_guest_host_mask(vmx);
12234
12235         if (kvm_mpx_supported()) {
12236                 if (vmx->nested.nested_run_pending &&
12237                         (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
12238                         vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
12239                 else
12240                         vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
12241         }
12242
12243         /*
12244          * L1 may access the L2's PDPTR, so save them to construct vmcs12
12245          */
12246         if (enable_ept) {
12247                 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
12248                 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
12249                 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
12250                 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
12251         }
12252 }
12253
12254 /*
12255  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
12256  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
12257  * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
12258  * guest in a way that is appropriate both to L1's requests and to our
12259  * needs. In addition to modifying the active vmcs (which is vmcs02), this
12260  * function also has additional necessary side effects, like setting various
12261  * vcpu->arch fields.
12262  * Returns 0 on success, 1 on failure. On failure, the invalid-state exit
12263  * qualification code is stored in *entry_failure_code.
12264  */
12265 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
12266                           u32 *entry_failure_code)
12267 {
12268         struct vcpu_vmx *vmx = to_vmx(vcpu);
12269
12270         if (vmx->nested.dirty_vmcs12) {
12271                 prepare_vmcs02_full(vmx, vmcs12);
12272                 vmx->nested.dirty_vmcs12 = false;
12273         }
12274
12275         /*
12276          * First, the fields that are shadowed.  This must be kept in sync
12277          * with vmx_shadow_fields.h.
12278          */
12279
12280         vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
12281         vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
12282         vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
12283         vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
12284         vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
12285
12286         if (vmx->nested.nested_run_pending &&
12287             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
12288                 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
12289                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
12290         } else {
12291                 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
12292                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
12293         }
12294         vmx_set_rflags(vcpu, vmcs12->guest_rflags);
12295
12296         vmx->nested.preemption_timer_expired = false;
12297         if (nested_cpu_has_preemption_timer(vmcs12))
12298                 vmx_start_preemption_timer(vcpu);
12299
12300         /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
12301          * bitwise-or of what L1 wants to trap for L2, and what we want to
12302          * trap. Note that CR0.TS also needs updating - we do this later.
12303          */
12304         update_exception_bitmap(vcpu);
12305         vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
12306         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
12307
12308         if (vmx->nested.nested_run_pending &&
12309             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
12310                 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
12311                 vcpu->arch.pat = vmcs12->guest_ia32_pat;
12312         } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
12313                 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
12314         }
12315
12316         vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
12317
12318         if (kvm_has_tsc_control)
12319                 decache_tsc_multiplier(vmx);
12320
12321         if (enable_vpid) {
12322                 /*
12323                  * There is no direct mapping between vpid02 and vpid12: the
12324                  * vpid02 is per-vCPU for L0 and reused, while a change of
12325                  * vpid12 is handled with one INVVPID during nested vmentry.
12326                  * The vpid12 is allocated by L1 for L2, so it will not
12327                  * influence the global bitmap (for vpid01 and vpid02 allocation)
12328                  * even if L1 spawns a lot of nested vCPUs.
12329                  */
12330                 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
12331                         if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
12332                                 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
12333                                 __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
12334                         }
12335                 } else {
12336                         /*
12337                          * If L1 uses EPT, then L0 needs to execute INVEPT on
12338                          * EPTP02 instead of EPTP01. Therefore, delay the TLB
12339                          * flush until vmcs02->eptp is fully updated by
12340                          * KVM_REQ_LOAD_CR3. Note that this assumes
12341                          * KVM_REQ_TLB_FLUSH is evaluated after
12342                          * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
12343                          */
12344                         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
12345                 }
12346         }
12347
12348         if (nested_cpu_has_ept(vmcs12))
12349                 nested_ept_init_mmu_context(vcpu);
12350         else if (nested_cpu_has2(vmcs12,
12351                                  SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
12352                 vmx_flush_tlb(vcpu, true);
12353
12354         /*
12355          * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
12356          * bits which we consider to be mandatorily enabled.
12357          * The CR0_READ_SHADOW is what L2 should have expected to read given
12358          * the specifications by L1; it's not enough to take
12359          * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
12360          * have more bits set than L1 expected.
12361          */
12362         vmx_set_cr0(vcpu, vmcs12->guest_cr0);
12363         vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
12364
12365         vmx_set_cr4(vcpu, vmcs12->guest_cr4);
12366         vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
12367
12368         vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
12369         /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
12370         vmx_set_efer(vcpu, vcpu->arch.efer);
12371
12372         /*
12373          * If the guest state is invalid and unrestricted guest is disabled,
12374          * then L1 attempted a VMEntry to L2 with invalid state.
12375          * Fail the VMEntry.
12376          */
12377         if (vmx->emulation_required) {
12378                 *entry_failure_code = ENTRY_FAIL_DEFAULT;
12379                 return 1;
12380         }
12381
12382         /* Load the L2 CR3; paging for L2 uses either EPT or shadow page tables. */
12383         if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
12384                                 entry_failure_code))
12385                 return 1;
12386
12387         if (!enable_ept)
12388                 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
12389
12390         kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
12391         kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
12392         return 0;
12393 }
12394
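/*
 * "Virtual NMIs" requires "NMI exiting", and "NMI-window exiting" requires
 * "virtual NMIs"; reject a vmcs12 that violates either rule.
 */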
12395 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
12396 {
12397         if (!nested_cpu_has_nmi_exiting(vmcs12) &&
12398             nested_cpu_has_virtual_nmis(vmcs12))
12399                 return -EINVAL;
12400
12401         if (!nested_cpu_has_virtual_nmis(vmcs12) &&
12402             nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
12403                 return -EINVAL;
12404
12405         return 0;
12406 }
12407
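/*
 * Check the vmcs12 control fields and host-state area against the nested
 * VMX capabilities exposed to L1, returning the VMXERR_* value that the
 * emulated VM entry should fail with when a check does not pass.
 */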
12408 static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
12409 {
12410         struct vcpu_vmx *vmx = to_vmx(vcpu);
12411         bool ia32e;
12412
12413         if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
12414             vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
12415                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12416
12417         if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)
12418                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12419
12420         if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
12421                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12422
12423         if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
12424                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12425
12426         if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
12427                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12428
12429         if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
12430                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12431
12432         if (nested_vmx_check_apicv_controls(vcpu, vmcs12))
12433                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12434
12435         if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
12436                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12437
12438         if (nested_vmx_check_pml_controls(vcpu, vmcs12))
12439                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12440
12441         if (nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12))
12442                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12443
12444         if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
12445                                 vmx->nested.msrs.procbased_ctls_low,
12446                                 vmx->nested.msrs.procbased_ctls_high) ||
12447             (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
12448              !vmx_control_verify(vmcs12->secondary_vm_exec_control,
12449                                  vmx->nested.msrs.secondary_ctls_low,
12450                                  vmx->nested.msrs.secondary_ctls_high)) ||
12451             !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
12452                                 vmx->nested.msrs.pinbased_ctls_low,
12453                                 vmx->nested.msrs.pinbased_ctls_high) ||
12454             !vmx_control_verify(vmcs12->vm_exit_controls,
12455                                 vmx->nested.msrs.exit_ctls_low,
12456                                 vmx->nested.msrs.exit_ctls_high) ||
12457             !vmx_control_verify(vmcs12->vm_entry_controls,
12458                                 vmx->nested.msrs.entry_ctls_low,
12459                                 vmx->nested.msrs.entry_ctls_high))
12460                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12461
12462         if (nested_vmx_check_nmi_controls(vmcs12))
12463                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12464
12465         if (nested_cpu_has_vmfunc(vmcs12)) {
12466                 if (vmcs12->vm_function_control &
12467                     ~vmx->nested.msrs.vmfunc_controls)
12468                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12469
12470                 if (nested_cpu_has_eptp_switching(vmcs12)) {
12471                         if (!nested_cpu_has_ept(vmcs12) ||
12472                             !page_address_valid(vcpu, vmcs12->eptp_list_address))
12473                                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12474                 }
12475         }
12476
12477         if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu))
12478                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12479
12480         if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
12481             !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
12482             !nested_cr3_valid(vcpu, vmcs12->host_cr3))
12483                 return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
12484
12485         /*
12486          * If the load IA32_EFER VM-exit control is 1, bits reserved in the
12487          * IA32_EFER MSR must be 0 in the field for that register. In addition,
12488          * the values of the LMA and LME bits in the field must each be that of
12489          * the host address-space size VM-exit control.
12490          */
12491         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
12492                 ia32e = (vmcs12->vm_exit_controls &
12493                          VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
12494                 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
12495                     ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
12496                     ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
12497                         return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
12498         }
12499
12500         /*
12501          * From the Intel SDM, volume 3:
12502          * Fields relevant to VM-entry event injection must be set properly.
12503          * These fields are the VM-entry interruption-information field, the
12504          * VM-entry exception error code, and the VM-entry instruction length.
12505          */
12506         if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
12507                 u32 intr_info = vmcs12->vm_entry_intr_info_field;
12508                 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
12509                 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
12510                 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
12511                 bool should_have_error_code;
12512                 bool urg = nested_cpu_has2(vmcs12,
12513                                            SECONDARY_EXEC_UNRESTRICTED_GUEST);
12514                 bool prot_mode = !urg || (vmcs12->guest_cr0 & X86_CR0_PE);
12515
12516                 /* VM-entry interruption-info field: interruption type */
12517                 if (intr_type == INTR_TYPE_RESERVED ||
12518                     (intr_type == INTR_TYPE_OTHER_EVENT &&
12519                      !nested_cpu_supports_monitor_trap_flag(vcpu)))
12520                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12521
12522                 /* VM-entry interruption-info field: vector */
12523                 if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
12524                     (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
12525                     (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
12526                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12527
12528                 /* VM-entry interruption-info field: deliver error code */
12529                 should_have_error_code =
12530                         intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
12531                         x86_exception_has_error_code(vector);
12532                 if (has_error_code != should_have_error_code)
12533                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12534
12535                 /* VM-entry exception error code */
12536                 if (has_error_code &&
12537                     vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
12538                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12539
12540                 /* VM-entry interruption-info field: reserved bits */
12541                 if (intr_info & INTR_INFO_RESVD_BITS_MASK)
12542                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12543
12544                 /* VM-entry instruction length */
12545                 switch (intr_type) {
12546                 case INTR_TYPE_SOFT_EXCEPTION:
12547                 case INTR_TYPE_SOFT_INTR:
12548                 case INTR_TYPE_PRIV_SW_EXCEPTION:
12549                         if ((vmcs12->vm_entry_instruction_len > 15) ||
12550                             (vmcs12->vm_entry_instruction_len == 0 &&
12551                              !nested_cpu_has_zero_length_injection(vcpu)))
12552                                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12553                 }
12554         }
12555
12556         if (nested_cpu_has_ept(vmcs12) &&
12557             !valid_ept_address(vcpu, vmcs12->ept_pointer))
12558                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12559
12560         return 0;
12561 }
12562
12563 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
12564                                           struct vmcs12 *vmcs12)
12565 {
12566         int r;
12567         struct page *page;
12568         struct vmcs12 *shadow;
12569
12570         if (vmcs12->vmcs_link_pointer == -1ull)
12571                 return 0;
12572
12573         if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
12574                 return -EINVAL;
12575
12576         page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
12577         if (is_error_page(page))
12578                 return -EINVAL;
12579
12580         r = 0;
12581         shadow = kmap(page);
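        /*
         * The referenced VMCS must carry the expected revision ID, and its
         * shadow-VMCS indicator must match whether vmcs12 enables VMCS
         * shadowing.
         */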
12582         if (shadow->hdr.revision_id != VMCS12_REVISION ||
12583             shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
12584                 r = -EINVAL;
12585         kunmap(page);
12586         kvm_release_page_clean(page);
12587         return r;
12588 }
12589
12590 static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
12591                                   u32 *exit_qual)
12592 {
12593         bool ia32e;
12594
12595         *exit_qual = ENTRY_FAIL_DEFAULT;
12596
12597         if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
12598             !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
12599                 return 1;
12600
12601         if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
12602                 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
12603                 return 1;
12604         }
12605
12606         /*
12607          * If the load IA32_EFER VM-entry control is 1, the following checks
12608          * are performed on the field for the IA32_EFER MSR:
12609          * - Bits reserved in the IA32_EFER MSR must be 0.
12610          * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
12611          *   the IA-32e mode guest VM-exit control. It must also be identical
12612          *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
12613          *   CR0.PG) is 1.
12614          */
12615         if (to_vmx(vcpu)->nested.nested_run_pending &&
12616             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
12617                 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
12618                 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
12619                     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
12620                     ((vmcs12->guest_cr0 & X86_CR0_PG) &&
12621                      ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
12622                         return 1;
12623         }
12624
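        /*
         * If BNDCFGS is loaded on entry, its base address must be canonical
         * and its reserved bits must be zero.
         */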
12625         if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
12626             (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
12627              (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
12628                 return 1;
12629
12630         return 0;
12631 }
12632
12633 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
12634                                    struct vmcs12 *vmcs12);
12635
12636 /*
12637  * If from_vmentry is false, this is being called from state restore (either RSM
12638  * or KVM_SET_NESTED_STATE).  Otherwise it's called from vmlaunch/vmresume.
12639  */
12640 static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
12641                                           bool from_vmentry)
12642 {
12643         struct vcpu_vmx *vmx = to_vmx(vcpu);
12644         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
12645         bool evaluate_pending_interrupts;
12646         u32 exit_reason = EXIT_REASON_INVALID_STATE;
12647         u32 exit_qual;
12648
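        /*
         * Snapshot whether vmcs01 has interrupt-window or NMI-window exiting
         * enabled (or, with APICv active, a pending virtual interrupt); this
         * is used below to request re-evaluation of pending events once
         * vmcs02 is active.
         */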
12649         evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
12650                 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
12651         if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
12652                 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
12653
12654         if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
12655                 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
12656         if (kvm_mpx_supported() &&
12657                 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
12658                 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
12659
12660         vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
12661
12662         prepare_vmcs02_early(vmx, vmcs12);
12663
12664         if (from_vmentry) {
12665                 nested_get_vmcs12_pages(vcpu);
12666
12667                 if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
12668                         goto vmentry_fail_vmexit;
12669         }
12670
12671         enter_guest_mode(vcpu);
12672         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
12673                 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
12674
12675         if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
12676                 goto vmentry_fail_vmexit_guest_mode;
12677
12678         if (from_vmentry) {
12679                 exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
12680                 exit_qual = nested_vmx_load_msr(vcpu,
12681                                                 vmcs12->vm_entry_msr_load_addr,
12682                                                 vmcs12->vm_entry_msr_load_count);
12683                 if (exit_qual)
12684                         goto vmentry_fail_vmexit_guest_mode;
12685         } else {
12686                 /*
12687                  * The MMU is not initialized to point at the right entities yet and
12688                  * "get pages" would need to read data from the guest (i.e. we will
12689                  * need to perform gpa to hpa translation). Request a call
12690                  * to nested_get_vmcs12_pages before the next VM-entry.  The MSRs
12691                  * have already been set at vmentry time and should not be reset.
12692                  */
12693                 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
12694         }
12695
12696         /*
12697          * If L1 had a pending IRQ/NMI when it executed
12698          * VMLAUNCH/VMRESUME which wasn't delivered because delivery was
12699          * disallowed (e.g. interrupts disabled), L0 needs to
12700          * evaluate whether this pending event should cause an exit from
12701          * L2 to L1 or be delivered directly to L2 (e.g. in case L1
12702          * doesn't intercept EXTERNAL_INTERRUPT).
12703          *
12704          * Usually this would be handled by the processor noticing an
12705          * IRQ/NMI window request, or checking RVI during evaluation of
12706          * pending virtual interrupts.  However, this setting was done
12707          * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
12708          * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
12709          */
12710         if (unlikely(evaluate_pending_interrupts))
12711                 kvm_make_request(KVM_REQ_EVENT, vcpu);
12712
12713         /*
12714          * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
12715          * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
12716          * returned as far as L1 is concerned. It will only return (and set
12717          * the success flag) when L2 exits (see nested_vmx_vmexit()).
12718          */
12719         return 0;
12720
12721         /*
12722          * A failed consistency check that leads to a VMExit during L1's
12723          * VMEnter to L2 is a variation of a normal VMexit, as explained in
12724          * 26.7 "VM-entry failures during or after loading guest state".
12725          */
12726 vmentry_fail_vmexit_guest_mode:
12727         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
12728                 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
12729         leave_guest_mode(vcpu);
12730
12731 vmentry_fail_vmexit:
12732         vmx_switch_vmcs(vcpu, &vmx->vmcs01);
12733
12734         if (!from_vmentry)
12735                 return 1;
12736
12737         load_vmcs12_host_state(vcpu, vmcs12);
12738         vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
12739         vmcs12->exit_qualification = exit_qual;
12740         if (enable_shadow_vmcs)
12741                 vmx->nested.sync_shadow_vmcs = true;
12742         return 1;
12743 }
12744
12745 /*
12746  * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
12747  * for running an L2 nested guest.
12748  */
12749 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
12750 {
12751         struct vmcs12 *vmcs12;
12752         struct vcpu_vmx *vmx = to_vmx(vcpu);
12753         u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
12754         int ret;
12755
12756         if (!nested_vmx_check_permission(vcpu))
12757                 return 1;
12758
12759         if (vmx->nested.current_vmptr == -1ull)
12760                 return nested_vmx_failInvalid(vcpu);
12761
12762         vmcs12 = get_vmcs12(vcpu);
12763
12764         /*
12765          * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
12766          * that there *is* a valid VMCS pointer, RFLAGS.CF is set
12767          * rather than RFLAGS.ZF, and no error number is stored to the
12768          * VM-instruction error field.
12769          */
12770         if (vmcs12->hdr.shadow_vmcs)
12771                 return nested_vmx_failInvalid(vcpu);
12772
12773         if (enable_shadow_vmcs)
12774                 copy_shadow_to_vmcs12(vmx);
12775
12776         /*
12777          * The nested entry process starts with enforcing various prerequisites
12778          * on vmcs12 as required by the Intel SDM, and acts appropriately when
12779          * they fail: as the SDM explains, some conditions should cause the
12780          * instruction to fail, while others will cause the instruction to seem
12781          * to succeed, but return an EXIT_REASON_INVALID_STATE.
12782          * To speed up the normal (success) code path, we should avoid checking
12783          * for misconfigurations which will be caught anyway by the processor
12784          * when using the merged vmcs02.
12785          */
12786         if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
12787                 return nested_vmx_failValid(vcpu,
12788                         VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
12789
12790         if (vmcs12->launch_state == launch)
12791                 return nested_vmx_failValid(vcpu,
12792                         launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
12793                                : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
12794
12795         ret = check_vmentry_prereqs(vcpu, vmcs12);
12796         if (ret)
12797                 return nested_vmx_failValid(vcpu, ret);
12798
12799         /*
12800          * We're finally done with prerequisite checking, and can start with
12801          * the nested entry.
12802          */
12803
12804         vmx->nested.nested_run_pending = 1;
12805         ret = nested_vmx_enter_non_root_mode(vcpu, true);
12806         if (ret) {
12807                 vmx->nested.nested_run_pending = 0;
12808                 return 1;
12809         }
12810
12811         /* Hide L1D cache contents from the nested guest.  */
12812         vmx->vcpu.arch.l1tf_flush_l1d = true;
12813
12814         /*
12815          * Must happen outside of nested_vmx_enter_non_root_mode() as it will
12816          * also be used as part of restoring nVMX state for
12817          * snapshot restore (migration).
12818          *
12819          * In this flow, it is assumed that vmcs12 cache was
12820          * transferred as part of the captured nVMX state and should
12821          * therefore not be read from guest memory (which may not
12822          * exist on the destination host yet).
12823          */
12824         nested_cache_shadow_vmcs12(vcpu, vmcs12);
12825
12826         /*
12827          * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
12828          * by event injection, halt vcpu.
12829          */
12830         if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
12831             !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) {
12832                 vmx->nested.nested_run_pending = 0;
12833                 return kvm_vcpu_halt(vcpu);
12834         }
12835         return 1;
12836 }
12837
12838 /*
12839  * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
12840  * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
12841  * This function returns the new value we should put in vmcs12.guest_cr0.
12842  * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
12843  *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
12844  *     available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
12845  *     didn't trap the bit, because if L1 did, so would L0).
12846  *  2. Bits that L1 asked to trap (and therefore L0 also did) could not have
12847  *     been modified by L2, and L1 knows it. So just leave the old value of
12848  *     the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
12849  *     isn't relevant, because if L0 traps this bit it can set it to anything.
12850  *  3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
12851  *     changed these bits, and therefore they need to be updated, but L0
12852  *     didn't necessarily allow them to be changed in GUEST_CR0 - and rather
12853  *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
12854  */
12855 static inline unsigned long
12856 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
12857 {
12858         return
12859         /*1*/   (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
12860         /*2*/   (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
12861         /*3*/   (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
12862                         vcpu->arch.cr0_guest_owned_bits));
12863 }
12864
12865 static inline unsigned long
12866 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
12867 {
12868         return
12869         /*1*/   (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
12870         /*2*/   (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
12871         /*3*/   (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
12872                         vcpu->arch.cr4_guest_owned_bits));
12873 }
12874
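/*
 * Record into vmcs12's IDT-vectoring information field any event (exception,
 * NMI or interrupt) that was queued for injection into L2 when the nested
 * VM-exit occurred, so that L1 can re-inject it.
 */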
12875 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
12876                                        struct vmcs12 *vmcs12)
12877 {
12878         u32 idt_vectoring;
12879         unsigned int nr;
12880
12881         if (vcpu->arch.exception.injected) {
12882                 nr = vcpu->arch.exception.nr;
12883                 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
12884
12885                 if (kvm_exception_is_soft(nr)) {
12886                         vmcs12->vm_exit_instruction_len =
12887                                 vcpu->arch.event_exit_inst_len;
12888                         idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
12889                 } else
12890                         idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
12891
12892                 if (vcpu->arch.exception.has_error_code) {
12893                         idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
12894                         vmcs12->idt_vectoring_error_code =
12895                                 vcpu->arch.exception.error_code;
12896                 }
12897
12898                 vmcs12->idt_vectoring_info_field = idt_vectoring;
12899         } else if (vcpu->arch.nmi_injected) {
12900                 vmcs12->idt_vectoring_info_field =
12901                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
12902         } else if (vcpu->arch.interrupt.injected) {
12903                 nr = vcpu->arch.interrupt.nr;
12904                 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
12905
12906                 if (vcpu->arch.interrupt.soft) {
12907                         idt_vectoring |= INTR_TYPE_SOFT_INTR;
12908                         vmcs12->vm_entry_instruction_len =
12909                                 vcpu->arch.event_exit_inst_len;
12910                 } else
12911                         idt_vectoring |= INTR_TYPE_EXT_INTR;
12912
12913                 vmcs12->idt_vectoring_info_field = idt_vectoring;
12914         }
12915 }
12916
12917 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
12918 {
12919         struct vcpu_vmx *vmx = to_vmx(vcpu);
12920         unsigned long exit_qual;
12921         bool block_nested_events =
12922             vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
12923
12924         if (vcpu->arch.exception.pending &&
12925                 nested_vmx_check_exception(vcpu, &exit_qual)) {
12926                 if (block_nested_events)
12927                         return -EBUSY;
12928                 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
12929                 return 0;
12930         }
12931
12932         if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
12933             vmx->nested.preemption_timer_expired) {
12934                 if (block_nested_events)
12935                         return -EBUSY;
12936                 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
12937                 return 0;
12938         }
12939
12940         if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
12941                 if (block_nested_events)
12942                         return -EBUSY;
12943                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
12944                                   NMI_VECTOR | INTR_TYPE_NMI_INTR |
12945                                   INTR_INFO_VALID_MASK, 0);
12946                 /*
12947                  * The NMI-triggered VM exit counts as injection:
12948                  * clear this one and block further NMIs.
12949                  */
12950                 vcpu->arch.nmi_pending = 0;
12951                 vmx_set_nmi_mask(vcpu, true);
12952                 return 0;
12953         }
12954
12955         if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
12956             nested_exit_on_intr(vcpu)) {
12957                 if (block_nested_events)
12958                         return -EBUSY;
12959                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
12960                 return 0;
12961         }
12962
12963         vmx_complete_nested_posted_interrupt(vcpu);
12964         return 0;
12965 }
12966
12967 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
12968 {
12969         to_vmx(vcpu)->req_immediate_exit = true;
12970 }
12971
12972 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
12973 {
12974         ktime_t remaining =
12975                 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
12976         u64 value;
12977
12978         if (ktime_to_ns(remaining) <= 0)
12979                 return 0;
12980
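        /*
         * remaining ns * virtual_tsc_khz / 10^6 yields the remaining time in
         * guest TSC ticks; shifting right by the emulated preemption-timer
         * rate converts TSC ticks to preemption-timer ticks.
         */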
12981         value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
12982         do_div(value, 1000000);
12983         return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
12984 }
12985
12986 /*
12987  * Update the guest state fields of vmcs12 to reflect changes that
12988  * occurred while L2 was running. (The "IA-32e mode guest" bit of the
12989  * VM-entry controls is also updated, since this is really a guest
12990  * state bit.)
12991  */
12992 static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
12993 {
12994         vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
12995         vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
12996
12997         vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
12998         vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
12999         vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
13000
13001         vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
13002         vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
13003         vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
13004         vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
13005         vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
13006         vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
13007         vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
13008         vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
13009         vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
13010         vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
13011         vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
13012         vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
13013         vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
13014         vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
13015         vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
13016         vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
13017         vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
13018         vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
13019         vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
13020         vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
13021         vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
13022         vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
13023         vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
13024         vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
13025         vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
13026         vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
13027         vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
13028         vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
13029         vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
13030         vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
13031         vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
13032         vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
13033         vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
13034         vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
13035         vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
13036         vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
13037
13038         vmcs12->guest_interruptibility_info =
13039                 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
13040         vmcs12->guest_pending_dbg_exceptions =
13041                 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
13042         if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
13043                 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
13044         else
13045                 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
13046
13047         if (nested_cpu_has_preemption_timer(vmcs12)) {
13048                 if (vmcs12->vm_exit_controls &
13049                     VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
13050                         vmcs12->vmx_preemption_timer_value =
13051                                 vmx_get_preemption_timer_value(vcpu);
13052                 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
13053         }
13054
13055         /*
13056          * In some cases (usually, nested EPT), L2 is allowed to change its
13057          * own CR3 without exiting. If it has changed it, we must keep it.
13058          * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
13059          * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
13060          *
13061          * Additionally, restore L2's PDPTR to vmcs12.
13062          */
13063         if (enable_ept) {
13064                 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
13065                 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
13066                 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
13067                 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
13068                 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
13069         }
13070
13071         vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
13072
13073         if (nested_cpu_has_vid(vmcs12))
13074                 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
13075
13076         vmcs12->vm_entry_controls =
13077                 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
13078                 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
13079
13080         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
13081                 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
13082                 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
13083         }
13084
13085         /* TODO: These cannot have changed unless we have MSR bitmaps and
13086          * the relevant bits allow the guest to change them without trapping */
13087         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
13088                 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
13089         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
13090                 vmcs12->guest_ia32_efer = vcpu->arch.efer;
13091         vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
13092         vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
13093         vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
13094         if (kvm_mpx_supported())
13095                 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
13096 }
13097
13098 /*
13099  * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
13100  * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
13101  * and this function updates it to reflect the changes to the guest state while
13102  * L2 was running (and perhaps made some exits which were handled directly by L0
13103  * without going back to L1), and to reflect the exit reason.
13104  * Note that we do not have to copy all VMCS fields here, just those that
13105  * could have changed by the L2 guest or the exit - i.e., the guest-state and
13106  * exit-information fields only. Other fields are modified by L1 with VMWRITE,
13107  * which already writes to vmcs12 directly.
13108  */
13109 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
13110                            u32 exit_reason, u32 exit_intr_info,
13111                            unsigned long exit_qualification)
13112 {
13113         /* update guest state fields: */
13114         sync_vmcs12(vcpu, vmcs12);
13115
13116         /* update exit information fields: */
13117
13118         vmcs12->vm_exit_reason = exit_reason;
13119         vmcs12->exit_qualification = exit_qualification;
13120         vmcs12->vm_exit_intr_info = exit_intr_info;
13121
13122         vmcs12->idt_vectoring_info_field = 0;
13123         vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
13124         vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
13125
13126         if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
13127                 vmcs12->launch_state = 1;
13128
13129                 /* vm_entry_intr_info_field is cleared on exit. Emulate this
13130                  * instead of reading the real value. */
13131                 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
13132
13133                 /*
13134                  * Transfer the event that L0 or L1 may have wanted to inject
13135                  * into L2 to IDT_VECTORING_INFO_FIELD.
13136                  */
13137                 vmcs12_save_pending_event(vcpu, vmcs12);
13138         }
13139
13140         /*
13141          * Drop what we picked up for L2 via vmx_complete_interrupts. It was
13142          * preserved in vmcs12 above and would otherwise end up incorrectly in L1.
13143          */
13144         vcpu->arch.nmi_injected = false;
13145         kvm_clear_exception_queue(vcpu);
13146         kvm_clear_interrupt_queue(vcpu);
13147 }
13148
13149 /*
13150  * Part of what we need to do when the nested L2 guest exits and we want to
13151  * run its L1 parent is to reset L1's guest state to the host state specified
13152  * in vmcs12.
13153  * This function is to be called not only on normal nested exit, but also on
13154  * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
13155  * Failures During or After Loading Guest State").
13156  * This function should be called when the active VMCS is L1's (vmcs01).
13157  */
13158 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
13159                                    struct vmcs12 *vmcs12)
13160 {
13161         struct kvm_segment seg;
13162         u32 entry_failure_code;
13163
13164         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
13165                 vcpu->arch.efer = vmcs12->host_ia32_efer;
13166         else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
13167                 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
13168         else
13169                 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
13170         vmx_set_efer(vcpu, vcpu->arch.efer);
13171
13172         kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
13173         kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
13174         vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
13175         vmx_set_interrupt_shadow(vcpu, 0);
13176
13177         /*
13178          * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
13179          * actually changed, because vmx_set_cr0 refers to efer set above.
13180          *
13181          * CR0_GUEST_HOST_MASK is already set in the original vmcs01
13182          * (KVM doesn't change it).
13183          */
13184         vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
13185         vmx_set_cr0(vcpu, vmcs12->host_cr0);
13186
13187         /* Same as above - no reason to call set_cr4_guest_host_mask().  */
13188         vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
13189         vmx_set_cr4(vcpu, vmcs12->host_cr4);
13190
13191         nested_ept_uninit_mmu_context(vcpu);
13192
13193         /*
13194          * Only PDPTE load can fail as the value of cr3 was checked on entry and
13195          * couldn't have changed.
13196          */
13197         if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
13198                 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
13199
13200         if (!enable_ept)
13201                 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
13202
13203         /*
13204          * If vmcs01 doesn't use VPID, the CPU flushes the TLB on every
13205          * VMEntry/VMExit, so there is no need to flush it here.
13206          *
13207          * If vmcs12 doesn't use VPID, L1 expects the TLB to be
13208          * flushed on every VMEntry/VMExit.
13209          *
13210          * Otherwise, we can preserve TLB entries as long as we are
13211          * able to tag L1 TLB entries differently than L2 TLB entries.
13212          *
13213          * If vmcs12 uses EPT, we need to execute this flush on EPTP01
13214          * and therefore we request the TLB flush to happen only after VMCS EPTP
13215          * has been set by KVM_REQ_LOAD_CR3.
13216          */
13217         if (enable_vpid &&
13218             (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
13219                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
13220         }
13221
13222         vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
13223         vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
13224         vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
13225         vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
13226         vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
13227         vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
13228         vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
13229
13230         /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
13231         if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
13232                 vmcs_write64(GUEST_BNDCFGS, 0);
13233
13234         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
13235                 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
13236                 vcpu->arch.pat = vmcs12->host_ia32_pat;
13237         }
13238         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
13239                 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
13240                         vmcs12->host_ia32_perf_global_ctrl);
13241
13242         /* Set L1 segment info according to Intel SDM section 27.5.2,
13243          * "Loading Host Segment and Descriptor-Table Registers". */
13244         seg = (struct kvm_segment) {
13245                 .base = 0,
13246                 .limit = 0xFFFFFFFF,
13247                 .selector = vmcs12->host_cs_selector,
13248                 .type = 11,
13249                 .present = 1,
13250                 .s = 1,
13251                 .g = 1
13252         };
13253         if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
13254                 seg.l = 1;
13255         else
13256                 seg.db = 1;
13257         vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
13258         seg = (struct kvm_segment) {
13259                 .base = 0,
13260                 .limit = 0xFFFFFFFF,
13261                 .type = 3,
13262                 .present = 1,
13263                 .s = 1,
13264                 .db = 1,
13265                 .g = 1
13266         };
13267         seg.selector = vmcs12->host_ds_selector;
13268         vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
13269         seg.selector = vmcs12->host_es_selector;
13270         vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
13271         seg.selector = vmcs12->host_ss_selector;
13272         vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
13273         seg.selector = vmcs12->host_fs_selector;
13274         seg.base = vmcs12->host_fs_base;
13275         vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
13276         seg.selector = vmcs12->host_gs_selector;
13277         seg.base = vmcs12->host_gs_base;
13278         vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
13279         seg = (struct kvm_segment) {
13280                 .base = vmcs12->host_tr_base,
13281                 .limit = 0x67,
13282                 .selector = vmcs12->host_tr_selector,
13283                 .type = 11,
13284                 .present = 1
13285         };
13286         vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
13287
13288         kvm_set_dr(vcpu, 7, 0x400);
13289         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
13290
13291         if (cpu_has_vmx_msr_bitmap())
13292                 vmx_update_msr_bitmap(vcpu);
13293
13294         if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
13295                                 vmcs12->vm_exit_msr_load_count))
13296                 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
13297 }
13298
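/*
 * Determine the EFER value that vmcs01 establishes for the guest: taken from
 * GUEST_IA32_EFER if the "load IA32_EFER" entry control is in use, otherwise
 * from the MSR autoload list or the shared MSR area, falling back to the
 * host's EFER when the guest's EFER is not switched at all.
 */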
13299 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
13300 {
13301         struct shared_msr_entry *efer_msr;
13302         unsigned int i;
13303
13304         if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
13305                 return vmcs_read64(GUEST_IA32_EFER);
13306
13307         if (cpu_has_load_ia32_efer)
13308                 return host_efer;
13309
13310         for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
13311                 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
13312                         return vmx->msr_autoload.guest.val[i].value;
13313         }
13314
13315         efer_msr = find_msr_entry(vmx, MSR_EFER);
13316         if (efer_msr)
13317                 return efer_msr->data;
13318
13319         return host_efer;
13320 }
13321
13322 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
13323 {
13324         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
13325         struct vcpu_vmx *vmx = to_vmx(vcpu);
13326         struct vmx_msr_entry g, h;
13327         struct msr_data msr;
13328         gpa_t gpa;
13329         u32 i, j;
13330
13331         vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
13332
13333         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
13334                 /*
13335                  * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
13336                  * as vmcs01.GUEST_DR7 contains a userspace defined value
13337                  * and vcpu->arch.dr7 is not squirreled away before the
13338                  * nested VMENTER (not worth adding a variable in nested_vmx).
13339                  */
13340                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
13341                         kvm_set_dr(vcpu, 7, DR7_FIXED_1);
13342                 else
13343                         WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
13344         }
13345
13346         /*
13347          * Note that calling vmx_set_{efer,cr0,cr4} is important as they
13348          * handle a variety of side effects to KVM's software model.
13349          */
13350         vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
13351
13352         vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
13353         vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
13354
13355         vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
13356         vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
13357
13358         nested_ept_uninit_mmu_context(vcpu);
13359         vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
13360         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
13361
13362         /*
13363          * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
13364          * from vmcs01 (if necessary).  The PDPTRs are not loaded on
13365          * VMFail; like everything else, we just need to ensure our
13366          * software model is up-to-date.
13367          */
13368         ept_save_pdptrs(vcpu);
13369
13370         kvm_mmu_reset_context(vcpu);
13371
13372         if (cpu_has_vmx_msr_bitmap())
13373                 vmx_update_msr_bitmap(vcpu);
13374
13375         /*
13376          * This nasty bit of open coding is a compromise between blindly
13377          * loading L1's MSRs using the exit load lists (incorrect emulation
13378          * of VMFail), leaving the nested VM's MSRs in the software model
13379          * (incorrect behavior) and snapshotting the modified MSRs (too
13380          * expensive since the lists are unbound by hardware).  For each
13381          * expensive since the lists are unbounded by hardware).  For each
13382          * list, reload it from the exit load list if it exists and differs
13383          * from the guest value.  The intent is to stuff host state as
13384          * silently as possible, not to fully process the exit load list.
13385          */
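        /*
         * Treat the reload as a guest-initiated write so that the usual
         * guest-visible MSR checks apply when stuffing L1's values back in.
         */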
13386         msr.host_initiated = false;
13387         for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
13388                 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
13389                 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
13390                         pr_debug_ratelimited(
13391                                 "%s read MSR index failed (%u, 0x%08llx)\n",
13392                                 __func__, i, gpa);
13393                         goto vmabort;
13394                 }
13395
13396                 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
13397                         gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
13398                         if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
13399                                 pr_debug_ratelimited(
13400                                         "%s read MSR failed (%u, 0x%08llx)\n",
13401                                         __func__, j, gpa);
13402                                 goto vmabort;
13403                         }
13404                         if (h.index != g.index)
13405                                 continue;
13406                         if (h.value == g.value)
13407                                 break;
13408
13409                         if (nested_vmx_load_msr_check(vcpu, &h)) {
13410                                 pr_debug_ratelimited(
13411                                         "%s check failed (%u, 0x%x, 0x%x)\n",
13412                                         __func__, j, h.index, h.reserved);
13413                                 goto vmabort;
13414                         }
13415
13416                         msr.index = h.index;
13417                         msr.data = h.value;
13418                         if (kvm_set_msr(vcpu, &msr)) {
13419                                 pr_debug_ratelimited(
13420                                         "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
13421                                         __func__, j, h.index, h.value);
13422                                 goto vmabort;
13423                         }
13424                 }
13425         }
13426
13427         return;
13428
13429 vmabort:
13430         nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
13431 }
13432
13433 /*
13434  * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
13435  * and modify vmcs12 to make it see what it would expect to see there if
13436  * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
13437  */
13438 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
13439                               u32 exit_intr_info,
13440                               unsigned long exit_qualification)
13441 {
13442         struct vcpu_vmx *vmx = to_vmx(vcpu);
13443         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
13444
13445         /* trying to cancel vmlaunch/vmresume is a bug */
13446         WARN_ON_ONCE(vmx->nested.nested_run_pending);
13447
13448         /*
13449          * The only expected VM-instruction error is "VM entry with
13450          * invalid control field(s)." Anything else indicates a
13451          * problem with L0.
13452          */
13453         WARN_ON_ONCE(vmx->fail && (vmcs_read32(VM_INSTRUCTION_ERROR) !=
13454                                    VMXERR_ENTRY_INVALID_CONTROL_FIELD));
13455
13456         leave_guest_mode(vcpu);
13457
13458         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
13459                 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
13460
13461         if (likely(!vmx->fail)) {
13462                 if (exit_reason == -1)
13463                         sync_vmcs12(vcpu, vmcs12);
13464                 else
13465                         prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
13466                                        exit_qualification);
13467
13468                 /*
13469                  * Must happen outside of sync_vmcs12() as it will
13470                  * also be used to capture vmcs12 cache as part of
13471                  * capturing nVMX state for snapshot (migration).
13472                  *
13473                  * Otherwise, this flush will dirty guest memory at a
13474                  * point where user-space already assumes it to be
13475                  * immutable.
13476                  */
13477                 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
13478
13479                 if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
13480                                          vmcs12->vm_exit_msr_store_count))
13481                         nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
13482         }
13483
13484         vmx_switch_vmcs(vcpu, &vmx->vmcs01);
13485
13486         /* Update any VMCS fields that might have changed while L2 ran */
13487         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
13488         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
13489         vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
13490
13491         if (kvm_has_tsc_control)
13492                 decache_tsc_multiplier(vmx);
13493
13494         if (vmx->nested.change_vmcs01_virtual_apic_mode) {
13495                 vmx->nested.change_vmcs01_virtual_apic_mode = false;
13496                 vmx_set_virtual_apic_mode(vcpu);
13497         } else if (!nested_cpu_has_ept(vmcs12) &&
13498                    nested_cpu_has2(vmcs12,
13499                                    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
13500                 vmx_flush_tlb(vcpu, true);
13501         }
13502
13503         /* This is needed for the same reason it was needed in prepare_vmcs02 */
13504         vmx->host_rsp = 0;
13505
13506         /* Unpin physical memory we referred to in vmcs02 */
13507         if (vmx->nested.apic_access_page) {
13508                 kvm_release_page_dirty(vmx->nested.apic_access_page);
13509                 vmx->nested.apic_access_page = NULL;
13510         }
13511         if (vmx->nested.virtual_apic_page) {
13512                 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
13513                 vmx->nested.virtual_apic_page = NULL;
13514         }
13515         if (vmx->nested.pi_desc_page) {
13516                 kunmap(vmx->nested.pi_desc_page);
13517                 kvm_release_page_dirty(vmx->nested.pi_desc_page);
13518                 vmx->nested.pi_desc_page = NULL;
13519                 vmx->nested.pi_desc = NULL;
13520         }
13521
13522         /*
13523          * We are no longer running L2; the mmu_notifier will force a reload
13524          * of the page's hpa for the L2 vmcs. Reload it for L1 before entering L1.
13525          */
13526         kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
13527
13528         if (enable_shadow_vmcs && exit_reason != -1)
13529                 vmx->nested.sync_shadow_vmcs = true;
13530
13531         /* in case we halted in L2 */
13532         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
13533
13534         if (likely(!vmx->fail)) {
13535                 /*
13536                  * TODO: SDM says that with acknowledge interrupt on
13537                  * exit, bit 31 of the VM-exit interrupt information
13538                  * (valid interrupt) is always set to 1 on
13539                  * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
13540                  * need kvm_cpu_has_interrupt().  See the commit
13541                  * message for details.
13542                  */
13543                 if (nested_exit_intr_ack_set(vcpu) &&
13544                     exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
13545                     kvm_cpu_has_interrupt(vcpu)) {
13546                         int irq = kvm_cpu_get_interrupt(vcpu);
13547                         WARN_ON(irq < 0);
13548                         vmcs12->vm_exit_intr_info = irq |
13549                                 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
13550                 }
13551
13552                 if (exit_reason != -1)
13553                         trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
13554                                                        vmcs12->exit_qualification,
13555                                                        vmcs12->idt_vectoring_info_field,
13556                                                        vmcs12->vm_exit_intr_info,
13557                                                        vmcs12->vm_exit_intr_error_code,
13558                                                        KVM_ISA_VMX);
13559
13560                 load_vmcs12_host_state(vcpu, vmcs12);
13561
13562                 return;
13563         }
13564
13565         /*
13566          * After an early L2 VM-entry failure, we're now back
13567          * in L1 which thinks it just finished a VMLAUNCH or
13568          * VMRESUME instruction, so we need to set the failure
13569          * flag and the VM-instruction error field of the VMCS
13570          * accordingly, and skip the emulated instruction.
13571          */
13572         (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
13573
13574         /*
13575          * Restore L1's host state to KVM's software model.  We're here
13576          * because a consistency check was caught by hardware, which
13577          * means some amount of guest state has been propagated to KVM's
13578          * model and needs to be unwound to the host's state.
13579          */
13580         nested_vmx_restore_host_state(vcpu);
13581
13582         vmx->fail = 0;
13583 }
13584
13585 /*
13586  * Forcibly leave nested mode in order to be able to reset the VCPU later on.
13587  */
13588 static void vmx_leave_nested(struct kvm_vcpu *vcpu)
13589 {
13590         if (is_guest_mode(vcpu)) {
13591                 to_vmx(vcpu)->nested.nested_run_pending = 0;
13592                 nested_vmx_vmexit(vcpu, -1, 0, 0);
13593         }
13594         free_nested(to_vmx(vcpu));
13595 }
13596
13597 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
13598                                struct x86_instruction_info *info,
13599                                enum x86_intercept_stage stage)
13600 {
13601         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
13602         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
13603
13604         /*
13605          * RDPID causes #UD if disabled through secondary execution controls.
13606          * Because it is marked as EmulateOnUD, we need to intercept it here.
13607          */
13608         if (info->intercept == x86_intercept_rdtscp &&
13609             !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
13610                 ctxt->exception.vector = UD_VECTOR;
13611                 ctxt->exception.error_code_valid = false;
13612                 return X86EMUL_PROPAGATE_FAULT;
13613         }
13614
13615         /* TODO: check more intercepts... */
13616         return X86EMUL_CONTINUE;
13617 }
13618
13619 #ifdef CONFIG_X86_64
13620 /* (a << shift) / divisor; returns 1 on overflow, 0 otherwise */
13621 static inline int u64_shl_div_u64(u64 a, unsigned int shift,
13622                                   u64 divisor, u64 *result)
13623 {
13624         u64 low = a << shift, high = a >> (64 - shift);
13625
13626         /* Avoid a divide fault (#DE) on divq: the quotient must fit in 64 bits */
13627         if (high >= divisor)
13628                 return 1;
13629
13630         /* low holds the quotient; high holds the remainder, which is discarded */
13631         asm("divq %2\n\t" : "=a" (low), "=d" (high) :
13632             "rm" (divisor), "0" (low), "1" (high));
13633         *result = low;
13634
13635         return 0;
13636 }
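/*
 * Illustrative note: the helper above forms the 128-bit value (a << shift)
 * in the high:low register pair and divides it by 'divisor' with a single
 * divq, e.g. u64_shl_div_u64(5, 32, 3, &r) yields r = (5ULL << 32) / 3.
 * It returns 1 instead of faulting when the quotient would exceed 64 bits.
 */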
13637
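/*
 * Program the VMX preemption timer to fire at the guest's APIC timer
 * deadline.  The deadline is converted from guest to host TSC units and
 * reduced by the lapic_timer_advance_ns lead time; returning 1 means the
 * deadline has already passed (or falls within that lead time), while
 * -ERANGE means the delta cannot be programmed into the 32-bit timer.
 */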
13638 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
13639 {
13640         struct vcpu_vmx *vmx;
13641         u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
13642
13643         if (kvm_mwait_in_guest(vcpu->kvm))
13644                 return -EOPNOTSUPP;
13645
13646         vmx = to_vmx(vcpu);
13647         tscl = rdtsc();
13648         guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
13649         delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
13650         lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns);
13651
13652         if (delta_tsc > lapic_timer_advance_cycles)
13653                 delta_tsc -= lapic_timer_advance_cycles;
13654         else
13655                 delta_tsc = 0;
13656
13657         /* Convert to host delta tsc if tsc scaling is enabled */
13658         if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
13659                         u64_shl_div_u64(delta_tsc,
13660                                 kvm_tsc_scaling_ratio_frac_bits,
13661                                 vcpu->arch.tsc_scaling_ratio,
13662                                 &delta_tsc))
13663                 return -ERANGE;
13664
13665         /*
13666          * The delta tsc, shifted right by cpu_preemption_timer_multi,
13667          * must fit in 32 bits or the preemption timer can't be used.
13668          * It might fit on a later vmentry, but checking on every
13669          * vmentry is costly, so just fall back to an hrtimer.
13670          */
13671         if (delta_tsc >> (cpu_preemption_timer_multi + 32))
13672                 return -ERANGE;
13673
13674         vmx->hv_deadline_tsc = tscl + delta_tsc;
13675         return delta_tsc == 0;
13676 }
13677
13678 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
13679 {
13680         to_vmx(vcpu)->hv_deadline_tsc = -1;
13681 }
13682 #endif
13683
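/*
 * Shrink the dynamic PLE window when the vCPU is scheduled back in; the
 * window grows on PAUSE-loop exits, so starting small again keeps PLE
 * detection responsive after a reschedule.
 */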
13684 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
13685 {
13686         if (!kvm_pause_in_guest(vcpu->kvm))
13687                 shrink_ple_window(vcpu);
13688 }
13689
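/*
 * Start PML-based dirty logging for a memslot: clear the D-bit on leaf
 * SPTEs so hardware logs subsequent writes, and write-protect large pages
 * so they are split and tracked at 4K granularity.
 */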
13690 static void vmx_slot_enable_log_dirty(struct kvm *kvm,
13691                                      struct kvm_memory_slot *slot)
13692 {
13693         kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
13694         kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
13695 }
13696
13697 static void vmx_slot_disable_log_dirty(struct kvm *kvm,
13698                                        struct kvm_memory_slot *slot)
13699 {
13700         kvm_mmu_slot_set_dirty(kvm, slot);
13701 }
13702
13703 static void vmx_flush_log_dirty(struct kvm *kvm)
13704 {
13705         kvm_flush_pml_buffers(kvm);
13706 }
13707
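/*
 * Emulate PML logging for a nested guest: if L1 enabled PML for L2,
 * append the dirty GPA to the vmcs12 PML buffer and decrement
 * guest_pml_index; once the index is exhausted, flag pml_full so a
 * PML-full VM-exit can be reflected to L1.
 */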
13708 static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
13709 {
13710         struct vmcs12 *vmcs12;
13711         struct vcpu_vmx *vmx = to_vmx(vcpu);
13712         gpa_t gpa;
13713         struct page *page = NULL;
13714         u64 *pml_address;
13715
13716         if (is_guest_mode(vcpu)) {
13717                 WARN_ON_ONCE(vmx->nested.pml_full);
13718
13719                 /*
13720                  * Check if PML is enabled for the nested guest.
13721                  * Whether EPTP bit 6 (enable A/D bits) is set is
13722                  * already checked as part of A/D emulation.
13723                  */
13724                 vmcs12 = get_vmcs12(vcpu);
13725                 if (!nested_cpu_has_pml(vmcs12))
13726                         return 0;
13727
13728                 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
13729                         vmx->nested.pml_full = true;
13730                         return 1;
13731                 }
13732
13733                 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
13734
13735                 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
13736                 if (is_error_page(page))
13737                         return 0;
13738
13739                 pml_address = kmap(page);
13740                 pml_address[vmcs12->guest_pml_index--] = gpa;
13741                 kunmap(page);
13742                 kvm_release_page_clean(page);
13743         }
13744
13745         return 0;
13746 }
13747
13748 static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
13749                                            struct kvm_memory_slot *memslot,
13750                                            gfn_t offset, unsigned long mask)
13751 {
13752         kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
13753 }
13754
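/*
 * Undo the blocking-time posted-interrupt setup: point 'NV' back at the
 * normal notification vector, retarget 'NDST' at the CPU the vCPU now
 * runs on, and remove the vCPU from the per-CPU wakeup list.
 */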
13755 static void __pi_post_block(struct kvm_vcpu *vcpu)
13756 {
13757         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
13758         struct pi_desc old, new;
13759         unsigned int dest;
13760
13761         do {
13762                 old.control = new.control = pi_desc->control;
13763                 WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
13764                      "Wakeup handler not enabled while the VCPU is blocked\n");
13765
13766                 dest = cpu_physical_id(vcpu->cpu);
13767
13768                 if (x2apic_enabled())
13769                         new.ndst = dest;
13770                 else
13771                         new.ndst = (dest << 8) & 0xFF00;
13772
13773                 /* set 'NV' to 'notification vector' */
13774                 new.nv = POSTED_INTR_VECTOR;
13775         } while (cmpxchg64(&pi_desc->control, old.control,
13776                            new.control) != old.control);
13777
13778         if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
13779                 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
13780                 list_del(&vcpu->blocked_vcpu_list);
13781                 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
13782                 vcpu->pre_pcpu = -1;
13783         }
13784 }
13785
13786 /*
13787  * This routine prepares a vCPU that is about to block when VT-d
13788  * posted interrupts (PI) are enabled:
13789  * - Add the vCPU to the per-CPU wakeup list, so that when an
13790  *   interrupt arrives we can find the right vCPU to wake up.
13791  * - Update the posted-interrupt descriptor as follows:
13792  *      'NDST' <-- vcpu->pre_pcpu
13793  *      'NV' <-- POSTED_INTR_WAKEUP_VECTOR
13794  * - If 'ON' is found set during this process, at least one
13795  *   interrupt has already been posted for this vCPU and it must
13796  *   not block; in that case return 1, otherwise return 0.
13797  *
13798  */
13799 static int pi_pre_block(struct kvm_vcpu *vcpu)
13800 {
13801         unsigned int dest;
13802         struct pi_desc old, new;
13803         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
13804
13805         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
13806                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
13807                 !kvm_vcpu_apicv_active(vcpu))
13808                 return 0;
13809
13810         WARN_ON(irqs_disabled());
13811         local_irq_disable();
13812         if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
13813                 vcpu->pre_pcpu = vcpu->cpu;
13814                 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
13815                 list_add_tail(&vcpu->blocked_vcpu_list,
13816                               &per_cpu(blocked_vcpu_on_cpu,
13817                                        vcpu->pre_pcpu));
13818                 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
13819         }
13820
13821         do {
13822                 old.control = new.control = pi_desc->control;
13823
13824                 WARN((pi_desc->sn == 1),
13825                      "Warning: SN field of posted-interrupts "
13826                      "is set before blocking\n");
13827
13828                 /*
13829                  * Since the vCPU can be preempted during this process,
13830                  * vcpu->cpu may differ from pre_pcpu.  Use pre_pcpu as
13831                  * the destination of the wakeup notification event so
13832                  * that the wakeup handler can find the right vCPU to
13833                  * wake up if an interrupt arrives while the vCPU is
13834                  * blocked.
13835                  */
13836                 dest = cpu_physical_id(vcpu->pre_pcpu);
13837
13838                 if (x2apic_enabled())
13839                         new.ndst = dest;
13840                 else
13841                         new.ndst = (dest << 8) & 0xFF00;
13842
13843                 /* set 'NV' to 'wakeup vector' */
13844                 new.nv = POSTED_INTR_WAKEUP_VECTOR;
13845         } while (cmpxchg64(&pi_desc->control, old.control,
13846                            new.control) != old.control);
13847
13848         /* We should not block the vCPU if an interrupt is posted for it.  */
13849         if (pi_test_on(pi_desc) == 1)
13850                 __pi_post_block(vcpu);
13851
13852         local_irq_enable();
13853         return (vcpu->pre_pcpu == -1);
13854 }
13855
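/*
 * Called when the vCPU is about to block (e.g. on HLT): arm the VT-d
 * posted-interrupt wakeup path and, if the APIC timer is currently using
 * the VMX preemption timer, fall back to the software hrtimer.
 */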
13856 static int vmx_pre_block(struct kvm_vcpu *vcpu)
13857 {
13858         if (pi_pre_block(vcpu))
13859                 return 1;
13860
13861         if (kvm_lapic_hv_timer_in_use(vcpu))
13862                 kvm_lapic_switch_to_sw_timer(vcpu);
13863
13864         return 0;
13865 }
13866
13867 static void pi_post_block(struct kvm_vcpu *vcpu)
13868 {
13869         if (vcpu->pre_pcpu == -1)
13870                 return;
13871
13872         WARN_ON(irqs_disabled());
13873         local_irq_disable();
13874         __pi_post_block(vcpu);
13875         local_irq_enable();
13876 }
13877
13878 static void vmx_post_block(struct kvm_vcpu *vcpu)
13879 {
13880         if (kvm_x86_ops->set_hv_timer)
13881                 kvm_lapic_switch_to_hv_timer(vcpu);
13882
13883         pi_post_block(vcpu);
13884 }
13885
13886 /*
13887  * vmx_update_pi_irte - set IRTE for Posted-Interrupts
13888  *
13889  * @kvm: the VM the interrupt belongs to
13890  * @host_irq: host irq of the interrupt
13891  * @guest_irq: gsi of the interrupt
13892  * @set: true to configure posted delivery, false to revert to remapped mode
13893  * returns 0 on success, < 0 on failure
13894  */
13895 static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
13896                               uint32_t guest_irq, bool set)
13897 {
13898         struct kvm_kernel_irq_routing_entry *e;
13899         struct kvm_irq_routing_table *irq_rt;
13900         struct kvm_lapic_irq irq;
13901         struct kvm_vcpu *vcpu;
13902         struct vcpu_data vcpu_info;
13903         int idx, ret = 0;
13904
13905         if (!kvm_arch_has_assigned_device(kvm) ||
13906                 !irq_remapping_cap(IRQ_POSTING_CAP) ||
13907                 !kvm_vcpu_apicv_active(kvm->vcpus[0]))
13908                 return 0;
13909
13910         idx = srcu_read_lock(&kvm->irq_srcu);
13911         irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
13912         if (guest_irq >= irq_rt->nr_rt_entries ||
13913             hlist_empty(&irq_rt->map[guest_irq])) {
13914                 pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
13915                              guest_irq, irq_rt->nr_rt_entries);
13916                 goto out;
13917         }
13918
13919         hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
13920                 if (e->type != KVM_IRQ_ROUTING_MSI)
13921                         continue;
13922                 /*
13923                  * VT-d PI cannot post multicast/broadcast interrupts
13924                  * to a vCPU, so interrupt remapping is still used for
13925                  * these kinds of interrupts.
13926                  *
13927                  * For lowest-priority interrupts, only those with a
13928                  * single CPU as the destination are supported, e.g.
13929                  * the user configures the interrupt via /proc/irq or
13930                  * uses irqbalance to make it single-CPU.
13931                  *
13932                  * Full lowest-priority support may be added later.
13933                  */
13934
13935                 kvm_set_msi_irq(kvm, e, &irq);
13936                 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
13937                         /*
13938                          * Make sure the IRTE is in remapped mode if
13939                          * we don't handle it in posted mode.
13940                          */
13941                         ret = irq_set_vcpu_affinity(host_irq, NULL);
13942                         if (ret < 0) {
13943                                 printk(KERN_INFO
13944                                    "failed to fall back to remapped mode, irq: %u\n",
13945                                    host_irq);
13946                                 goto out;
13947                         }
13948
13949                         continue;
13950                 }
13951
13952                 vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
13953                 vcpu_info.vector = irq.vector;
13954
13955                 trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
13956                                 vcpu_info.vector, vcpu_info.pi_desc_addr, set);
13957
13958                 if (set)
13959                         ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
13960                 else
13961                         ret = irq_set_vcpu_affinity(host_irq, NULL);
13962
13963                 if (ret < 0) {
13964                         printk(KERN_INFO "%s: failed to update PI IRTE\n",
13965                                         __func__);
13966                         goto out;
13967                 }
13968         }
13969
13970         ret = 0;
13971 out:
13972         srcu_read_unlock(&kvm->irq_srcu, idx);
13973         return ret;
13974 }
13975
13976 static void vmx_setup_mce(struct kvm_vcpu *vcpu)
13977 {
13978         if (vcpu->arch.mcg_cap & MCG_LMCE_P)
13979                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
13980                         FEATURE_CONTROL_LMCE;
13981         else
13982                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
13983                         ~FEATURE_CONTROL_LMCE;
13984 }
13985
13986 static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
13987 {
13988         /* we need a nested vmexit to enter SMM, postpone if run is pending */
13989         if (to_vmx(vcpu)->nested.nested_run_pending)
13990                 return 0;
13991         return 1;
13992 }
13993
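/*
 * SMM temporarily disables VMX, so force a nested VM-exit if we are in
 * guest mode and stash the vmxon/guest-mode state so vmx_pre_leave_smm()
 * can restore it on RSM.
 */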
13994 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
13995 {
13996         struct vcpu_vmx *vmx = to_vmx(vcpu);
13997
13998         vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
13999         if (vmx->nested.smm.guest_mode)
14000                 nested_vmx_vmexit(vcpu, -1, 0, 0);
14001
14002         vmx->nested.smm.vmxon = vmx->nested.vmxon;
14003         vmx->nested.vmxon = false;
14004         vmx_clear_hlt(vcpu);
14005         return 0;
14006 }
14007
14008 static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
14009 {
14010         struct vcpu_vmx *vmx = to_vmx(vcpu);
14011         int ret;
14012
14013         if (vmx->nested.smm.vmxon) {
14014                 vmx->nested.vmxon = true;
14015                 vmx->nested.smm.vmxon = false;
14016         }
14017
14018         if (vmx->nested.smm.guest_mode) {
14019                 vcpu->arch.hflags &= ~HF_SMM_MASK;
14020                 ret = nested_vmx_enter_non_root_mode(vcpu, false);
14021                 vcpu->arch.hflags |= HF_SMM_MASK;
14022                 if (ret)
14023                         return ret;
14024
14025                 vmx->nested.smm.guest_mode = false;
14026         }
14027         return 0;
14028 }
14029
14030 static int enable_smi_window(struct kvm_vcpu *vcpu)
14031 {
14032         return 0;
14033 }
14034
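/*
 * KVM_GET_NESTED_STATE: copy the nested VMX state (VMXON pointer, current
 * VMCS pointer, flags, the vmcs12 and, when in use, the shadow vmcs12) to
 * userspace.  Called with a NULL vcpu it reports the buffer size needed.
 */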
14035 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
14036                                 struct kvm_nested_state __user *user_kvm_nested_state,
14037                                 u32 user_data_size)
14038 {
14039         struct vcpu_vmx *vmx;
14040         struct vmcs12 *vmcs12;
14041         struct kvm_nested_state kvm_state = {
14042                 .flags = 0,
14043                 .format = 0,
14044                 .size = sizeof(kvm_state),
14045                 .vmx.vmxon_pa = -1ull,
14046                 .vmx.vmcs_pa = -1ull,
14047         };
14048
14049         if (!vcpu)
14050                 return kvm_state.size + 2 * VMCS12_SIZE;
14051
14052         vmx = to_vmx(vcpu);
14053         vmcs12 = get_vmcs12(vcpu);
14054         if (nested_vmx_allowed(vcpu) &&
14055             (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
14056                 kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
14057                 kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
14058
14059                 if (vmx->nested.current_vmptr != -1ull) {
14060                         kvm_state.size += VMCS12_SIZE;
14061
14062                         if (is_guest_mode(vcpu) &&
14063                             nested_cpu_has_shadow_vmcs(vmcs12) &&
14064                             vmcs12->vmcs_link_pointer != -1ull)
14065                                 kvm_state.size += VMCS12_SIZE;
14066                 }
14067
14068                 if (vmx->nested.smm.vmxon)
14069                         kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
14070
14071                 if (vmx->nested.smm.guest_mode)
14072                         kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
14073
14074                 if (is_guest_mode(vcpu)) {
14075                         kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
14076
14077                         if (vmx->nested.nested_run_pending)
14078                                 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
14079                 }
14080         }
14081
14082         if (user_data_size < kvm_state.size)
14083                 goto out;
14084
14085         if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
14086                 return -EFAULT;
14087
14088         if (vmx->nested.current_vmptr == -1ull)
14089                 goto out;
14090
14091         /*
14092          * When running L2, the authoritative vmcs12 state is in the
14093          * vmcs02. When running L1, the authoritative vmcs12 state is
14094          * in the shadow vmcs linked to vmcs01, unless
14095          * sync_shadow_vmcs is set, in which case, the authoritative
14096          * vmcs12 state is in the vmcs12 already.
14097          */
14098         if (is_guest_mode(vcpu))
14099                 sync_vmcs12(vcpu, vmcs12);
14100         else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
14101                 copy_shadow_to_vmcs12(vmx);
14102
14103         if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
14104                 return -EFAULT;
14105
14106         if (nested_cpu_has_shadow_vmcs(vmcs12) &&
14107             vmcs12->vmcs_link_pointer != -1ull) {
14108                 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
14109                                  get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
14110                         return -EFAULT;
14111         }
14112
14113 out:
14114         return kvm_state.size;
14115 }
14116
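/*
 * KVM_SET_NESTED_STATE: validate the nested state supplied by userspace,
 * re-enter VMX operation, reload vmcs12 (and the shadow vmcs12, if any)
 * and re-enter non-root mode when KVM_STATE_NESTED_GUEST_MODE is set.
 */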
14117 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
14118                                 struct kvm_nested_state __user *user_kvm_nested_state,
14119                                 struct kvm_nested_state *kvm_state)
14120 {
14121         struct vcpu_vmx *vmx = to_vmx(vcpu);
14122         struct vmcs12 *vmcs12;
14123         u32 exit_qual;
14124         int ret;
14125
14126         if (kvm_state->format != 0)
14127                 return -EINVAL;
14128
14129         if (!nested_vmx_allowed(vcpu))
14130                 return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
14131
14132         if (kvm_state->vmx.vmxon_pa == -1ull) {
14133                 if (kvm_state->vmx.smm.flags)
14134                         return -EINVAL;
14135
14136                 if (kvm_state->vmx.vmcs_pa != -1ull)
14137                         return -EINVAL;
14138
14139                 vmx_leave_nested(vcpu);
14140                 return 0;
14141         }
14142
14143         if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
14144                 return -EINVAL;
14145
14146         if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
14147                 return -EINVAL;
14148
14149         if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
14150             !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
14151                 return -EINVAL;
14152
14153         if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
14154             (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
14155                 return -EINVAL;
14156
14157         if (kvm_state->vmx.smm.flags &
14158             ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
14159                 return -EINVAL;
14160
14161         /*
14162          * SMM temporarily disables VMX, so we cannot be in guest mode,
14163          * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
14164          * must be zero.
14165          */
14166         if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
14167                 return -EINVAL;
14168
14169         if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
14170             !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
14171                 return -EINVAL;
14172
14173         vmx_leave_nested(vcpu);
14174         if (kvm_state->vmx.vmxon_pa == -1ull)
14175                 return 0;
14176
14177         vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
14178         ret = enter_vmx_operation(vcpu);
14179         if (ret)
14180                 return ret;
14181
14182         set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
14183
14184         if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
14185                 vmx->nested.smm.vmxon = true;
14186                 vmx->nested.vmxon = false;
14187
14188                 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
14189                         vmx->nested.smm.guest_mode = true;
14190         }
14191
14192         vmcs12 = get_vmcs12(vcpu);
14193         if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
14194                 return -EFAULT;
14195
14196         if (vmcs12->hdr.revision_id != VMCS12_REVISION)
14197                 return -EINVAL;
14198
14199         if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
14200                 return 0;
14201
14202         vmx->nested.nested_run_pending =
14203                 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
14204
14205         if (nested_cpu_has_shadow_vmcs(vmcs12) &&
14206             vmcs12->vmcs_link_pointer != -1ull) {
14207                 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
14208                 if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12))
14209                         return -EINVAL;
14210
14211                 if (copy_from_user(shadow_vmcs12,
14212                                    user_kvm_nested_state->data + VMCS12_SIZE,
14213                                    sizeof(*vmcs12)))
14214                         return -EFAULT;
14215
14216                 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
14217                     !shadow_vmcs12->hdr.shadow_vmcs)
14218                         return -EINVAL;
14219         }
14220
14221         if (check_vmentry_prereqs(vcpu, vmcs12) ||
14222             check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
14223                 return -EINVAL;
14224
14225         vmx->nested.dirty_vmcs12 = true;
14226         ret = nested_vmx_enter_non_root_mode(vcpu, false);
14227         if (ret)
14228                 return -EINVAL;
14229
14230         return 0;
14231 }
14232
14233 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
14234         .cpu_has_kvm_support = cpu_has_kvm_support,
14235         .disabled_by_bios = vmx_disabled_by_bios,
14236         .hardware_setup = hardware_setup,
14237         .hardware_unsetup = hardware_unsetup,
14238         .check_processor_compatibility = vmx_check_processor_compat,
14239         .hardware_enable = hardware_enable,
14240         .hardware_disable = hardware_disable,
14241         .cpu_has_accelerated_tpr = report_flexpriority,
14242         .has_emulated_msr = vmx_has_emulated_msr,
14243
14244         .vm_init = vmx_vm_init,
14245         .vm_alloc = vmx_vm_alloc,
14246         .vm_free = vmx_vm_free,
14247
14248         .vcpu_create = vmx_create_vcpu,
14249         .vcpu_free = vmx_free_vcpu,
14250         .vcpu_reset = vmx_vcpu_reset,
14251
14252         .prepare_guest_switch = vmx_prepare_switch_to_guest,
14253         .vcpu_load = vmx_vcpu_load,
14254         .vcpu_put = vmx_vcpu_put,
14255
14256         .update_bp_intercept = update_exception_bitmap,
14257         .get_msr_feature = vmx_get_msr_feature,
14258         .get_msr = vmx_get_msr,
14259         .set_msr = vmx_set_msr,
14260         .get_segment_base = vmx_get_segment_base,
14261         .get_segment = vmx_get_segment,
14262         .set_segment = vmx_set_segment,
14263         .get_cpl = vmx_get_cpl,
14264         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
14265         .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
14266         .decache_cr3 = vmx_decache_cr3,
14267         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
14268         .set_cr0 = vmx_set_cr0,
14269         .set_cr3 = vmx_set_cr3,
14270         .set_cr4 = vmx_set_cr4,
14271         .set_efer = vmx_set_efer,
14272         .get_idt = vmx_get_idt,
14273         .set_idt = vmx_set_idt,
14274         .get_gdt = vmx_get_gdt,
14275         .set_gdt = vmx_set_gdt,
14276         .get_dr6 = vmx_get_dr6,
14277         .set_dr6 = vmx_set_dr6,
14278         .set_dr7 = vmx_set_dr7,
14279         .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
14280         .cache_reg = vmx_cache_reg,
14281         .get_rflags = vmx_get_rflags,
14282         .set_rflags = vmx_set_rflags,
14283
14284         .tlb_flush = vmx_flush_tlb,
14285         .tlb_flush_gva = vmx_flush_tlb_gva,
14286
14287         .run = vmx_vcpu_run,
14288         .handle_exit = vmx_handle_exit,
14289         .skip_emulated_instruction = skip_emulated_instruction,
14290         .set_interrupt_shadow = vmx_set_interrupt_shadow,
14291         .get_interrupt_shadow = vmx_get_interrupt_shadow,
14292         .patch_hypercall = vmx_patch_hypercall,
14293         .set_irq = vmx_inject_irq,
14294         .set_nmi = vmx_inject_nmi,
14295         .queue_exception = vmx_queue_exception,
14296         .cancel_injection = vmx_cancel_injection,
14297         .interrupt_allowed = vmx_interrupt_allowed,
14298         .nmi_allowed = vmx_nmi_allowed,
14299         .get_nmi_mask = vmx_get_nmi_mask,
14300         .set_nmi_mask = vmx_set_nmi_mask,
14301         .enable_nmi_window = enable_nmi_window,
14302         .enable_irq_window = enable_irq_window,
14303         .update_cr8_intercept = update_cr8_intercept,
14304         .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
14305         .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
14306         .get_enable_apicv = vmx_get_enable_apicv,
14307         .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
14308         .load_eoi_exitmap = vmx_load_eoi_exitmap,
14309         .apicv_post_state_restore = vmx_apicv_post_state_restore,
14310         .hwapic_irr_update = vmx_hwapic_irr_update,
14311         .hwapic_isr_update = vmx_hwapic_isr_update,
14312         .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
14313         .sync_pir_to_irr = vmx_sync_pir_to_irr,
14314         .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
14315
14316         .set_tss_addr = vmx_set_tss_addr,
14317         .set_identity_map_addr = vmx_set_identity_map_addr,
14318         .get_tdp_level = get_ept_level,
14319         .get_mt_mask = vmx_get_mt_mask,
14320
14321         .get_exit_info = vmx_get_exit_info,
14322
14323         .get_lpage_level = vmx_get_lpage_level,
14324
14325         .cpuid_update = vmx_cpuid_update,
14326
14327         .rdtscp_supported = vmx_rdtscp_supported,
14328         .invpcid_supported = vmx_invpcid_supported,
14329
14330         .set_supported_cpuid = vmx_set_supported_cpuid,
14331
14332         .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
14333
14334         .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
14335         .write_tsc_offset = vmx_write_tsc_offset,
14336
14337         .set_tdp_cr3 = vmx_set_cr3,
14338
14339         .check_intercept = vmx_check_intercept,
14340         .handle_external_intr = vmx_handle_external_intr,
14341         .mpx_supported = vmx_mpx_supported,
14342         .xsaves_supported = vmx_xsaves_supported,
14343         .umip_emulated = vmx_umip_emulated,
14344
14345         .check_nested_events = vmx_check_nested_events,
14346         .request_immediate_exit = vmx_request_immediate_exit,
14347
14348         .sched_in = vmx_sched_in,
14349
14350         .slot_enable_log_dirty = vmx_slot_enable_log_dirty,
14351         .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
14352         .flush_log_dirty = vmx_flush_log_dirty,
14353         .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
14354         .write_log_dirty = vmx_write_pml_buffer,
14355
14356         .pre_block = vmx_pre_block,
14357         .post_block = vmx_post_block,
14358
14359         .pmu_ops = &intel_pmu_ops,
14360
14361         .update_pi_irte = vmx_update_pi_irte,
14362
14363 #ifdef CONFIG_X86_64
14364         .set_hv_timer = vmx_set_hv_timer,
14365         .cancel_hv_timer = vmx_cancel_hv_timer,
14366 #endif
14367
14368         .setup_mce = vmx_setup_mce,
14369
14370         .get_nested_state = vmx_get_nested_state,
14371         .set_nested_state = vmx_set_nested_state,
14372         .get_vmcs12_pages = nested_get_vmcs12_pages,
14373
14374         .smi_allowed = vmx_smi_allowed,
14375         .pre_enter_smm = vmx_pre_enter_smm,
14376         .pre_leave_smm = vmx_pre_leave_smm,
14377         .enable_smi_window = enable_smi_window,
14378 };
14379
14380 static void vmx_cleanup_l1d_flush(void)
14381 {
14382         if (vmx_l1d_flush_pages) {
14383                 free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
14384                 vmx_l1d_flush_pages = NULL;
14385         }
14386         /* Restore state so sysfs ignores VMX */
14387         l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
14388 }
14389
14390 static void vmx_exit(void)
14391 {
14392 #ifdef CONFIG_KEXEC_CORE
14393         RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
14394         synchronize_rcu();
14395 #endif
14396
14397         kvm_exit();
14398
14399 #if IS_ENABLED(CONFIG_HYPERV)
14400         if (static_branch_unlikely(&enable_evmcs)) {
14401                 int cpu;
14402                 struct hv_vp_assist_page *vp_ap;
14403                 /*
14404                  * Reset everything to support using non-enlightened VMCS
14405                  * access later (e.g. when we reload the module with
14406                  * enlightened_vmcs=0)
14407                  */
14408                 for_each_online_cpu(cpu) {
14409                         vp_ap = hv_get_vp_assist_page(cpu);
14410
14411                         if (!vp_ap)
14412                                 continue;
14413
14414                         vp_ap->current_nested_vmcs = 0;
14415                         vp_ap->enlighten_vmentry = 0;
14416                 }
14417
14418                 static_branch_disable(&enable_evmcs);
14419         }
14420 #endif
14421         vmx_cleanup_l1d_flush();
14422 }
14423 module_exit(vmx_exit);
14424
14425 static int __init vmx_init(void)
14426 {
14427         int r;
14428
14429 #if IS_ENABLED(CONFIG_HYPERV)
14430         /*
14431          * Enlightened VMCS usage must be recommended by Hyper-V and the
14432          * host needs to support eVMCS v1 or above.  eVMCS support can
14433          * also be disabled via the 'enlightened_vmcs' module parameter.
14434          */
14435         if (enlightened_vmcs &&
14436             ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
14437             (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
14438             KVM_EVMCS_VERSION) {
14439                 int cpu;
14440
14441                 /* Check that we have assist pages on all online CPUs */
14442                 for_each_online_cpu(cpu) {
14443                         if (!hv_get_vp_assist_page(cpu)) {
14444                                 enlightened_vmcs = false;
14445                                 break;
14446                         }
14447                 }
14448
14449                 if (enlightened_vmcs) {
14450                         pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n");
14451                         static_branch_enable(&enable_evmcs);
14452                 }
14453         } else {
14454                 enlightened_vmcs = false;
14455         }
14456 #endif
14457
14458         r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
14459                      __alignof__(struct vcpu_vmx), THIS_MODULE);
14460         if (r)
14461                 return r;
14462
14463         /*
14464          * Must be called after kvm_init() so enable_ept is properly set
14465          * up.  Hand in the mitigation parameter value that was stored by
14466          * the pre-module-init parser.  If no parameter was given, it
14467          * contains 'auto', which is turned into the default 'cond'
14468          * mitigation mode.
14469          */
14470         if (boot_cpu_has(X86_BUG_L1TF)) {
14471                 r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
14472                 if (r) {
14473                         vmx_exit();
14474                         return r;
14475                 }
14476         }
14477
14478 #ifdef CONFIG_KEXEC_CORE
14479         rcu_assign_pointer(crash_vmclear_loaded_vmcss,
14480                            crash_vmclear_local_loaded_vmcss);
14481 #endif
14482         vmx_check_vmcs12_offsets();
14483
14484         return 0;
14485 }
14486 module_init(vmx_init);