/* arch/x86/kvm/svm/svm.c */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
        X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN           0x0000000000000001ULL
#define TSC_RATIO_MAX           0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT       0x0100000000ULL

static const struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
        bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
        { .index = MSR_STAR,                            .always = true  },
        { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
#ifdef CONFIG_X86_64
        { .index = MSR_GS_BASE,                         .always = true  },
        { .index = MSR_FS_BASE,                         .always = true  },
        { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
        { .index = MSR_LSTAR,                           .always = true  },
        { .index = MSR_CSTAR,                           .always = true  },
        { .index = MSR_SYSCALL_MASK,                    .always = true  },
#endif
        { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
        { .index = MSR_IA32_PRED_CMD,                   .always = false },
        { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
        { .index = MSR_IA32_LASTINTTOIP,                .always = false },
        { .index = MSR_INVALID,                         .always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
bool npt_enabled = true;
#else
bool npt_enabled;
#endif
/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * pause_filter_count: On processors that support Pause filtering (indicated
 *      by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
 *      count value. On VMRUN this value is loaded into an internal counter.
 *      Each time a pause instruction is executed, this counter is decremented
 *      until it reaches zero, at which time a #VMEXIT is generated if pause
 *      intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *      Intercept Filtering for more details.
 *      This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *      pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *      upper bound on the amount of time a guest is allowed to execute in a
 *      pause loop. In this mode, a 16-bit pause filter threshold field is
 *      added in the VMCB. The threshold value is a cycle count that is used
 *      to reset the pause counter. As with simple pause filtering, VMRUN
 *      loads the pause count value from the VMCB into an internal counter.
 *      Then, on each pause instruction the hardware checks the elapsed
 *      number of cycles since the most recent pause instruction against the
 *      pause filter threshold. If the elapsed cycle count is greater than
 *      the pause filter threshold, then the internal pause count is reloaded
 *      from the VMCB and execution continues. If the elapsed cycle count is
 *      less than the pause filter threshold, then the internal pause count
 *      is decremented. If the count value is less than zero and PAUSE
 *      intercept is enabled, a #VMEXIT is triggered. If advanced pause
 *      filtering is supported and the pause filter threshold field is set to
 *      zero, the filter will operate in the simpler, count only mode.
 */

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static bool __read_mostly dump_invalid_vmcb = false;
module_param(dump_invalid_vmcb, bool, 0644);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_complete_interrupts(struct vcpu_svm *svm);

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1:8, type:5, dpl:2, p:1;
        unsigned limit1:4, zero0:3, g:1, base2:8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

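/*
 * The MSR permission bitmap uses two bits per MSR: an even read bit and an
 * odd write bit, where a set bit means intercept. Each of the three
 * architectural MSR ranges (based at 0x00000000, 0xc0000000 and 0xc0010000)
 * occupies 2KB of the map, i.e. 8192 MSRs per range. For example, MSR_STAR
 * (0xc0000081) lands at byte offset 0x81/4 + 2048 = 0x820, which
 * svm_msrpm_offset() below reports as the u32 offset 0x208.
 */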
u32 svm_msrpm_offset(u32 msr)
{
        u32 offset;
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr < msrpm_ranges[i] ||
                    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
                        continue;

                offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
                offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

                /* Now we have the u8 offset - but need the u32 offset */
                return offset / 4;
        }

        /* MSR not in any range */
        return MSR_INVALID;
}

#define MAX_INST_SIZE 15

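/*
 * clgi/stgi clear and set the Global Interrupt Flag. While GIF is clear,
 * physical interrupts, NMIs and SMIs are held pending, which lets the
 * VMRUN/#VMEXIT critical section run without host interference. invlpga
 * invalidates the TLB entry for one guest virtual address in a given ASID.
 */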
static inline void clgi(void)
{
        asm volatile (__ex("clgi"));
}

static inline void stgi(void)
{
        asm volatile (__ex("stgi"));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return PT64_ROOT_4LEVEL;
#else
        return PT32E_ROOT_LEVEL;
#endif
}

void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        vcpu->arch.efer = efer;

        if (!npt_enabled) {
                /* Shadow paging assumes NX to be available.  */
                efer |= EFER_NX;

                if (!(efer & EFER_LMA))
                        efer &= ~EFER_LME;
        }

        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

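/*
 * With NextRIP save (nrips), hardware reports the address of the instruction
 * following the one that triggered the exit in vmcb->control.next_rip, so
 * the instruction can be skipped without decoding it. Without it, KVM falls
 * back to the instruction emulator (EMULTYPE_SKIP).
 */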
static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (nrips && svm->vmcb->control.next_rip != 0) {
                WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
                svm->next_rip = svm->vmcb->control.next_rip;
        }

        if (!svm->next_rip) {
                if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
                        return 0;
        } else {
                if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
                        pr_err("%s: ip 0x%lx next 0x%llx\n",
                               __func__, kvm_rip_read(vcpu), svm->next_rip);
                kvm_rip_write(vcpu, svm->next_rip);
        }
        svm_set_interrupt_shadow(vcpu, 0);

        return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned nr = vcpu->arch.exception.nr;
        bool has_error_code = vcpu->arch.exception.has_error_code;
        bool reinject = vcpu->arch.exception.injected;
        u32 error_code = vcpu->arch.exception.error_code;

        /*
         * If we are within a nested VM we'd better #VMEXIT and let the guest
         * handle the exception
         */
        if (!reinject &&
            nested_svm_check_exception(svm, nr, has_error_code, error_code))
                return;

        kvm_deliver_exception_payload(&svm->vcpu);

        if (nr == BP_VECTOR && !nrips) {
                unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

                /*
                 * For guest debugging where we have to reinject #BP if some
                 * INT3 is guest-owned:
                 * Emulate nRIP by moving RIP forward. Will fail if injection
                 * raises a fault that is not intercepted. Still better than
                 * failing in all cases.
                 */
                (void)skip_emulated_instruction(&svm->vcpu);
                rip = kvm_rip_read(&svm->vcpu);
                svm->int3_rip = rip + svm->vmcb->save.cs.base;
                svm->int3_injected = rip - old_rip;
        }

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
        u32 low, high;
        int err;
        u64 val;

        if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
                return;

        /* Use _safe variants to not break nested virtualization */
        val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
        if (err)
                return;

        val |= (1ULL << 47);

        low  = lower_32_bits(val);
        high = upper_32_bits(val);

        native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

        erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
        /*
         * Guests should see errata 400 and 415 as fixed (assuming that
         * HLT and IO instructions are intercepted).
         */
        vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
        vcpu->arch.osvw.status = osvw_status & ~(6ULL);

        /*
         * By increasing VCPU's osvw.length to 3 we are telling the guest that
         * all osvw.status bits inside that length, including bit 0 (which is
         * reserved for erratum 298), are valid. However, if host processor's
         * osvw_len is 0 then osvw_status[0] carries no information. We need to
         * be conservative here and therefore we tell the guest that erratum 298
         * is present (because we really don't know).
         */
        if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
                vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void)
{
        /* Make sure we clean up behind us */
        if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

        cpu_svm_disable();

        amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{
        struct svm_cpu_data *sd;
        uint64_t efer;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
                return -EINVAL;
        }
        sd = per_cpu(svm_data, me);
        if (!sd) {
                pr_err("%s: svm_data is NULL on %d\n", __func__, me);
                return -EINVAL;
        }

        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;
        sd->min_asid = max_sev_asid + 1;

        gdt = get_current_gdt_rw();
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

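        /*
         * MSR_VM_HSAVE_PA points the CPU at a per-CPU 4K save area where it
         * stashes host state across VMRUN/#VMEXIT.
         */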
        wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
                __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
        }

        /*
         * Get OSVW bits.
         *
         * Note that it is possible to have a system with mixed processor
         * revisions and therefore different OSVW bits. If bits are not the same
         * on different processors then choose the worst case (i.e. if erratum
         * is present on one processor and not on another then assume that the
         * erratum is present everywhere).
         */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
                uint64_t len, status = 0;
                int err;

                len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
                if (!err)
                        status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
                                                      &err);

                if (err)
                        osvw_status = osvw_len = 0;
                else {
                        if (len < osvw_len)
                                osvw_len = len;
                        osvw_status |= status;
                        osvw_status &= (1ULL << osvw_len) - 1;
                }
        } else
                osvw_status = osvw_len = 0;

        svm_init_erratum_383();

        amd_pmu_enable_virt();

        return 0;
}

static void svm_cpu_uninit(int cpu)
{
        /* Use the passed cpu, not the current one; the caller iterates all CPUs. */
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

        if (!sd)
                return;

        per_cpu(svm_data, cpu) = NULL;
        kfree(sd->sev_vmcbs);
        __free_page(sd->save_area);
        kfree(sd);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *sd;

        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return -ENOMEM;
        sd->cpu = cpu;
        sd->save_area = alloc_page(GFP_KERNEL);
        if (!sd->save_area)
                goto free_cpu_data;

        if (svm_sev_enabled()) {
                sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
                                              sizeof(void *),
                                              GFP_KERNEL);
                if (!sd->sev_vmcbs)
                        goto free_save_area;
        }

        per_cpu(svm_data, cpu) = sd;

        return 0;

free_save_area:
        __free_page(sd->save_area);
free_cpu_data:
        kfree(sd);
        return -ENOMEM;
}

static bool valid_msr_intercept(u32 index)
{
        int i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
                if (direct_access_msrs[i].index == index)
                        return true;

        return false;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
{
        u8 bit_write;
        unsigned long tmp;
        u32 offset;
        u32 *msrpm;

        msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
                                      to_svm(vcpu)->msrpm;

        offset    = svm_msrpm_offset(msr);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        return !!test_bit(bit_write, &tmp);
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        u8 bit_read, bit_write;
        unsigned long tmp;
        u32 offset;

        /*
         * If this warning triggers extend the direct_access_msrs list at the
         * beginning of the file
         */
        WARN_ON(!valid_msr_intercept(msr));

        offset    = svm_msrpm_offset(msr);
        bit_read  = 2 * (msr & 0x0f);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
        write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

        msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
        int i;

        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                if (!direct_access_msrs[i].always)
                        continue;

                set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
        }
}

static void add_msr_offset(u32 offset)
{
        int i;

        for (i = 0; i < MSRPM_OFFSETS; ++i) {

                /* Offset already in list? */
                if (msrpm_offsets[i] == offset)
                        return;

                /* Slot used by another offset? */
                if (msrpm_offsets[i] != MSR_INVALID)
                        continue;

                /* Add offset to list */
                msrpm_offsets[i] = offset;

                return;
        }

        /*
         * If this BUG triggers the msrpm_offsets table has an overflow. Just
         * increase MSRPM_OFFSETS in this case.
         */
        BUG();
}

static void init_msrpm_offsets(void)
{
        int i;

        memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 offset;

                offset = svm_msrpm_offset(direct_access_msrs[i].index);
                BUG_ON(offset == MSR_INVALID);

                add_msr_offset(offset);
        }
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
        svm->nmi_singlestep = false;

        if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
                /* Clear our flags if they were not set by the guest */
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
        }
}

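/*
 * Adapt the PAUSE filter window between pause_filter_count and
 * pause_filter_count_max: grow it while a vCPU keeps PAUSE-exiting (it is
 * likely spinning on a contended lock) and shrink it back toward the default
 * otherwise, mirroring the dynamic PLE window used on VMX.
 */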
static void grow_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count = __grow_ple_window(old,
                                                        pause_filter_count,
                                                        pause_filter_count_grow,
                                                        pause_filter_count_max);

        if (control->pause_filter_count != old) {
                mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count =
                                __shrink_ple_window(old,
                                                    pause_filter_count,
                                                    pause_filter_count_shrink,
                                                    pause_filter_count);
        if (control->pause_filter_count != old) {
                mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
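/*
 * For example, assuming the CPU reports 43 usable physical address bits and
 * the C-bit at position 47, the resulting mask covers bits 43..51 plus the
 * present bit, guaranteeing reserved-bit page faults for MMIO SPTEs.
 */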
static __init void svm_adjust_mmio_mask(void)
{
        unsigned int enc_bit, mask_bit;
        u64 msr, mask;

        /* If there is no memory encryption support, use existing mask */
        if (cpuid_eax(0x80000000) < 0x8000001f)
                return;

        /* If memory encryption is not enabled, use existing mask */
        rdmsrl(MSR_K8_SYSCFG, msr);
        if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                return;

        enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
        mask_bit = boot_cpu_data.x86_phys_bits;

        /* Increment the mask bit if it is the same as the encryption bit */
        if (enc_bit == mask_bit)
                mask_bit++;

        /*
         * If the mask bit location is below 52, then some bits above the
         * physical addressing limit will always be reserved, so use the
         * rsvd_bits() function to generate the mask. This mask, along with
         * the present bit, will be used to generate a page fault with
         * PFER.RSV = 1.
         *
         * If the mask bit location is 52 (or above), then clear the mask.
         */
        mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

        kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}

static void svm_hardware_teardown(void)
{
        int cpu;

        if (svm_sev_enabled())
                sev_hardware_teardown();

        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = 0;
}

static __init void svm_set_cpu_caps(void)
{
        kvm_set_cpu_caps();

        supported_xss = 0;

        /* CPUID 0x80000001 and 0x8000000A (SVM features) */
        if (nested) {
                kvm_cpu_cap_set(X86_FEATURE_SVM);

                if (nrips)
                        kvm_cpu_cap_set(X86_FEATURE_NRIPS);

                if (npt_enabled)
                        kvm_cpu_cap_set(X86_FEATURE_NPT);
        }

        /* CPUID 0x80000008 */
        if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
            boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
}

static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        void *iopm_va;
        int r;

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        init_msrpm_offsets();

        supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);

        if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
                kvm_enable_efer_bits(EFER_FFXSR);

        if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                kvm_has_tsc_control = true;
                kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
                kvm_tsc_scaling_ratio_frac_bits = 32;
        }

        /* Check for pause filtering support */
        if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
                pause_filter_count = 0;
                pause_filter_thresh = 0;
        } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
                pause_filter_thresh = 0;
        }

        if (nested) {
                printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
                kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
        }

        if (sev) {
                if (boot_cpu_has(X86_FEATURE_SEV) &&
                    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
                        r = sev_hardware_setup();
                        if (r)
                                sev = false;
                } else {
                        sev = false;
                }
        }

        svm_adjust_mmio_mask();

        for_each_possible_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err;
        }

        if (!boot_cpu_has(X86_FEATURE_NPT))
                npt_enabled = false;

        if (npt_enabled && !npt)
                npt_enabled = false;

        kvm_configure_mmu(npt_enabled, PT_PDPE_LEVEL);
        pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

        if (nrips) {
                if (!boot_cpu_has(X86_FEATURE_NRIPS))
                        nrips = false;
        }

        if (avic) {
                if (!npt_enabled ||
                    !boot_cpu_has(X86_FEATURE_AVIC) ||
                    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
                        avic = false;
                } else {
                        pr_info("AVIC enabled\n");

                        amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
                }
        }

        if (vls) {
                if (!npt_enabled ||
                    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
                    !IS_ENABLED(CONFIG_X86_64)) {
                        vls = false;
                } else {
                        pr_info("Virtual VMLOAD VMSAVE supported\n");
                }
        }

        if (vgif) {
                if (!boot_cpu_has(X86_FEATURE_VGIF))
                        vgif = false;
                else
                        pr_info("Virtual GIF supported\n");
        }

        svm_set_cpu_caps();

        return 0;

err:
        svm_hardware_teardown();
        return r;
}

static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}

static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (is_guest_mode(vcpu))
                return svm->nested.hsave->control.tsc_offset;

        return vcpu->arch.tsc_offset;
}

static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 g_tsc_offset = 0;

        if (is_guest_mode(vcpu)) {
                /* Write L1's TSC offset.  */
                g_tsc_offset = svm->vmcb->control.tsc_offset -
                               svm->nested.hsave->control.tsc_offset;
                svm->nested.hsave->control.tsc_offset = offset;
        }

        trace_kvm_write_tsc_offset(vcpu->vcpu_id,
                                   svm->vmcb->control.tsc_offset - g_tsc_offset,
                                   offset);

        svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
        return svm->vmcb->control.tsc_offset;
}

static void init_vmcb(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct vmcb_save_area *save = &svm->vmcb->save;

        svm->vcpu.arch.hflags = 0;

        set_cr_intercept(svm, INTERCEPT_CR0_READ);
        set_cr_intercept(svm, INTERCEPT_CR3_READ);
        set_cr_intercept(svm, INTERCEPT_CR4_READ);
        set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
        set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
        set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
        if (!kvm_vcpu_apicv_active(&svm->vcpu))
                set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

        set_dr_intercepts(svm);

        set_exception_intercept(svm, PF_VECTOR);
        set_exception_intercept(svm, UD_VECTOR);
        set_exception_intercept(svm, MC_VECTOR);
        set_exception_intercept(svm, AC_VECTOR);
        set_exception_intercept(svm, DB_VECTOR);
        /*
         * Guest access to VMware backdoor ports could legitimately
         * trigger #GP because of TSS I/O permission bitmap.
         * We intercept those #GP and allow access to them anyway
         * as VMware does.
         */
        if (enable_vmware_backdoor)
                set_exception_intercept(svm, GP_VECTOR);

        set_intercept(svm, INTERCEPT_INTR);
        set_intercept(svm, INTERCEPT_NMI);
        set_intercept(svm, INTERCEPT_SMI);
        set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
        set_intercept(svm, INTERCEPT_RDPMC);
        set_intercept(svm, INTERCEPT_CPUID);
        set_intercept(svm, INTERCEPT_INVD);
        set_intercept(svm, INTERCEPT_INVLPG);
        set_intercept(svm, INTERCEPT_INVLPGA);
        set_intercept(svm, INTERCEPT_IOIO_PROT);
        set_intercept(svm, INTERCEPT_MSR_PROT);
        set_intercept(svm, INTERCEPT_TASK_SWITCH);
        set_intercept(svm, INTERCEPT_SHUTDOWN);
        set_intercept(svm, INTERCEPT_VMRUN);
        set_intercept(svm, INTERCEPT_VMMCALL);
        set_intercept(svm, INTERCEPT_VMLOAD);
        set_intercept(svm, INTERCEPT_VMSAVE);
        set_intercept(svm, INTERCEPT_STGI);
        set_intercept(svm, INTERCEPT_CLGI);
        set_intercept(svm, INTERCEPT_SKINIT);
        set_intercept(svm, INTERCEPT_WBINVD);
        set_intercept(svm, INTERCEPT_XSETBV);
        set_intercept(svm, INTERCEPT_RDPRU);
        set_intercept(svm, INTERCEPT_RSM);

        if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
                set_intercept(svm, INTERCEPT_MONITOR);
                set_intercept(svm, INTERCEPT_MWAIT);
        }

        if (!kvm_hlt_in_guest(svm->vcpu.kvm))
                set_intercept(svm, INTERCEPT_HLT);

        control->iopm_base_pa = __sme_set(iopm_base);
        control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
        control->int_ctl = V_INTR_MASKING_MASK;

        init_seg(&save->es);
        init_seg(&save->ss);
        init_seg(&save->ds);
        init_seg(&save->fs);
        init_seg(&save->gs);

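        /*
         * CS.base 0xffff0000 plus RIP 0xfff0 (set below) places the vCPU at
         * the architectural reset vector 0xfffffff0.
         */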
        save->cs.selector = 0xf000;
        save->cs.base = 0xffff0000;
        /* Executable/Readable Code Segment */
        save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
                SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
        save->cs.limit = 0xffff;

        save->gdtr.limit = 0xffff;
        save->idtr.limit = 0xffff;

        init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
        init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

        svm_set_efer(&svm->vcpu, 0);
        save->dr6 = 0xffff0ff0;
        kvm_set_rflags(&svm->vcpu, 2);
        save->rip = 0x0000fff0;
        svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

        /*
         * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
         * It also updates the guest-visible cr0 value.
         */
        svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
        kvm_mmu_reset_context(&svm->vcpu);

        save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */

        if (npt_enabled) {
                /* Setup VMCB for Nested Paging */
                control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
                clr_intercept(svm, INTERCEPT_INVLPG);
                clr_exception_intercept(svm, PF_VECTOR);
                clr_cr_intercept(svm, INTERCEPT_CR3_READ);
                clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
                save->g_pat = svm->vcpu.arch.pat;
                save->cr3 = 0;
                save->cr4 = 0;
        }
        svm->asid_generation = 0;

        svm->nested.vmcb = 0;
        svm->vcpu.arch.hflags = 0;

        if (pause_filter_count) {
                control->pause_filter_count = pause_filter_count;
                if (pause_filter_thresh)
                        control->pause_filter_thresh = pause_filter_thresh;
                set_intercept(svm, INTERCEPT_PAUSE);
        } else {
                clr_intercept(svm, INTERCEPT_PAUSE);
        }

        if (kvm_vcpu_apicv_active(&svm->vcpu))
                avic_init_vmcb(svm);

        /*
         * If hardware supports Virtual VMLOAD VMSAVE then enable it
         * in VMCB and clear intercepts to avoid #VMEXIT.
         */
        if (vls) {
                clr_intercept(svm, INTERCEPT_VMLOAD);
                clr_intercept(svm, INTERCEPT_VMSAVE);
                svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
        }

        if (vgif) {
                clr_intercept(svm, INTERCEPT_STGI);
                clr_intercept(svm, INTERCEPT_CLGI);
                svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
        }

        if (sev_guest(svm->vcpu.kvm)) {
                svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
                clr_exception_intercept(svm, UD_VECTOR);
        }

        mark_all_dirty(svm->vmcb);

        enable_gif(svm);
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 dummy;
        u32 eax = 1;

        svm->spec_ctrl = 0;
        svm->virt_spec_ctrl = 0;

        if (!init_event) {
                svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
                                           MSR_IA32_APICBASE_ENABLE;
                if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
                        svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
        }
        init_vmcb(svm);

        kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false);
        kvm_rdx_write(vcpu, eax);

        if (kvm_vcpu_apicv_active(vcpu) && !init_event)
                avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
}

static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm;
        struct page *page;
        struct page *msrpm_pages;
        struct page *hsave_page;
        struct page *nested_msrpm_pages;
        int err;

        BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
        svm = to_svm(vcpu);

        err = -ENOMEM;
        page = alloc_page(GFP_KERNEL_ACCOUNT);
        if (!page)
                goto out;

        msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
                goto free_page1;

        nested_msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
                goto free_page2;

        hsave_page = alloc_page(GFP_KERNEL_ACCOUNT);
        if (!hsave_page)
                goto free_page3;

        err = avic_init_vcpu(svm);
        if (err)
                goto free_page4;

        /*
         * We initialize this flag to true to make sure that the is_running
         * bit would be set the first time the vcpu is loaded.
         */
        if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm))
                svm->avic_is_running = true;

        svm->nested.hsave = page_address(hsave_page);

        svm->msrpm = page_address(msrpm_pages);
        svm_vcpu_init_msrpm(svm->msrpm);

        svm->nested.msrpm = page_address(nested_msrpm_pages);
        svm_vcpu_init_msrpm(svm->nested.msrpm);

        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
        svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
        svm->asid_generation = 0;
        init_vmcb(svm);

        svm_init_osvw(vcpu);
        vcpu->arch.microcode_version = 0x01000065;

        return 0;

free_page4:
        __free_page(hsave_page);
free_page3:
        __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
        __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
        __free_page(page);
out:
        return err;
}

static void svm_clear_current_vmcb(struct vmcb *vmcb)
{
        int i;

        for_each_online_cpu(i)
                cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /*
         * The vmcb page can be recycled, causing a false negative in
         * svm_vcpu_load(). So, ensure that no logical CPU has this
         * vmcb page recorded as its current vmcb.
         */
        svm_clear_current_vmcb(svm->vmcb);

        __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        __free_page(virt_to_page(svm->nested.hsave));
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
        int i;

        if (unlikely(cpu != vcpu->cpu)) {
                svm->asid_generation = 0;
                mark_all_dirty(svm->vmcb);
        }

#ifdef CONFIG_X86_64
        rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
        savesegment(fs, svm->host.fs);
        savesegment(gs, svm->host.gs);
        svm->host.ldt = kvm_read_ldt();

        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
                if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
                        __this_cpu_write(current_tsc_ratio, tsc_ratio);
                        wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
                }
        }
        /* This assumes that the kernel never uses MSR_TSC_AUX */
        if (static_cpu_has(X86_FEATURE_RDTSCP))
                wrmsrl(MSR_TSC_AUX, svm->tsc_aux);

        if (sd->current_vmcb != svm->vmcb) {
                sd->current_vmcb = svm->vmcb;
                indirect_branch_prediction_barrier();
        }
        avic_vcpu_load(vcpu, cpu);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        avic_vcpu_put(vcpu);

        ++vcpu->stat.host_state_reload;
        kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
        loadsegment(fs, svm->host.fs);
        wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
        load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS
        loadsegment(gs, svm->host.gs);
#endif
#endif
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned long rflags = svm->vmcb->save.rflags;

        if (svm->nmi_singlestep) {
                /* Hide our flags if they were not set by the guest */
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
                        rflags &= ~X86_EFLAGS_TF;
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
                        rflags &= ~X86_EFLAGS_RF;
        }
        return rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        if (to_svm(vcpu)->nmi_singlestep)
                rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);

        /*
         * Any change of EFLAGS.VM is accompanied by a reload of SS
         * (caused by either a task switch or an inter-privilege IRET),
         * so we do not need to update the CPL here.
         */
        to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
        switch (reg) {
        case VCPU_EXREG_PDPTR:
                BUG_ON(!npt_enabled);
                load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
                break;
        default:
                WARN_ON_ONCE(1);
        }
}

static inline void svm_enable_vintr(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control;

        /* The following fields are ignored when AVIC is enabled */
        WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));

        /*
         * This is just a dummy VINTR to actually cause a vmexit to happen.
         * Actual injection of virtual interrupts happens through EVENTINJ.
         */
        control = &svm->vmcb->control;
        control->int_vector = 0x0;
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
        mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
        set_intercept(svm, INTERCEPT_VINTR);
        if (is_intercept(svm, INTERCEPT_VINTR))
                svm_enable_vintr(svm);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
        clr_intercept(svm, INTERCEPT_VINTR);

        svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
        mark_dirty(svm->vmcb, VMCB_INTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        switch (seg) {
        case VCPU_SREG_CS: return &save->cs;
        case VCPU_SREG_DS: return &save->ds;
        case VCPU_SREG_ES: return &save->es;
        case VCPU_SREG_FS: return &save->fs;
        case VCPU_SREG_GS: return &save->gs;
        case VCPU_SREG_SS: return &save->ss;
        case VCPU_SREG_TR: return &save->tr;
        case VCPU_SREG_LDTR: return &save->ldtr;
        }
        BUG();
        return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        return s->base;
}

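/*
 * The VMCB stores segment attributes in a compressed 12-bit form: bits 0-7
 * hold descriptor attribute byte 5 (type, S, DPL, P) and bits 8-11 hold the
 * flags from byte 6 (AVL, L, DB, G), with the limit bits squeezed out.
 */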
static void svm_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        var->base = s->base;
        var->limit = s->limit;
        var->selector = s->selector;
        var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
        var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
        var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
        var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

        /*
         * AMD CPUs circa 2014 track the G bit for all segments except CS.
         * However, the SVM spec states that the G bit is not observed by the
         * CPU, and some VMware virtual CPUs drop the G bit for all segments.
         * So let's synthesize a legal G bit for all segments, this helps
         * running KVM nested. It also helps cross-vendor migration, because
         * Intel's vmentry has a check on the 'G' bit.
         */
        var->g = s->limit > 0xfffff;

        /*
         * AMD's VMCB does not have an explicit unusable field, so emulate it
         * for cross vendor migration purposes by "not present"
         */
        var->unusable = !var->present;

        switch (seg) {
        case VCPU_SREG_TR:
                /*
                 * Work around a bug where the busy flag in the tr selector
                 * isn't exposed
                 */
                var->type |= 0x2;
                break;
        case VCPU_SREG_DS:
        case VCPU_SREG_ES:
        case VCPU_SREG_FS:
        case VCPU_SREG_GS:
                /*
                 * The accessed bit must always be set in the segment
                 * descriptor cache, although it can be cleared in the
                 * descriptor, the cached bit always remains at 1. Since
                 * Intel has a check on this, set it here to support
                 * cross-vendor migration.
                 */
                if (!var->unusable)
                        var->type |= 0x1;
                break;
        case VCPU_SREG_SS:
                /*
                 * On AMD CPUs sometimes the DB bit in the segment
                 * descriptor is left as 1, although the whole segment has
                 * been made unusable. Clear it here to pass an Intel VMX
                 * entry check when cross vendor migrating.
                 */
                if (var->unusable)
                        var->db = 0;
                /* This is symmetric with svm_set_segment() */
                var->dpl = to_svm(vcpu)->vmcb->save.cpl;
                break;
        }
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->size = svm->vmcb->save.idtr.limit;
        dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.idtr.limit = dt->size;
        svm->vmcb->save.idtr.base = dt->address;
        mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->size = svm->vmcb->save.gdtr.limit;
        dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.gdtr.limit = dt->size;
        svm->vmcb->save.gdtr.base = dt->address;
        mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

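/*
 * Drop the CR0 read/write intercepts when the guest-visible CR0 and the
 * hardware CR0 agree on the selectively-shadowed bits; otherwise keep
 * intercepting so KVM can emulate the difference.
 */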
1544 static void update_cr0_intercept(struct vcpu_svm *svm)
1545 {
1546         ulong gcr0 = svm->vcpu.arch.cr0;
1547         u64 *hcr0 = &svm->vmcb->save.cr0;
1548
1549         *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
1550                 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
1551
1552         mark_dirty(svm->vmcb, VMCB_CR);
1553
1554         if (gcr0 == *hcr0) {
1555                 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
1556                 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1557         } else {
1558                 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1559                 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1560         }
1561 }
1562
1563 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1564 {
1565         struct vcpu_svm *svm = to_svm(vcpu);
1566
1567 #ifdef CONFIG_X86_64
1568         if (vcpu->arch.efer & EFER_LME) {
1569                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
1570                         vcpu->arch.efer |= EFER_LMA;
1571                         svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1572                 }
1573
1574                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
1575                         vcpu->arch.efer &= ~EFER_LMA;
1576                         svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1577                 }
1578         }
1579 #endif
1580         vcpu->arch.cr0 = cr0;
1581
1582         if (!npt_enabled)
1583                 cr0 |= X86_CR0_PG | X86_CR0_WP;
1584
1585         /*
1586           * Re-enable caching here because the QEMU BIOS does not
1587           * do it; leaving CD/NW set results in some delay at
1588           * reboot.
1589          */
1590         if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1591                 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1592         svm->vmcb->save.cr0 = cr0;
1593         mark_dirty(svm->vmcb, VMCB_CR);
1594         update_cr0_intercept(svm);
1595 }
1596
1597 int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1598 {
1599         unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
1600         unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
1601
1602         if (cr4 & X86_CR4_VMXE)
1603                 return 1;
1604
1605         if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
1606                 svm_flush_tlb(vcpu, true);
1607
1608         vcpu->arch.cr4 = cr4;
1609         if (!npt_enabled)
1610                 cr4 |= X86_CR4_PAE;
1611         cr4 |= host_cr4_mce;
1612         to_svm(vcpu)->vmcb->save.cr4 = cr4;
1613         mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1614         return 0;
1615 }
1616
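/*
 * Per the SVM architecture, the VMCB keeps segment attributes in a
 * packed 12-bit form: descriptor bits 47:40 (type, S, DPL, P) land in
 * attrib bits 7:0 and descriptor bits 55:52 (AVL, L, DB, G) in attrib
 * bits 11:8, which is what the SVM_SELECTOR_* shifts below encode.
 */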
1617 static void svm_set_segment(struct kvm_vcpu *vcpu,
1618                             struct kvm_segment *var, int seg)
1619 {
1620         struct vcpu_svm *svm = to_svm(vcpu);
1621         struct vmcb_seg *s = svm_seg(vcpu, seg);
1622
1623         s->base = var->base;
1624         s->limit = var->limit;
1625         s->selector = var->selector;
1626         s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1627         s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1628         s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1629         s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1630         s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1631         s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1632         s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1633         s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1634
1635         /*
1636          * This is always accurate, except if SYSRET returned to a segment
1637          * with SS.DPL != 3.  Intel does not have this quirk, and always
1638          * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1639          * would entail passing the CPL to userspace and back.
1640          */
1641         if (seg == VCPU_SREG_SS)
1642                 /* This is symmetric with svm_get_segment() */
1643                 svm->vmcb->save.cpl = (var->dpl & 3);
1644
1645         mark_dirty(svm->vmcb, VMCB_SEG);
1646 }
1647
1648 static void update_bp_intercept(struct kvm_vcpu *vcpu)
1649 {
1650         struct vcpu_svm *svm = to_svm(vcpu);
1651
1652         clr_exception_intercept(svm, BP_VECTOR);
1653
1654         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1655                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1656                         set_exception_intercept(svm, BP_VECTOR);
1657         } else
1658                 vcpu->guest_debug = 0;
1659 }
1660
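/*
 * Hand out the next free ASID on this physical CPU.  When the pool is
 * exhausted, start a new generation: wrap back to min_asid and request
 * a full TLB flush so that no stale translations survive the reuse.
 */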
1661 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1662 {
1663         if (sd->next_asid > sd->max_asid) {
1664                 ++sd->asid_generation;
1665                 sd->next_asid = sd->min_asid;
1666                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1667         }
1668
1669         svm->asid_generation = sd->asid_generation;
1670         svm->vmcb->control.asid = sd->next_asid++;
1671
1672         mark_dirty(svm->vmcb, VMCB_ASID);
1673 }
1674
1675 static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
1676 {
1677         return to_svm(vcpu)->vmcb->save.dr6;
1678 }
1679
1680 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
1681 {
1682         struct vcpu_svm *svm = to_svm(vcpu);
1683
1684         svm->vmcb->save.dr6 = value;
1685         mark_dirty(svm->vmcb, VMCB_DR);
1686 }
1687
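/*
 * The guest touched a debug register while DR intercepts were disabled
 * (see dr_interception() below); read back the complete debug state
 * and re-arm the intercepts.
 */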
1688 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1689 {
1690         struct vcpu_svm *svm = to_svm(vcpu);
1691
1692         get_debugreg(vcpu->arch.db[0], 0);
1693         get_debugreg(vcpu->arch.db[1], 1);
1694         get_debugreg(vcpu->arch.db[2], 2);
1695         get_debugreg(vcpu->arch.db[3], 3);
1696         vcpu->arch.dr6 = svm_get_dr6(vcpu);
1697         vcpu->arch.dr7 = svm->vmcb->save.dr7;
1698
1699         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1700         set_dr_intercepts(svm);
1701 }
1702
1703 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
1704 {
1705         struct vcpu_svm *svm = to_svm(vcpu);
1706
1707         svm->vmcb->save.dr7 = value;
1708         mark_dirty(svm->vmcb, VMCB_DR);
1709 }
1710
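/*
 * For both #PF and #NPF intercepts, exit_info_2 holds the faulting
 * address (with any SME C-bit stripped by __sme_clr()) and exit_info_1
 * holds the page-fault error code.  With decode assists the CPU also
 * supplies the instruction bytes, sparing a guest-memory fetch.
 */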
1711 static int pf_interception(struct vcpu_svm *svm)
1712 {
1713         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
1714         u64 error_code = svm->vmcb->control.exit_info_1;
1715
1716         return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
1717                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1718                         svm->vmcb->control.insn_bytes : NULL,
1719                         svm->vmcb->control.insn_len);
1720 }
1721
1722 static int npf_interception(struct vcpu_svm *svm)
1723 {
1724         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
1725         u64 error_code = svm->vmcb->control.exit_info_1;
1726
1727         trace_kvm_page_fault(fault_address, error_code);
1728         return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
1729                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1730                         svm->vmcb->control.insn_bytes : NULL,
1731                         svm->vmcb->control.insn_len);
1732 }
1733
1734 static int db_interception(struct vcpu_svm *svm)
1735 {
1736         struct kvm_run *kvm_run = svm->vcpu.run;
1737         struct kvm_vcpu *vcpu = &svm->vcpu;
1738
1739         if (!(svm->vcpu.guest_debug &
1740               (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
1741                 !svm->nmi_singlestep) {
1742                 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1743                 return 1;
1744         }
1745
1746         if (svm->nmi_singlestep) {
1747                 disable_nmi_singlestep(svm);
1748                 /* Make sure we check for pending NMIs upon entry */
1749                 kvm_make_request(KVM_REQ_EVENT, vcpu);
1750         }
1751
1752         if (svm->vcpu.guest_debug &
1753             (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
1754                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1755                 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
1756                 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
1757                 kvm_run->debug.arch.pc =
1758                         svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1759                 kvm_run->debug.arch.exception = DB_VECTOR;
1760                 return 0;
1761         }
1762
1763         return 1;
1764 }
1765
1766 static int bp_interception(struct vcpu_svm *svm)
1767 {
1768         struct kvm_run *kvm_run = svm->vcpu.run;
1769
1770         kvm_run->exit_reason = KVM_EXIT_DEBUG;
1771         kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1772         kvm_run->debug.arch.exception = BP_VECTOR;
1773         return 0;
1774 }
1775
1776 static int ud_interception(struct vcpu_svm *svm)
1777 {
1778         return handle_ud(&svm->vcpu);
1779 }
1780
1781 static int ac_interception(struct vcpu_svm *svm)
1782 {
1783         kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
1784         return 1;
1785 }
1786
1787 static int gp_interception(struct vcpu_svm *svm)
1788 {
1789         struct kvm_vcpu *vcpu = &svm->vcpu;
1790         u32 error_code = svm->vmcb->control.exit_info_1;
1791
1792         WARN_ON_ONCE(!enable_vmware_backdoor);
1793
1794         /*
1795          * VMware backdoor emulation on #GP interception only handles IN{S},
1796          * OUT{S}, and RDPMC, none of which generate a non-zero error code.
1797          */
1798         if (error_code) {
1799                 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
1800                 return 1;
1801         }
1802         return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
1803 }
1804
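/*
 * Check for the machine-check signature of AMD erratum 383 and, when it
 * matches, clear the MCi_STATUS banks so the host MCE handler does not
 * consume the same event again.
 */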
1805 static bool is_erratum_383(void)
1806 {
1807         int err, i;
1808         u64 value;
1809
1810         if (!erratum_383_found)
1811                 return false;
1812
1813         value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
1814         if (err)
1815                 return false;
1816
1817         /* Bit 62 may or may not be set for this MCE */
1818         value &= ~(1ULL << 62);
1819
1820         if (value != 0xb600000000010015ULL)
1821                 return false;
1822
1823         /* Clear MCi_STATUS registers */
1824         for (i = 0; i < 6; ++i)
1825                 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
1826
1827         value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
1828         if (!err) {
1829                 u32 low, high;
1830
1831                 value &= ~(1ULL << 2);
1832                 low    = lower_32_bits(value);
1833                 high   = upper_32_bits(value);
1834
1835                 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
1836         }
1837
1838         /* Flush tlb to evict multi-match entries */
1839         __flush_tlb_all();
1840
1841         return true;
1842 }
1843
1844 static void svm_handle_mce(struct vcpu_svm *svm)
1845 {
1846         if (is_erratum_383()) {
1847                 /*
1848                  * Erratum 383 triggered. Guest state is corrupt so kill the
1849                  * guest.
1850                  */
1851                 pr_err("KVM: Guest triggered AMD Erratum 383\n");
1852
1853                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
1854
1855                 return;
1856         }
1857
1858         /*
1859          * On an #MC intercept the MCE handler is not called automatically in
1860          * the host. So do it by hand here.
1861          */
1862         asm volatile (
1863                 "int $0x12\n");
1864         /* not sure if we ever come back to this point */
1867 }
1868
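/*
 * The machine check itself is processed on the VM-exit path via
 * svm_handle_mce() above; there is nothing left to do for the
 * intercept.
 */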
1869 static int mc_interception(struct vcpu_svm *svm)
1870 {
1871         return 1;
1872 }
1873
1874 static int shutdown_interception(struct vcpu_svm *svm)
1875 {
1876         struct kvm_run *kvm_run = svm->vcpu.run;
1877
1878         /*
1879          * VMCB is undefined after a SHUTDOWN intercept
1880          * so reinitialize it.
1881          */
1882         clear_page(svm->vmcb);
1883         init_vmcb(svm);
1884
1885         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1886         return 0;
1887 }
1888
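/*
 * Per the SVM architecture, exit_info_1 describes the I/O access: bit 0
 * is the direction (IN), bit 2 marks string instructions, bits 6:4 give
 * the operand size and bits 31:16 the port.  exit_info_2 holds the rIP
 * of the following instruction, used as next_rip below.
 */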
1889 static int io_interception(struct vcpu_svm *svm)
1890 {
1891         struct kvm_vcpu *vcpu = &svm->vcpu;
1892         u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
1893         int size, in, string;
1894         unsigned port;
1895
1896         ++svm->vcpu.stat.io_exits;
1897         string = (io_info & SVM_IOIO_STR_MASK) != 0;
1898         in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
1899         if (string)
1900                 return kvm_emulate_instruction(vcpu, 0);
1901
1902         port = io_info >> 16;
1903         size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
1904         svm->next_rip = svm->vmcb->control.exit_info_2;
1905
1906         return kvm_fast_pio(&svm->vcpu, size, port, in);
1907 }
1908
1909 static int nmi_interception(struct vcpu_svm *svm)
1910 {
1911         return 1;
1912 }
1913
1914 static int intr_interception(struct vcpu_svm *svm)
1915 {
1916         ++svm->vcpu.stat.irq_exits;
1917         return 1;
1918 }
1919
1920 static int nop_on_interception(struct vcpu_svm *svm)
1921 {
1922         return 1;
1923 }
1924
1925 static int halt_interception(struct vcpu_svm *svm)
1926 {
1927         return kvm_emulate_halt(&svm->vcpu);
1928 }
1929
1930 static int vmmcall_interception(struct vcpu_svm *svm)
1931 {
1932         return kvm_emulate_hypercall(&svm->vcpu);
1933 }
1934
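/*
 * VMLOAD and VMSAVE take the physical address of a VMCB in RAX and
 * transfer the state that VMRUN/#VMEXIT do not switch (FS, GS, TR,
 * LDTR and the SYSCALL/SYSENTER MSRs, among others).  Map the guest
 * page and copy that subset with nested_svm_vmloadsave().
 */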
1935 static int vmload_interception(struct vcpu_svm *svm)
1936 {
1937         struct vmcb *nested_vmcb;
1938         struct kvm_host_map map;
1939         int ret;
1940
1941         if (nested_svm_check_permissions(svm))
1942                 return 1;
1943
1944         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
1945         if (ret) {
1946                 if (ret == -EINVAL)
1947                         kvm_inject_gp(&svm->vcpu, 0);
1948                 return 1;
1949         }
1950
1951         nested_vmcb = map.hva;
1952
1953         ret = kvm_skip_emulated_instruction(&svm->vcpu);
1954
1955         nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
1956         kvm_vcpu_unmap(&svm->vcpu, &map, true);
1957
1958         return ret;
1959 }
1960
1961 static int vmsave_interception(struct vcpu_svm *svm)
1962 {
1963         struct vmcb *nested_vmcb;
1964         struct kvm_host_map map;
1965         int ret;
1966
1967         if (nested_svm_check_permissions(svm))
1968                 return 1;
1969
1970         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
1971         if (ret) {
1972                 if (ret == -EINVAL)
1973                         kvm_inject_gp(&svm->vcpu, 0);
1974                 return 1;
1975         }
1976
1977         nested_vmcb = map.hva;
1978
1979         ret = kvm_skip_emulated_instruction(&svm->vcpu);
1980
1981         nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
1982         kvm_vcpu_unmap(&svm->vcpu, &map, true);
1983
1984         return ret;
1985 }
1986
1987 static int vmrun_interception(struct vcpu_svm *svm)
1988 {
1989         if (nested_svm_check_permissions(svm))
1990                 return 1;
1991
1992         return nested_svm_vmrun(svm);
1993 }
1994
1995 static int stgi_interception(struct vcpu_svm *svm)
1996 {
1997         int ret;
1998
1999         if (nested_svm_check_permissions(svm))
2000                 return 1;
2001
2002         /*
2003          * If VGIF is enabled, the STGI intercept is only added to
2004          * detect the opening of the SMI/NMI window; remove it now.
2005          */
2006         if (vgif_enabled(svm))
2007                 clr_intercept(svm, INTERCEPT_STGI);
2008
2009         ret = kvm_skip_emulated_instruction(&svm->vcpu);
2010         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2011
2012         enable_gif(svm);
2013
2014         return ret;
2015 }
2016
2017 static int clgi_interception(struct vcpu_svm *svm)
2018 {
2019         int ret;
2020
2021         if (nested_svm_check_permissions(svm))
2022                 return 1;
2023
2024         ret = kvm_skip_emulated_instruction(&svm->vcpu);
2025
2026         disable_gif(svm);
2027
2028         /* After a CLGI no interrupts should be delivered to the guest */
2029         if (!kvm_vcpu_apicv_active(&svm->vcpu))
2030                 svm_clear_vintr(svm);
2031
2032         return ret;
2033 }
2034
2035 static int invlpga_interception(struct vcpu_svm *svm)
2036 {
2037         struct kvm_vcpu *vcpu = &svm->vcpu;
2038
2039         trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
2040                           kvm_rax_read(&svm->vcpu));
2041
2042         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2043         kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
2044
2045         return kvm_skip_emulated_instruction(&svm->vcpu);
2046 }
2047
2048 static int skinit_interception(struct vcpu_svm *svm)
2049 {
2050         trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
2051
2052         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2053         return 1;
2054 }
2055
2056 static int wbinvd_interception(struct vcpu_svm *svm)
2057 {
2058         return kvm_emulate_wbinvd(&svm->vcpu);
2059 }
2060
2061 static int xsetbv_interception(struct vcpu_svm *svm)
2062 {
2063         u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
2064         u32 index = kvm_rcx_read(&svm->vcpu);
2065
2066         if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0)
2067                 return kvm_skip_emulated_instruction(&svm->vcpu);
2069
2070         return 1;
2071 }
2072
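/* RDPRU is not exposed to guests, so reflect it back as a #UD. */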
2073 static int rdpru_interception(struct vcpu_svm *svm)
2074 {
2075         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2076         return 1;
2077 }
2078
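/*
 * On a task-switch intercept, exit_info_1 carries the target TSS
 * selector and exit_info_2 carries flags that describe the cause:
 * an IRET, a far JMP, or an event delivered through the IDT.
 */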
2079 static int task_switch_interception(struct vcpu_svm *svm)
2080 {
2081         u16 tss_selector;
2082         int reason;
2083         int int_type = svm->vmcb->control.exit_int_info &
2084                 SVM_EXITINTINFO_TYPE_MASK;
2085         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2086         uint32_t type =
2087                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2088         uint32_t idt_v =
2089                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2090         bool has_error_code = false;
2091         u32 error_code = 0;
2092
2093         tss_selector = (u16)svm->vmcb->control.exit_info_1;
2094
2095         if (svm->vmcb->control.exit_info_2 &
2096             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2097                 reason = TASK_SWITCH_IRET;
2098         else if (svm->vmcb->control.exit_info_2 &
2099                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2100                 reason = TASK_SWITCH_JMP;
2101         else if (idt_v)
2102                 reason = TASK_SWITCH_GATE;
2103         else
2104                 reason = TASK_SWITCH_CALL;
2105
2106         if (reason == TASK_SWITCH_GATE) {
2107                 switch (type) {
2108                 case SVM_EXITINTINFO_TYPE_NMI:
2109                         svm->vcpu.arch.nmi_injected = false;
2110                         break;
2111                 case SVM_EXITINTINFO_TYPE_EXEPT:
2112                         if (svm->vmcb->control.exit_info_2 &
2113                             (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2114                                 has_error_code = true;
2115                                 error_code =
2116                                         (u32)svm->vmcb->control.exit_info_2;
2117                         }
2118                         kvm_clear_exception_queue(&svm->vcpu);
2119                         break;
2120                 case SVM_EXITINTINFO_TYPE_INTR:
2121                         kvm_clear_interrupt_queue(&svm->vcpu);
2122                         break;
2123                 default:
2124                         break;
2125                 }
2126         }
2127
2128         if (reason != TASK_SWITCH_GATE ||
2129             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2130             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2131              (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
2132                 if (!skip_emulated_instruction(&svm->vcpu))
2133                         return 0;
2134         }
2135
2136         if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2137                 int_vec = -1;
2138
2139         return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
2140                                has_error_code, error_code);
2141 }
2142
2143 static int cpuid_interception(struct vcpu_svm *svm)
2144 {
2145         return kvm_emulate_cpuid(&svm->vcpu);
2146 }
2147
2148 static int iret_interception(struct vcpu_svm *svm)
2149 {
2150         ++svm->vcpu.stat.nmi_window_exits;
2151         clr_intercept(svm, INTERCEPT_IRET);
2152         svm->vcpu.arch.hflags |= HF_IRET_MASK;
2153         svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
2154         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2155         return 1;
2156 }
2157
2158 static int invlpg_interception(struct vcpu_svm *svm)
2159 {
2160         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2161                 return kvm_emulate_instruction(&svm->vcpu, 0);
2162
2163         kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
2164         return kvm_skip_emulated_instruction(&svm->vcpu);
2165 }
2166
2167 static int emulate_on_interception(struct vcpu_svm *svm)
2168 {
2169         return kvm_emulate_instruction(&svm->vcpu, 0);
2170 }
2171
2172 static int rsm_interception(struct vcpu_svm *svm)
2173 {
2174         return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2);
2175 }
2176
2177 static int rdpmc_interception(struct vcpu_svm *svm)
2178 {
2179         int err;
2180
2181         if (!nrips)
2182                 return emulate_on_interception(svm);
2183
2184         err = kvm_rdpmc(&svm->vcpu);
2185         return kvm_complete_insn_gp(&svm->vcpu, err);
2186 }
2187
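/*
 * Per the SVM architecture, the selective CR0 write intercept only
 * fires when a bit other than CR0.TS or CR0.MP changes.  Apply the
 * same test when deciding whether to reflect a CR0 write to L1.
 */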
2188 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
2189                                             unsigned long val)
2190 {
2191         unsigned long cr0 = svm->vcpu.arch.cr0;
2192         bool ret = false;
2193         u64 intercept;
2194
2195         intercept = svm->nested.intercept;
2196
2197         if (!is_guest_mode(&svm->vcpu) ||
2198             (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
2199                 return false;
2200
2201         cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2202         val &= ~SVM_CR0_SELECTIVE_MASK;
2203
2204         if (cr0 ^ val) {
2205                 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2206                 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2207         }
2208
2209         return ret;
2210 }
2211
2212 #define CR_VALID (1ULL << 63)
2213
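/*
 * The MOV-CR exit codes are laid out as SVM_EXIT_READ_CR0..CR15
 * followed by SVM_EXIT_WRITE_CR0..CR15, so once SVM_EXIT_READ_CR0 is
 * subtracted a value of 16 or more denotes a write.
 */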
2214 static int cr_interception(struct vcpu_svm *svm)
2215 {
2216         int reg, cr;
2217         unsigned long val;
2218         int err;
2219
2220         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2221                 return emulate_on_interception(svm);
2222
2223         if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2224                 return emulate_on_interception(svm);
2225
2226         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2227         if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2228                 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2229         else
2230                 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2231
2232         err = 0;
2233         if (cr >= 16) { /* mov to cr */
2234                 cr -= 16;
2235                 val = kvm_register_read(&svm->vcpu, reg);
2236                 switch (cr) {
2237                 case 0:
2238                         if (!check_selective_cr0_intercepted(svm, val))
2239                                 err = kvm_set_cr0(&svm->vcpu, val);
2240                         else
2241                                 return 1;
2242
2243                         break;
2244                 case 3:
2245                         err = kvm_set_cr3(&svm->vcpu, val);
2246                         break;
2247                 case 4:
2248                         err = kvm_set_cr4(&svm->vcpu, val);
2249                         break;
2250                 case 8:
2251                         err = kvm_set_cr8(&svm->vcpu, val);
2252                         break;
2253                 default:
2254                         WARN(1, "unhandled write to CR%d", cr);
2255                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2256                         return 1;
2257                 }
2258         } else { /* mov from cr */
2259                 switch (cr) {
2260                 case 0:
2261                         val = kvm_read_cr0(&svm->vcpu);
2262                         break;
2263                 case 2:
2264                         val = svm->vcpu.arch.cr2;
2265                         break;
2266                 case 3:
2267                         val = kvm_read_cr3(&svm->vcpu);
2268                         break;
2269                 case 4:
2270                         val = kvm_read_cr4(&svm->vcpu);
2271                         break;
2272                 case 8:
2273                         val = kvm_get_cr8(&svm->vcpu);
2274                         break;
2275                 default:
2276                         WARN(1, "unhandled read from CR%d", cr);
2277                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2278                         return 1;
2279                 }
2280                 kvm_register_write(&svm->vcpu, reg, val);
2281         }
2282         return kvm_complete_insn_gp(&svm->vcpu, err);
2283 }
2284
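/*
 * The DR exit codes use the same layout as the CR ones: reads first,
 * writes 16 entries later, hence the dr >= 16 test below.
 */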
2285 static int dr_interception(struct vcpu_svm *svm)
2286 {
2287         int reg, dr;
2288         unsigned long val;
2289
2290         if (svm->vcpu.guest_debug == 0) {
2291                 /*
2292                  * No more DR vmexits; force a reload of the debug registers
2293                  * and reenter on this instruction.  The next vmexit will
2294                  * retrieve the full state of the debug registers.
2295                  */
2296                 clr_dr_intercepts(svm);
2297                 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2298                 return 1;
2299         }
2300
2301         if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2302                 return emulate_on_interception(svm);
2303
2304         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2305         dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2306
2307         if (dr >= 16) { /* mov to DRn */
2308                 if (!kvm_require_dr(&svm->vcpu, dr - 16))
2309                         return 1;
2310                 val = kvm_register_read(&svm->vcpu, reg);
2311                 kvm_set_dr(&svm->vcpu, dr - 16, val);
2312         } else {
2313                 if (!kvm_require_dr(&svm->vcpu, dr))
2314                         return 1;
2315                 kvm_get_dr(&svm->vcpu, dr, &val);
2316                 kvm_register_write(&svm->vcpu, reg, val);
2317         }
2318
2319         return kvm_skip_emulated_instruction(&svm->vcpu);
2320 }
2321
2322 static int cr8_write_interception(struct vcpu_svm *svm)
2323 {
2324         struct kvm_run *kvm_run = svm->vcpu.run;
2325         int r;
2326
2327         u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
2328         /* instruction emulation calls kvm_set_cr8() */
2329         r = cr_interception(svm);
2330         if (lapic_in_kernel(&svm->vcpu))
2331                 return r;
2332         if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
2333                 return r;
2334         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2335         return 0;
2336 }
2337
2338 static int svm_get_msr_feature(struct kvm_msr_entry *msr)
2339 {
2340         msr->data = 0;
2341
2342         switch (msr->index) {
2343         case MSR_F10H_DECFG:
2344                 if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
2345                         msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
2346                 break;
2347         default:
2348                 return 1;
2349         }
2350
2351         return 0;
2352 }
2353
2354 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2355 {
2356         struct vcpu_svm *svm = to_svm(vcpu);
2357
2358         switch (msr_info->index) {
2359         case MSR_STAR:
2360                 msr_info->data = svm->vmcb->save.star;
2361                 break;
2362 #ifdef CONFIG_X86_64
2363         case MSR_LSTAR:
2364                 msr_info->data = svm->vmcb->save.lstar;
2365                 break;
2366         case MSR_CSTAR:
2367                 msr_info->data = svm->vmcb->save.cstar;
2368                 break;
2369         case MSR_KERNEL_GS_BASE:
2370                 msr_info->data = svm->vmcb->save.kernel_gs_base;
2371                 break;
2372         case MSR_SYSCALL_MASK:
2373                 msr_info->data = svm->vmcb->save.sfmask;
2374                 break;
2375 #endif
2376         case MSR_IA32_SYSENTER_CS:
2377                 msr_info->data = svm->vmcb->save.sysenter_cs;
2378                 break;
2379         case MSR_IA32_SYSENTER_EIP:
2380                 msr_info->data = svm->sysenter_eip;
2381                 break;
2382         case MSR_IA32_SYSENTER_ESP:
2383                 msr_info->data = svm->sysenter_esp;
2384                 break;
2385         case MSR_TSC_AUX:
2386                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
2387                         return 1;
2388                 msr_info->data = svm->tsc_aux;
2389                 break;
2390         /*
2391          * Nobody will change the following 5 values in the VMCB so we can
2392          * safely return them on rdmsr. They will always be 0 until LBRV is
2393          * implemented.
2394          */
2395         case MSR_IA32_DEBUGCTLMSR:
2396                 msr_info->data = svm->vmcb->save.dbgctl;
2397                 break;
2398         case MSR_IA32_LASTBRANCHFROMIP:
2399                 msr_info->data = svm->vmcb->save.br_from;
2400                 break;
2401         case MSR_IA32_LASTBRANCHTOIP:
2402                 msr_info->data = svm->vmcb->save.br_to;
2403                 break;
2404         case MSR_IA32_LASTINTFROMIP:
2405                 msr_info->data = svm->vmcb->save.last_excp_from;
2406                 break;
2407         case MSR_IA32_LASTINTTOIP:
2408                 msr_info->data = svm->vmcb->save.last_excp_to;
2409                 break;
2410         case MSR_VM_HSAVE_PA:
2411                 msr_info->data = svm->nested.hsave_msr;
2412                 break;
2413         case MSR_VM_CR:
2414                 msr_info->data = svm->nested.vm_cr_msr;
2415                 break;
2416         case MSR_IA32_SPEC_CTRL:
2417                 if (!msr_info->host_initiated &&
2418                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
2419                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
2420                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
2421                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
2422                         return 1;
2423
2424                 msr_info->data = svm->spec_ctrl;
2425                 break;
2426         case MSR_AMD64_VIRT_SPEC_CTRL:
2427                 if (!msr_info->host_initiated &&
2428                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2429                         return 1;
2430
2431                 msr_info->data = svm->virt_spec_ctrl;
2432                 break;
2433         case MSR_F15H_IC_CFG: {
2435                 int family, model;
2436
2437                 family = guest_cpuid_family(vcpu);
2438                 model  = guest_cpuid_model(vcpu);
2439
2440                 if (family < 0 || model < 0)
2441                         return kvm_get_msr_common(vcpu, msr_info);
2442
2443                 msr_info->data = 0;
2444
2445                 if (family == 0x15 &&
2446                     (model >= 0x2 && model < 0x20))
2447                         msr_info->data = 0x1E;
2448                 break;
2449         }
2450         case MSR_F10H_DECFG:
2451                 msr_info->data = svm->msr_decfg;
2452                 break;
2453         default:
2454                 return kvm_get_msr_common(vcpu, msr_info);
2455         }
2456         return 0;
2457 }
2458
2459 static int rdmsr_interception(struct vcpu_svm *svm)
2460 {
2461         return kvm_emulate_rdmsr(&svm->vcpu);
2462 }
2463
2464 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2465 {
2466         struct vcpu_svm *svm = to_svm(vcpu);
2467         int svm_dis, chg_mask;
2468
2469         if (data & ~SVM_VM_CR_VALID_MASK)
2470                 return 1;
2471
2472         chg_mask = SVM_VM_CR_VALID_MASK;
2473
2474         if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2475                 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2476
2477         svm->nested.vm_cr_msr &= ~chg_mask;
2478         svm->nested.vm_cr_msr |= (data & chg_mask);
2479
2480         svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2481
2482         /* Check for SVMDIS while EFER.SVME is set */
2483         if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2484                 return 1;
2485
2486         return 0;
2487 }
2488
2489 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2490 {
2491         struct vcpu_svm *svm = to_svm(vcpu);
2492
2493         u32 ecx = msr->index;
2494         u64 data = msr->data;
2495         switch (ecx) {
2496         case MSR_IA32_CR_PAT:
2497                 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
2498                         return 1;
2499                 vcpu->arch.pat = data;
2500                 svm->vmcb->save.g_pat = data;
2501                 mark_dirty(svm->vmcb, VMCB_NPT);
2502                 break;
2503         case MSR_IA32_SPEC_CTRL:
2504                 if (!msr->host_initiated &&
2505                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
2506                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
2507                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
2508                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
2509                         return 1;
2510
2511                 if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
2512                         return 1;
2513
2514                 svm->spec_ctrl = data;
2515                 if (!data)
2516                         break;
2517
2518                 /*
2519                  * For non-nested:
2520                  * When it's written (to non-zero) for the first time, pass
2521                  * it through.
2522                  *
2523                  * For nested:
2524                  * The handling of the MSR bitmap for L2 guests is done in
2525                  * nested_svm_vmrun_msrpm.
2526                  * We update the L1 MSR bit as well since it will end up
2527                  * touching the MSR anyway now.
2528                  */
2529                 set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
2530                 break;
2531         case MSR_IA32_PRED_CMD:
2532                 if (!msr->host_initiated &&
2533                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
2534                         return 1;
2535
2536                 if (data & ~PRED_CMD_IBPB)
2537                         return 1;
2538                 if (!boot_cpu_has(X86_FEATURE_AMD_IBPB))
2539                         return 1;
2540                 if (!data)
2541                         break;
2542
2543                 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
2544                 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
2545                 break;
2546         case MSR_AMD64_VIRT_SPEC_CTRL:
2547                 if (!msr->host_initiated &&
2548                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2549                         return 1;
2550
2551                 if (data & ~SPEC_CTRL_SSBD)
2552                         return 1;
2553
2554                 svm->virt_spec_ctrl = data;
2555                 break;
2556         case MSR_STAR:
2557                 svm->vmcb->save.star = data;
2558                 break;
2559 #ifdef CONFIG_X86_64
2560         case MSR_LSTAR:
2561                 svm->vmcb->save.lstar = data;
2562                 break;
2563         case MSR_CSTAR:
2564                 svm->vmcb->save.cstar = data;
2565                 break;
2566         case MSR_KERNEL_GS_BASE:
2567                 svm->vmcb->save.kernel_gs_base = data;
2568                 break;
2569         case MSR_SYSCALL_MASK:
2570                 svm->vmcb->save.sfmask = data;
2571                 break;
2572 #endif
2573         case MSR_IA32_SYSENTER_CS:
2574                 svm->vmcb->save.sysenter_cs = data;
2575                 break;
2576         case MSR_IA32_SYSENTER_EIP:
2577                 svm->sysenter_eip = data;
2578                 svm->vmcb->save.sysenter_eip = data;
2579                 break;
2580         case MSR_IA32_SYSENTER_ESP:
2581                 svm->sysenter_esp = data;
2582                 svm->vmcb->save.sysenter_esp = data;
2583                 break;
2584         case MSR_TSC_AUX:
2585                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
2586                         return 1;
2587
2588                 /*
2589                  * This is rare, so we update the MSR here instead of using
2590                  * direct_access_msrs.  Doing that would require a rdmsr in
2591                  * svm_vcpu_put.
2592                  */
2593                 svm->tsc_aux = data;
2594                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
2595                 break;
2596         case MSR_IA32_DEBUGCTLMSR:
2597                 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
2598                         vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
2599                                     __func__, data);
2600                         break;
2601                 }
2602                 if (data & DEBUGCTL_RESERVED_BITS)
2603                         return 1;
2604
2605                 svm->vmcb->save.dbgctl = data;
2606                 mark_dirty(svm->vmcb, VMCB_LBR);
2607                 if (data & (1ULL << 0)) /* DEBUGCTL.LBR */
2608                         svm_enable_lbrv(svm);
2609                 else
2610                         svm_disable_lbrv(svm);
2611                 break;
2612         case MSR_VM_HSAVE_PA:
2613                 svm->nested.hsave_msr = data;
2614                 break;
2615         case MSR_VM_CR:
2616                 return svm_set_vm_cr(vcpu, data);
2617         case MSR_VM_IGNNE:
2618                 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
2619                 break;
2620         case MSR_F10H_DECFG: {
2621                 struct kvm_msr_entry msr_entry;
2622
2623                 msr_entry.index = msr->index;
2624                 if (svm_get_msr_feature(&msr_entry))
2625                         return 1;
2626
2627                 /* Check the supported bits */
2628                 if (data & ~msr_entry.data)
2629                         return 1;
2630
2631                 /* Don't allow the guest to change any bits, #GP on mismatch */
2632                 if (!msr->host_initiated && (data ^ msr_entry.data))
2633                         return 1;
2634
2635                 svm->msr_decfg = data;
2636                 break;
2637         }
2638         case MSR_IA32_APICBASE:
2639                 if (kvm_vcpu_apicv_active(vcpu))
2640                         avic_update_vapic_bar(to_svm(vcpu), data);
2641                 /* Fall through */
2642         default:
2643                 return kvm_set_msr_common(vcpu, msr);
2644         }
2645         return 0;
2646 }
2647
2648 static int wrmsr_interception(struct vcpu_svm *svm)
2649 {
2650         return kvm_emulate_wrmsr(&svm->vcpu);
2651 }
2652
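/* For MSR intercepts, exit_info_1 is 1 for WRMSR and 0 for RDMSR. */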
2653 static int msr_interception(struct vcpu_svm *svm)
2654 {
2655         if (svm->vmcb->control.exit_info_1)
2656                 return wrmsr_interception(svm);
2657         else
2658                 return rdmsr_interception(svm);
2659 }
2660
2661 static int interrupt_window_interception(struct vcpu_svm *svm)
2662 {
2663         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2664         svm_clear_vintr(svm);
2665
2666         /*
2667          * For AVIC, the only reason to end up here is ExtINTs.
2668          * In this case AVIC was temporarily disabled for
2669          * requesting the IRQ window and we have to re-enable it.
2670          */
2671         svm_toggle_avic_for_irq_window(&svm->vcpu, true);
2672
2673         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2674         mark_dirty(svm->vmcb, VMCB_INTR);
2675         ++svm->vcpu.stat.irq_window_exits;
2676         return 1;
2677 }
2678
2679 static int pause_interception(struct vcpu_svm *svm)
2680 {
2681         struct kvm_vcpu *vcpu = &svm->vcpu;
2682         bool in_kernel = (svm_get_cpl(vcpu) == 0);
2683
2684         if (pause_filter_thresh)
2685                 grow_ple_window(vcpu);
2686
2687         kvm_vcpu_on_spin(vcpu, in_kernel);
2688         return 1;
2689 }
2690
2691 static int nop_interception(struct vcpu_svm *svm)
2692 {
2693         return kvm_skip_emulated_instruction(&(svm->vcpu));
2694 }
2695
2696 static int monitor_interception(struct vcpu_svm *svm)
2697 {
2698         printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
2699         return nop_interception(svm);
2700 }
2701
2702 static int mwait_interception(struct vcpu_svm *svm)
2703 {
2704         printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
2705         return nop_interception(svm);
2706 }
2707
2708 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
2709         [SVM_EXIT_READ_CR0]                     = cr_interception,
2710         [SVM_EXIT_READ_CR3]                     = cr_interception,
2711         [SVM_EXIT_READ_CR4]                     = cr_interception,
2712         [SVM_EXIT_READ_CR8]                     = cr_interception,
2713         [SVM_EXIT_CR0_SEL_WRITE]                = cr_interception,
2714         [SVM_EXIT_WRITE_CR0]                    = cr_interception,
2715         [SVM_EXIT_WRITE_CR3]                    = cr_interception,
2716         [SVM_EXIT_WRITE_CR4]                    = cr_interception,
2717         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
2718         [SVM_EXIT_READ_DR0]                     = dr_interception,
2719         [SVM_EXIT_READ_DR1]                     = dr_interception,
2720         [SVM_EXIT_READ_DR2]                     = dr_interception,
2721         [SVM_EXIT_READ_DR3]                     = dr_interception,
2722         [SVM_EXIT_READ_DR4]                     = dr_interception,
2723         [SVM_EXIT_READ_DR5]                     = dr_interception,
2724         [SVM_EXIT_READ_DR6]                     = dr_interception,
2725         [SVM_EXIT_READ_DR7]                     = dr_interception,
2726         [SVM_EXIT_WRITE_DR0]                    = dr_interception,
2727         [SVM_EXIT_WRITE_DR1]                    = dr_interception,
2728         [SVM_EXIT_WRITE_DR2]                    = dr_interception,
2729         [SVM_EXIT_WRITE_DR3]                    = dr_interception,
2730         [SVM_EXIT_WRITE_DR4]                    = dr_interception,
2731         [SVM_EXIT_WRITE_DR5]                    = dr_interception,
2732         [SVM_EXIT_WRITE_DR6]                    = dr_interception,
2733         [SVM_EXIT_WRITE_DR7]                    = dr_interception,
2734         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
2735         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
2736         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
2737         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
2738         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
2739         [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
2740         [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
2741         [SVM_EXIT_INTR]                         = intr_interception,
2742         [SVM_EXIT_NMI]                          = nmi_interception,
2743         [SVM_EXIT_SMI]                          = nop_on_interception,
2744         [SVM_EXIT_INIT]                         = nop_on_interception,
2745         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
2746         [SVM_EXIT_RDPMC]                        = rdpmc_interception,
2747         [SVM_EXIT_CPUID]                        = cpuid_interception,
2748         [SVM_EXIT_IRET]                         = iret_interception,
2749         [SVM_EXIT_INVD]                         = emulate_on_interception,
2750         [SVM_EXIT_PAUSE]                        = pause_interception,
2751         [SVM_EXIT_HLT]                          = halt_interception,
2752         [SVM_EXIT_INVLPG]                       = invlpg_interception,
2753         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
2754         [SVM_EXIT_IOIO]                         = io_interception,
2755         [SVM_EXIT_MSR]                          = msr_interception,
2756         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
2757         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
2758         [SVM_EXIT_VMRUN]                        = vmrun_interception,
2759         [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
2760         [SVM_EXIT_VMLOAD]                       = vmload_interception,
2761         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
2762         [SVM_EXIT_STGI]                         = stgi_interception,
2763         [SVM_EXIT_CLGI]                         = clgi_interception,
2764         [SVM_EXIT_SKINIT]                       = skinit_interception,
2765         [SVM_EXIT_WBINVD]                       = wbinvd_interception,
2766         [SVM_EXIT_MONITOR]                      = monitor_interception,
2767         [SVM_EXIT_MWAIT]                        = mwait_interception,
2768         [SVM_EXIT_XSETBV]                       = xsetbv_interception,
2769         [SVM_EXIT_RDPRU]                        = rdpru_interception,
2770         [SVM_EXIT_NPF]                          = npf_interception,
2771         [SVM_EXIT_RSM]                          = rsm_interception,
2772         [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
2773         [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
2774 };
2775
2776 static void dump_vmcb(struct kvm_vcpu *vcpu)
2777 {
2778         struct vcpu_svm *svm = to_svm(vcpu);
2779         struct vmcb_control_area *control = &svm->vmcb->control;
2780         struct vmcb_save_area *save = &svm->vmcb->save;
2781
2782         if (!dump_invalid_vmcb) {
2783                 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
2784                 return;
2785         }
2786
2787         pr_err("VMCB Control Area:\n");
2788         pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
2789         pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
2790         pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
2791         pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
2792         pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
2793         pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
2794         pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
2795         pr_err("%-20s%d\n", "pause filter threshold:",
2796                control->pause_filter_thresh);
2797         pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
2798         pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
2799         pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
2800         pr_err("%-20s%d\n", "asid:", control->asid);
2801         pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
2802         pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
2803         pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
2804         pr_err("%-20s%08x\n", "int_state:", control->int_state);
2805         pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
2806         pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
2807         pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
2808         pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
2809         pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
2810         pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
2811         pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
2812         pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
2813         pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
2814         pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
2815         pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
2816         pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
2817         pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
2818         pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
2819         pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
2820         pr_err("VMCB State Save Area:\n");
2821         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2822                "es:",
2823                save->es.selector, save->es.attrib,
2824                save->es.limit, save->es.base);
2825         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2826                "cs:",
2827                save->cs.selector, save->cs.attrib,
2828                save->cs.limit, save->cs.base);
2829         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2830                "ss:",
2831                save->ss.selector, save->ss.attrib,
2832                save->ss.limit, save->ss.base);
2833         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2834                "ds:",
2835                save->ds.selector, save->ds.attrib,
2836                save->ds.limit, save->ds.base);
2837         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2838                "fs:",
2839                save->fs.selector, save->fs.attrib,
2840                save->fs.limit, save->fs.base);
2841         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2842                "gs:",
2843                save->gs.selector, save->gs.attrib,
2844                save->gs.limit, save->gs.base);
2845         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2846                "gdtr:",
2847                save->gdtr.selector, save->gdtr.attrib,
2848                save->gdtr.limit, save->gdtr.base);
2849         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2850                "ldtr:",
2851                save->ldtr.selector, save->ldtr.attrib,
2852                save->ldtr.limit, save->ldtr.base);
2853         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2854                "idtr:",
2855                save->idtr.selector, save->idtr.attrib,
2856                save->idtr.limit, save->idtr.base);
2857         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2858                "tr:",
2859                save->tr.selector, save->tr.attrib,
2860                save->tr.limit, save->tr.base);
2861         pr_err("cpl:            %d                efer:         %016llx\n",
2862                 save->cpl, save->efer);
2863         pr_err("%-15s %016llx %-13s %016llx\n",
2864                "cr0:", save->cr0, "cr2:", save->cr2);
2865         pr_err("%-15s %016llx %-13s %016llx\n",
2866                "cr3:", save->cr3, "cr4:", save->cr4);
2867         pr_err("%-15s %016llx %-13s %016llx\n",
2868                "dr6:", save->dr6, "dr7:", save->dr7);
2869         pr_err("%-15s %016llx %-13s %016llx\n",
2870                "rip:", save->rip, "rflags:", save->rflags);
2871         pr_err("%-15s %016llx %-13s %016llx\n",
2872                "rsp:", save->rsp, "rax:", save->rax);
2873         pr_err("%-15s %016llx %-13s %016llx\n",
2874                "star:", save->star, "lstar:", save->lstar);
2875         pr_err("%-15s %016llx %-13s %016llx\n",
2876                "cstar:", save->cstar, "sfmask:", save->sfmask);
2877         pr_err("%-15s %016llx %-13s %016llx\n",
2878                "kernel_gs_base:", save->kernel_gs_base,
2879                "sysenter_cs:", save->sysenter_cs);
2880         pr_err("%-15s %016llx %-13s %016llx\n",
2881                "sysenter_esp:", save->sysenter_esp,
2882                "sysenter_eip:", save->sysenter_eip);
2883         pr_err("%-15s %016llx %-13s %016llx\n",
2884                "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
2885         pr_err("%-15s %016llx %-13s %016llx\n",
2886                "br_from:", save->br_from, "br_to:", save->br_to);
2887         pr_err("%-15s %016llx %-13s %016llx\n",
2888                "excp_from:", save->last_excp_from,
2889                "excp_to:", save->last_excp_to);
2890 }
2891
2892 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
2893 {
2894         struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
2895
2896         *info1 = control->exit_info_1;
2897         *info2 = control->exit_info_2;
2898 }
2899
2900 static int handle_exit(struct kvm_vcpu *vcpu,
2901         enum exit_fastpath_completion exit_fastpath)
2902 {
2903         struct vcpu_svm *svm = to_svm(vcpu);
2904         struct kvm_run *kvm_run = vcpu->run;
2905         u32 exit_code = svm->vmcb->control.exit_code;
2906
2907         trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
2908
2909         if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
2910                 vcpu->arch.cr0 = svm->vmcb->save.cr0;
2911         if (npt_enabled)
2912                 vcpu->arch.cr3 = svm->vmcb->save.cr3;
2913
2914         if (unlikely(svm->nested.exit_required)) {
2915                 nested_svm_vmexit(svm);
2916                 svm->nested.exit_required = false;
2917
2918                 return 1;
2919         }
2920
2921         if (is_guest_mode(vcpu)) {
2922                 int vmexit;
2923
2924                 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
2925                                         svm->vmcb->control.exit_info_1,
2926                                         svm->vmcb->control.exit_info_2,
2927                                         svm->vmcb->control.exit_int_info,
2928                                         svm->vmcb->control.exit_int_info_err,
2929                                         KVM_ISA_SVM);
2930
2931                 vmexit = nested_svm_exit_special(svm);
2932
2933                 if (vmexit == NESTED_EXIT_CONTINUE)
2934                         vmexit = nested_svm_exit_handled(svm);
2935
2936                 if (vmexit == NESTED_EXIT_DONE)
2937                         return 1;
2938         }
2939
2940         svm_complete_interrupts(svm);
2941
2942         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
2943                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2944                 kvm_run->fail_entry.hardware_entry_failure_reason
2945                         = svm->vmcb->control.exit_code;
2946                 dump_vmcb(vcpu);
2947                 return 0;
2948         }
2949
2950         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
2951             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
2952             exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
2953             exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
2954                 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
2955                        "exit_code 0x%x\n",
2956                        __func__, svm->vmcb->control.exit_int_info,
2957                        exit_code);
2958
2959         if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
2960                 kvm_skip_emulated_instruction(vcpu);
2961                 return 1;
2962         } else if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
2963             || !svm_exit_handlers[exit_code]) {
2964                 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
2965                 dump_vmcb(vcpu);
2966                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2967                 vcpu->run->internal.suberror =
2968                         KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
2969                 vcpu->run->internal.ndata = 1;
2970                 vcpu->run->internal.data[0] = exit_code;
2971                 return 0;
2972         }
2973
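	/*
	 * With retpolines enabled, the indirect call through
	 * svm_exit_handlers[] is comparatively expensive; handle the
	 * hottest exit reasons with direct calls instead.
	 */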
2974 #ifdef CONFIG_RETPOLINE
2975         if (exit_code == SVM_EXIT_MSR)
2976                 return msr_interception(svm);
2977         else if (exit_code == SVM_EXIT_VINTR)
2978                 return interrupt_window_interception(svm);
2979         else if (exit_code == SVM_EXIT_INTR)
2980                 return intr_interception(svm);
2981         else if (exit_code == SVM_EXIT_HLT)
2982                 return halt_interception(svm);
2983         else if (exit_code == SVM_EXIT_NPF)
2984                 return npf_interception(svm);
2985 #endif
2986         return svm_exit_handlers[exit_code](svm);
2987 }
2988
2989 static void reload_tss(struct kvm_vcpu *vcpu)
2990 {
2991         int cpu = raw_smp_processor_id();
2992
2993         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2994         sd->tss_desc->type = 9; /* available 32/64-bit TSS */
2995         load_TR_desc();
2996 }
2997
2998 static void pre_svm_run(struct vcpu_svm *svm)
2999 {
3000         int cpu = raw_smp_processor_id();
3001
3002         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
3003
3004         if (sev_guest(svm->vcpu.kvm))
3005                 return pre_sev_run(svm, cpu);
3006
3007         /* FIXME: handle wraparound of asid_generation */
3008         if (svm->asid_generation != sd->asid_generation)
3009                 new_asid(svm, sd);
3010 }
3011
3012 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3013 {
3014         struct vcpu_svm *svm = to_svm(vcpu);
3015
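        /*
         * NMIs stay masked until the guest executes IRET; intercept IRET
         * so the mask can be lifted once the handler completes.
         */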
3016         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3017         vcpu->arch.hflags |= HF_NMI_MASK;
3018         set_intercept(svm, INTERCEPT_IRET);
3019         ++vcpu->stat.nmi_injections;
3020 }
3021
3022 static void svm_set_irq(struct kvm_vcpu *vcpu)
3023 {
3024         struct vcpu_svm *svm = to_svm(vcpu);
3025
3026         BUG_ON(!gif_set(svm));
3027
3028         trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
3029         ++vcpu->stat.irq_injections;
3030
3031         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3032                 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
3033 }
3034
3035 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3036 {
3037         struct vcpu_svm *svm = to_svm(vcpu);
3038
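        /*
         * Intercept CR8 writes only while the TPR masks the highest
         * pending interrupt, so KVM notices when the guest drops the TPR
         * far enough for the interrupt to be delivered.
         */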
3039         if (svm_nested_virtualize_tpr(vcpu))
3040                 return;
3041
3042         clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3043
3044         if (irr == -1)
3045                 return;
3046
3047         if (tpr >= irr)
3048                 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3049 }
3050
3051 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
3052 {
3053         struct vcpu_svm *svm = to_svm(vcpu);
3054         struct vmcb *vmcb = svm->vmcb;
3055         int ret;
3056         ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
3057               !(svm->vcpu.arch.hflags & HF_NMI_MASK);
3058         ret = ret && gif_set(svm) && nested_svm_nmi(svm);
3059
3060         return ret;
3061 }
3062
3063 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3064 {
3065         struct vcpu_svm *svm = to_svm(vcpu);
3066
3067         return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
3068 }
3069
3070 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3071 {
3072         struct vcpu_svm *svm = to_svm(vcpu);
3073
3074         if (masked) {
3075                 svm->vcpu.arch.hflags |= HF_NMI_MASK;
3076                 set_intercept(svm, INTERCEPT_IRET);
3077         } else {
3078                 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
3079                 clr_intercept(svm, INTERCEPT_IRET);
3080         }
3081 }
3082
3083 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
3084 {
3085         struct vcpu_svm *svm = to_svm(vcpu);
3086         struct vmcb *vmcb = svm->vmcb;
3087
3088         if (!gif_set(svm) ||
3089              (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
3090                 return 0;
3091
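        /*
         * With V_INTR masking in effect for a nested guest, physical
         * interrupts are gated by the host EFLAGS.IF snapshot
         * (HF_HIF_MASK) rather than the guest's RFLAGS.IF.
         */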
3092         if (is_guest_mode(vcpu) && (svm->vcpu.arch.hflags & HF_VINTR_MASK))
3093                 return !!(svm->vcpu.arch.hflags & HF_HIF_MASK);
3094         else
3095                 return !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
3096 }
3097
3098 static void enable_irq_window(struct kvm_vcpu *vcpu)
3099 {
3100         struct vcpu_svm *svm = to_svm(vcpu);
3101
3102         /*
3103          * When GIF=0 we can't rely on the CPU to tell us when GIF becomes
3104          * 1, because that's a separate STGI/VMRUN intercept.  The next time
3105          * we get that intercept, this function will be called again and
3106          * we'll get the VINTR intercept then. However, if the vGIF feature
3107          * is enabled, the STGI interception will not occur. Enable the IRQ
3108          * window under the assumption that the hardware will set the GIF.
3109          */
3110         if (vgif_enabled(svm) || gif_set(svm)) {
3111                 /*
3112                  * An IRQ window is not needed when AVIC is enabled,
3113                  * unless there is a pending ExtINT, which cannot be
3114                  * injected via AVIC. In that case, temporarily disable
3115                  * AVIC and fall back to injecting the IRQ via V_IRQ.
3116                  */
3117                 svm_toggle_avic_for_irq_window(vcpu, false);
3118                 svm_set_vintr(svm);
3119         }
3120 }
3121
3122 static void enable_nmi_window(struct kvm_vcpu *vcpu)
3123 {
3124         struct vcpu_svm *svm = to_svm(vcpu);
3125
3126         if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
3127             == HF_NMI_MASK)
3128                 return; /* IRET will cause a vm exit */
3129
3130         if (!gif_set(svm)) {
3131                 if (vgif_enabled(svm))
3132                         set_intercept(svm, INTERCEPT_STGI);
3133                 return; /* STGI will cause a vm exit */
3134         }
3135
3136         if (svm->nested.exit_required)
3137                 return; /* we're not going to run the guest yet */
3138
3139         /*
3140          * Something prevents the NMI from being injected. Single step over
3141          * the blocking condition (IRET, exception injection or interrupt shadow).
3142          */
3143         svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
3144         svm->nmi_singlestep = true;
3145         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3146 }
3147
3148 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3149 {
3150         return 0;
3151 }
3152
3153 static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
3154 {
3155         return 0;
3156 }
3157
3158 void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
3159 {
3160         struct vcpu_svm *svm = to_svm(vcpu);
3161
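        /*
         * Flush by ASID when the CPU supports it; otherwise invalidate the
         * vCPU's ASID generation so that pre_svm_run() assigns a fresh
         * ASID, which implicitly flushes the old entries.
         */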
3162         if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3163                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3164         else
3165                 svm->asid_generation--;
3166 }
3167
3168 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
3169 {
3170         struct vcpu_svm *svm = to_svm(vcpu);
3171
3172         invlpga(gva, svm->vmcb->control.asid);
3173 }
3174
3175 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
3176 {
3177 }
3178
3179 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3180 {
3181         struct vcpu_svm *svm = to_svm(vcpu);
3182
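        /*
         * If CR8 writes were not intercepted, the guest may have changed
         * V_TPR behind KVM's back; propagate it to the lapic's TPR.
         */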
3183         if (svm_nested_virtualize_tpr(vcpu))
3184                 return;
3185
3186         if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
3187                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3188                 kvm_set_cr8(vcpu, cr8);
3189         }
3190 }
3191
3192 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3193 {
3194         struct vcpu_svm *svm = to_svm(vcpu);
3195         u64 cr8;
3196
3197         if (svm_nested_virtualize_tpr(vcpu) ||
3198             kvm_vcpu_apicv_active(vcpu))
3199                 return;
3200
3201         cr8 = kvm_get_cr8(vcpu);
3202         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3203         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3204 }
3205
3206 static void svm_complete_interrupts(struct vcpu_svm *svm)
3207 {
3208         u8 vector;
3209         int type;
3210         u32 exitintinfo = svm->vmcb->control.exit_int_info;
3211         unsigned int int3_injected = svm->int3_injected;
3212
3213         svm->int3_injected = 0;
3214
3215         /*
3216          * If we've made progress since setting HF_IRET_MASK, we've
3217          * executed an IRET and can allow NMI injection.
3218          */
3219         if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
3220             && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
3221                 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3222                 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3223         }
3224
3225         svm->vcpu.arch.nmi_injected = false;
3226         kvm_clear_exception_queue(&svm->vcpu);
3227         kvm_clear_interrupt_queue(&svm->vcpu);
3228
3229         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3230                 return;
3231
3232         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3233
3234         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3235         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3236
3237         switch (type) {
3238         case SVM_EXITINTINFO_TYPE_NMI:
3239                 svm->vcpu.arch.nmi_injected = true;
3240                 break;
3241         case SVM_EXITINTINFO_TYPE_EXEPT:
3242                 /*
3243                  * In case of software exceptions, do not reinject the vector,
3244                  * but re-execute the instruction instead. Rewind RIP first
3245                  * if we emulated INT3 before.
3246                  */
3247                 if (kvm_exception_is_soft(vector)) {
3248                         if (vector == BP_VECTOR && int3_injected &&
3249                             kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
3250                                 kvm_rip_write(&svm->vcpu,
3251                                               kvm_rip_read(&svm->vcpu) -
3252                                               int3_injected);
3253                         break;
3254                 }
3255                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3256                         u32 err = svm->vmcb->control.exit_int_info_err;
3257
3258                         kvm_requeue_exception_e(&svm->vcpu, vector, err);
3259                 } else
3260                         kvm_requeue_exception(&svm->vcpu, vector);
3261                 break;
3262         case SVM_EXITINTINFO_TYPE_INTR:
3263                 kvm_queue_interrupt(&svm->vcpu, vector, false);
3264                 break;
3265         default:
3266                 break;
3267         }
3268 }
3269
3270 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3271 {
3272         struct vcpu_svm *svm = to_svm(vcpu);
3273         struct vmcb_control_area *control = &svm->vmcb->control;
3274
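        /*
         * Move the pending event into exit_int_info so that
         * svm_complete_interrupts() re-queues it for the next entry.
         */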
3275         control->exit_int_info = control->event_inj;
3276         control->exit_int_info_err = control->event_inj_err;
3277         control->event_inj = 0;
3278         svm_complete_interrupts(svm);
3279 }
3280
3281 void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
3282
3283 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3284 {
3285         struct vcpu_svm *svm = to_svm(vcpu);
3286
3287         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3288         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3289         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3290
3291         /*
3292          * A vmexit emulation is required before the vcpu can be executed
3293          * again.
3294          */
3295         if (unlikely(svm->nested.exit_required))
3296                 return;
3297
3298         /*
3299          * Disable singlestep if we're injecting an interrupt/exception.
3300          * We don't want our modified rflags to be pushed on the stack where
3301          * we might not be able to easily reset them if we disabled NMI
3302          * singlestep later.
3303          */
3304         if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
3305                 /*
3306                  * Event injection happens before external interrupts cause a
3307                  * vmexit and interrupts are disabled here, so smp_send_reschedule
3308                  * is enough to force an immediate vmexit.
3309                  */
3310                 disable_nmi_singlestep(svm);
3311                 smp_send_reschedule(vcpu->cpu);
3312         }
3313
3314         pre_svm_run(svm);
3315
3316         sync_lapic_to_cr8(vcpu);
3317
3318         svm->vmcb->save.cr2 = vcpu->arch.cr2;
3319
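        /*
         * Clear GIF so that interrupts, NMIs and SMIs are held off across
         * the world switch; stgi() below re-enables them once host state
         * has been restored.
         */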
3320         clgi();
3321         kvm_load_guest_xsave_state(vcpu);
3322
3323         if (lapic_in_kernel(vcpu) &&
3324             vcpu->arch.apic->lapic_timer.timer_advance_ns)
3325                 kvm_wait_lapic_expire(vcpu);
3326
3327         /*
3328          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
3329          * it's non-zero. Since vmentry is serialising on affected CPUs, there
3330          * is no need to worry about the conditional branch over the wrmsr
3331          * being speculatively taken.
3332          */
3333         x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
3334
3335         __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
3336
3337 #ifdef CONFIG_X86_64
3338         wrmsrl(MSR_GS_BASE, svm->host.gs_base);
3339 #else
3340         loadsegment(fs, svm->host.fs);
3341 #ifndef CONFIG_X86_32_LAZY_GS
3342         loadsegment(gs, svm->host.gs);
3343 #endif
3344 #endif
3345
3346         /*
3347          * We do not use IBRS in the kernel. If this vCPU has used the
3348          * SPEC_CTRL MSR it may have left it on; save the value and
3349          * turn it off. This is much more efficient than blindly adding
3350          * it to the atomic save/restore list. Especially as the former
3351          * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
3352          *
3353          * For non-nested case:
3354          * If the L01 MSR bitmap does not intercept the MSR, then we need to
3355          * save it.
3356          *
3357          * For nested case:
3358          * If the L02 MSR bitmap does not intercept the MSR, then we need to
3359          * save it.
3360          */
3361         if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
3362                 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
3363
3364         reload_tss(vcpu);
3365
3366         x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
3367
3368         vcpu->arch.cr2 = svm->vmcb->save.cr2;
3369         vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3370         vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3371         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3372
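        /*
         * A #VMEXIT(NMI) occurs before the NMI itself is taken; it will
         * fire as soon as stgi() sets GIF, so bracket it with the
         * kvm_before/after_interrupt() pair.
         */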
3373         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3374                 kvm_before_interrupt(&svm->vcpu);
3375
3376         kvm_load_host_xsave_state(vcpu);
3377         stgi();
3378
3379         /* Any pending NMI will happen here */
3380
3381         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3382                 kvm_after_interrupt(&svm->vcpu);
3383
3384         sync_cr8_to_lapic(vcpu);
3385
3386         svm->next_rip = 0;
3387
3388         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3389
3390         /* If the exit was due to #PF, check for an async page fault. */
3391         if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
3392                 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
3393
3394         if (npt_enabled) {
3395                 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
3396                 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
3397         }
3398
3399         /*
3400          * We need to handle MC intercepts here before the vcpu has a chance to
3401          * change the physical cpu
3402          */
3403         if (unlikely(svm->vmcb->control.exit_code ==
3404                      SVM_EXIT_EXCP_BASE + MC_VECTOR))
3405                 svm_handle_mce(svm);
3406
3407         mark_all_clean(svm->vmcb);
3408 }
3409
3410 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
3411 {
3412         struct vcpu_svm *svm = to_svm(vcpu);
3413         bool update_guest_cr3 = true;
3414         unsigned long cr3;
3415
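        /* Tag the new root with the SME encryption bit, if any. */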
3416         cr3 = __sme_set(root);
3417         if (npt_enabled) {
3418                 svm->vmcb->control.nested_cr3 = cr3;
3419                 mark_dirty(svm->vmcb, VMCB_NPT);
3420
3421                 /* Loading L2's CR3 is handled by enter_svm_guest_mode.  */
3422                 if (is_guest_mode(vcpu))
3423                         update_guest_cr3 = false;
3424                 else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3425                         cr3 = vcpu->arch.cr3;
3426                 else /* CR3 is already up-to-date.  */
3427                         update_guest_cr3 = false;
3428         }
3429
3430         if (update_guest_cr3) {
3431                 svm->vmcb->save.cr3 = cr3;
3432                 mark_dirty(svm->vmcb, VMCB_CR);
3433         }
3434 }
3435
3436 static int is_disabled(void)
3437 {
3438         u64 vm_cr;
3439
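        /* The BIOS may disable (and lock out) SVM via the VM_CR MSR. */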
3440         rdmsrl(MSR_VM_CR, vm_cr);
3441         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
3442                 return 1;
3443
3444         return 0;
3445 }
3446
3447 static void
3448 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
3449 {
3450         /*
3451          * Patch in the VMMCALL instruction (opcode 0f 01 d9):
3452          */
3453         hypercall[0] = 0x0f;
3454         hypercall[1] = 0x01;
3455         hypercall[2] = 0xd9;
3456 }
3457
3458 static int __init svm_check_processor_compat(void)
3459 {
3460         return 0;
3461 }
3462
3463 static bool svm_cpu_has_accelerated_tpr(void)
3464 {
3465         return false;
3466 }
3467
3468 static bool svm_has_emulated_msr(int index)
3469 {
3470         switch (index) {
3471         case MSR_IA32_MCG_EXT_CTL:
3472         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3473                 return false;
3474         default:
3475                 break;
3476         }
3477
3478         return true;
3479 }
3480
3481 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
3482 {
3483         return 0;
3484 }
3485
3486 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
3487 {
3488         struct vcpu_svm *svm = to_svm(vcpu);
3489
3490         vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
3491                                     boot_cpu_has(X86_FEATURE_XSAVE) &&
3492                                     boot_cpu_has(X86_FEATURE_XSAVES);
3493
3494         /* Update nrips enabled cache */
3495         svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
3496                              guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
3497
3498         if (!kvm_vcpu_apicv_active(vcpu))
3499                 return;
3500
3501         /*
3502          * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
3503          * is exposed to the guest, disable AVIC.
3504          */
3505         if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
3506                 kvm_request_apicv_update(vcpu->kvm, false,
3507                                          APICV_INHIBIT_REASON_X2APIC);
3508
3509         /*
3510          * Currently, AVIC does not work with nested virtualization.
3511          * So disable AVIC when the SVM CPUID feature is exposed to the L1 guest.
3512          */
3513         if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
3514                 kvm_request_apicv_update(vcpu->kvm, false,
3515                                          APICV_INHIBIT_REASON_NESTED);
3516 }
3517
3518 static bool svm_has_wbinvd_exit(void)
3519 {
3520         return true;
3521 }
3522
3523 #define PRE_EX(exit)  { .exit_code = (exit), \
3524                         .stage = X86_ICPT_PRE_EXCEPT, }
3525 #define POST_EX(exit) { .exit_code = (exit), \
3526                         .stage = X86_ICPT_POST_EXCEPT, }
3527 #define POST_MEM(exit) { .exit_code = (exit), \
3528                         .stage = X86_ICPT_POST_MEMACCESS, }
3529
3530 static const struct __x86_intercept {
3531         u32 exit_code;
3532         enum x86_intercept_stage stage;
3533 } x86_intercept_map[] = {
3534         [x86_intercept_cr_read]         = POST_EX(SVM_EXIT_READ_CR0),
3535         [x86_intercept_cr_write]        = POST_EX(SVM_EXIT_WRITE_CR0),
3536         [x86_intercept_clts]            = POST_EX(SVM_EXIT_WRITE_CR0),
3537         [x86_intercept_lmsw]            = POST_EX(SVM_EXIT_WRITE_CR0),
3538         [x86_intercept_smsw]            = POST_EX(SVM_EXIT_READ_CR0),
3539         [x86_intercept_dr_read]         = POST_EX(SVM_EXIT_READ_DR0),
3540         [x86_intercept_dr_write]        = POST_EX(SVM_EXIT_WRITE_DR0),
3541         [x86_intercept_sldt]            = POST_EX(SVM_EXIT_LDTR_READ),
3542         [x86_intercept_str]             = POST_EX(SVM_EXIT_TR_READ),
3543         [x86_intercept_lldt]            = POST_EX(SVM_EXIT_LDTR_WRITE),
3544         [x86_intercept_ltr]             = POST_EX(SVM_EXIT_TR_WRITE),
3545         [x86_intercept_sgdt]            = POST_EX(SVM_EXIT_GDTR_READ),
3546         [x86_intercept_sidt]            = POST_EX(SVM_EXIT_IDTR_READ),
3547         [x86_intercept_lgdt]            = POST_EX(SVM_EXIT_GDTR_WRITE),
3548         [x86_intercept_lidt]            = POST_EX(SVM_EXIT_IDTR_WRITE),
3549         [x86_intercept_vmrun]           = POST_EX(SVM_EXIT_VMRUN),
3550         [x86_intercept_vmmcall]         = POST_EX(SVM_EXIT_VMMCALL),
3551         [x86_intercept_vmload]          = POST_EX(SVM_EXIT_VMLOAD),
3552         [x86_intercept_vmsave]          = POST_EX(SVM_EXIT_VMSAVE),
3553         [x86_intercept_stgi]            = POST_EX(SVM_EXIT_STGI),
3554         [x86_intercept_clgi]            = POST_EX(SVM_EXIT_CLGI),
3555         [x86_intercept_skinit]          = POST_EX(SVM_EXIT_SKINIT),
3556         [x86_intercept_invlpga]         = POST_EX(SVM_EXIT_INVLPGA),
3557         [x86_intercept_rdtscp]          = POST_EX(SVM_EXIT_RDTSCP),
3558         [x86_intercept_monitor]         = POST_MEM(SVM_EXIT_MONITOR),
3559         [x86_intercept_mwait]           = POST_EX(SVM_EXIT_MWAIT),
3560         [x86_intercept_invlpg]          = POST_EX(SVM_EXIT_INVLPG),
3561         [x86_intercept_invd]            = POST_EX(SVM_EXIT_INVD),
3562         [x86_intercept_wbinvd]          = POST_EX(SVM_EXIT_WBINVD),
3563         [x86_intercept_wrmsr]           = POST_EX(SVM_EXIT_MSR),
3564         [x86_intercept_rdtsc]           = POST_EX(SVM_EXIT_RDTSC),
3565         [x86_intercept_rdmsr]           = POST_EX(SVM_EXIT_MSR),
3566         [x86_intercept_rdpmc]           = POST_EX(SVM_EXIT_RDPMC),
3567         [x86_intercept_cpuid]           = PRE_EX(SVM_EXIT_CPUID),
3568         [x86_intercept_rsm]             = PRE_EX(SVM_EXIT_RSM),
3569         [x86_intercept_pause]           = PRE_EX(SVM_EXIT_PAUSE),
3570         [x86_intercept_pushf]           = PRE_EX(SVM_EXIT_PUSHF),
3571         [x86_intercept_popf]            = PRE_EX(SVM_EXIT_POPF),
3572         [x86_intercept_intn]            = PRE_EX(SVM_EXIT_SWINT),
3573         [x86_intercept_iret]            = PRE_EX(SVM_EXIT_IRET),
3574         [x86_intercept_icebp]           = PRE_EX(SVM_EXIT_ICEBP),
3575         [x86_intercept_hlt]             = POST_EX(SVM_EXIT_HLT),
3576         [x86_intercept_in]              = POST_EX(SVM_EXIT_IOIO),
3577         [x86_intercept_ins]             = POST_EX(SVM_EXIT_IOIO),
3578         [x86_intercept_out]             = POST_EX(SVM_EXIT_IOIO),
3579         [x86_intercept_outs]            = POST_EX(SVM_EXIT_IOIO),
3580         [x86_intercept_xsetbv]          = PRE_EX(SVM_EXIT_XSETBV),
3581 };
3582
3583 #undef PRE_EX
3584 #undef POST_EX
3585 #undef POST_MEM
3586
3587 static int svm_check_intercept(struct kvm_vcpu *vcpu,
3588                                struct x86_instruction_info *info,
3589                                enum x86_intercept_stage stage,
3590                                struct x86_exception *exception)
3591 {
3592         struct vcpu_svm *svm = to_svm(vcpu);
3593         int vmexit, ret = X86EMUL_CONTINUE;
3594         struct __x86_intercept icpt_info;
3595         struct vmcb *vmcb = svm->vmcb;
3596
3597         if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
3598                 goto out;
3599
3600         icpt_info = x86_intercept_map[info->intercept];
3601
3602         if (stage != icpt_info.stage)
3603                 goto out;
3604
3605         switch (icpt_info.exit_code) {
3606         case SVM_EXIT_READ_CR0:
3607                 if (info->intercept == x86_intercept_cr_read)
3608                         icpt_info.exit_code += info->modrm_reg;
3609                 break;
3610         case SVM_EXIT_WRITE_CR0: {
3611                 unsigned long cr0, val;
3612                 u64 intercept;
3613
3614                 if (info->intercept == x86_intercept_cr_write)
3615                         icpt_info.exit_code += info->modrm_reg;
3616
3617                 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
3618                     info->intercept == x86_intercept_clts)
3619                         break;
3620
3621                 intercept = svm->nested.intercept;
3622
3623                 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
3624                         break;
3625
3626                 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
3627                 val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
3628
3629                 if (info->intercept == x86_intercept_lmsw) {
3630                         cr0 &= 0xfUL;
3631                         val &= 0xfUL;
3632                         /* lmsw can't clear PE - catch this here */
3633                         if (cr0 & X86_CR0_PE)
3634                                 val |= X86_CR0_PE;
3635                 }
3636
3637                 if (cr0 ^ val)
3638                         icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3639
3640                 break;
3641         }
3642         case SVM_EXIT_READ_DR0:
3643         case SVM_EXIT_WRITE_DR0:
3644                 icpt_info.exit_code += info->modrm_reg;
3645                 break;
3646         case SVM_EXIT_MSR:
3647                 if (info->intercept == x86_intercept_wrmsr)
3648                         vmcb->control.exit_info_1 = 1;
3649                 else
3650                         vmcb->control.exit_info_1 = 0;
3651                 break;
3652         case SVM_EXIT_PAUSE:
3653                 /*
3654                  * The intercept fires for the NOP opcode, but PAUSE is
3655                  * REP NOP, so check for the REP prefix here.
3656                  */
3657                 if (info->rep_prefix != REPE_PREFIX)
3658                         goto out;
3659                 break;
3660         case SVM_EXIT_IOIO: {
3661                 u64 exit_info;
3662                 u32 bytes;
3663
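                /*
                 * Reconstruct exit_info_1 in the hardware IOIO-intercept
                 * layout: port number in bits 31:16 plus direction,
                 * string, REP and size fields.
                 */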
3664                 if (info->intercept == x86_intercept_in ||
3665                     info->intercept == x86_intercept_ins) {
3666                         exit_info = ((info->src_val & 0xffff) << 16) |
3667                                 SVM_IOIO_TYPE_MASK;
3668                         bytes = info->dst_bytes;
3669                 } else {
3670                         exit_info = (info->dst_val & 0xffff) << 16;
3671                         bytes = info->src_bytes;
3672                 }
3673
3674                 if (info->intercept == x86_intercept_outs ||
3675                     info->intercept == x86_intercept_ins)
3676                         exit_info |= SVM_IOIO_STR_MASK;
3677
3678                 if (info->rep_prefix)
3679                         exit_info |= SVM_IOIO_REP_MASK;
3680
3681                 bytes = min(bytes, 4u);
3682
3683                 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
3684
3685                 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
3686
3687                 vmcb->control.exit_info_1 = exit_info;
3688                 vmcb->control.exit_info_2 = info->next_rip;
3689
3690                 break;
3691         }
3692         default:
3693                 break;
3694         }
3695
3696         /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
3697         if (static_cpu_has(X86_FEATURE_NRIPS))
3698                 vmcb->control.next_rip = info->next_rip;
3699         vmcb->control.exit_code = icpt_info.exit_code;
3700         vmexit = nested_svm_exit_handled(svm);
3701
3702         ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
3703                                            : X86EMUL_CONTINUE;
3704
3705 out:
3706         return ret;
3707 }
3708
3709 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
3710         enum exit_fastpath_completion *exit_fastpath)
3711 {
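        /*
         * For SVM_EXIT_MSR, exit_info_1 is 1 for WRMSR and 0 for RDMSR;
         * only WRMSR has an irqoff fastpath.
         */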
3712         if (!is_guest_mode(vcpu) &&
3713             to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
3714             to_svm(vcpu)->vmcb->control.exit_info_1)
3715                 *exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
3716 }
3717
3718 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
3719 {
3720         if (pause_filter_thresh)
3721                 shrink_ple_window(vcpu);
3722 }
3723
3724 static void svm_setup_mce(struct kvm_vcpu *vcpu)
3725 {
3726         /* [63:9] are reserved. */
3727         vcpu->arch.mcg_cap &= 0x1ff;
3728 }
3729
3730 static int svm_smi_allowed(struct kvm_vcpu *vcpu)
3731 {
3732         struct vcpu_svm *svm = to_svm(vcpu);
3733
3734         /* Per APM Vol.2 15.22.2 "Response to SMI" */
3735         if (!gif_set(svm))
3736                 return 0;
3737
3738         if (is_guest_mode(&svm->vcpu) &&
3739             svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
3740                 /* TODO: Might need to set exit_info_1 and exit_info_2 here */
3741                 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
3742                 svm->nested.exit_required = true;
3743                 return 0;
3744         }
3745
3746         return 1;
3747 }
3748
3749 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
3750 {
3751         struct vcpu_svm *svm = to_svm(vcpu);
3752         int ret;
3753
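        /*
         * If the SMI arrived while in guest mode, record that fact plus
         * the nested guest's VMCB address in the SMM state-save area and
         * emulate a nested #VMEXIT so the SMM handler runs in L1 context.
         */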
3754         if (is_guest_mode(vcpu)) {
3755                 /* FED8h - SVM Guest */
3756                 put_smstate(u64, smstate, 0x7ed8, 1);
3757                 /* FEE0h - SVM Guest VMCB Physical Address */
3758                 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
3759
3760                 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3761                 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3762                 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3763
3764                 ret = nested_svm_vmexit(svm);
3765                 if (ret)
3766                         return ret;
3767         }
3768         return 0;
3769 }
3770
3771 static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
3772 {
3773         struct vcpu_svm *svm = to_svm(vcpu);
3774         struct vmcb *nested_vmcb;
3775         struct kvm_host_map map;
3776         u64 guest;
3777         u64 vmcb;
3778
3779         guest = GET_SMSTATE(u64, smstate, 0x7ed8);
3780         vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
3781
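        /*
         * On RSM, if the SMI interrupted a nested guest, re-enter it
         * using the VMCB address saved in SMRAM.
         */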
3782         if (guest) {
3783                 if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
3784                         return 1;
3785                 nested_vmcb = map.hva;
3786                 enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
3787         }
3788         return 0;
3789 }
3790
3791 static int enable_smi_window(struct kvm_vcpu *vcpu)
3792 {
3793         struct vcpu_svm *svm = to_svm(vcpu);
3794
3795         if (!gif_set(svm)) {
3796                 if (vgif_enabled(svm))
3797                         set_intercept(svm, INTERCEPT_STGI);
3798                 /* STGI will cause a vm exit */
3799                 return 1;
3800         }
3801         return 0;
3802 }
3803
3804 static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
3805 {
3806         unsigned long cr4 = kvm_read_cr4(vcpu);
3807         bool smep = cr4 & X86_CR4_SMEP;
3808         bool smap = cr4 & X86_CR4_SMAP;
3809         bool is_user = svm_get_cpl(vcpu) == 3;
3810
3811         /*
3812          * Detect and work around Erratum 1096 (Fam_17h_00_0Fh).
3813          *
3814          * Erratum:
3815          * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1,
3816          * it is possible that the CPU microcode implementing DecodeAssist will
3817          * fail to read the bytes of the instruction which caused the #NPF. In
3818          * this case, the GuestIntrBytes field of the VMCB on a VMEXIT will
3819          * incorrectly return 0 instead of the correct guest instruction bytes.
3820          *
3821          * This happens because the microcode reading the instruction bytes
3822          * uses a special opcode which attempts to read data with CPL=0
3823          * privileges. The microcode reads CS:RIP and if it hits an SMAP
3824          * fault, it gives up and returns no instruction bytes.
3825          *
3826          * Detection:
3827          * We reach this point when the CPU supports DecodeAssist, raised #NPF
3828          * and returned 0 in the GuestIntrBytes field of the VMCB.
3829          * First, the erratum can only be triggered when vCPU CR4.SMAP=1.
3830          * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered
3831          * when vCPU CPL==3 (because otherwise the guest would have taken
3832          * an SMEP fault instead of #NPF).
3833          * Otherwise (vCPU CR4.SMEP=0), it can be triggered at any vCPU CPL.
3834          * As most guests that enable SMAP also enable SMEP, use the above
3835          * logic to minimize false positives when detecting the erratum
3836          * while preserving semantic correctness in all cases.
3837          *
3838          * Workaround:
3839          * To determine what instruction the guest was executing, the
3840          * hypervisor has to decode the instruction at the instruction pointer.
3841          *
3842          * In a non-SEV guest, the hypervisor can read guest memory to decode
3843          * the instruction when insn_len is zero, so return true to indicate
3844          * that decoding is possible.
3845          *
3846          * But in an SEV guest, guest memory is encrypted with a guest-specific
3847          * key, so the hypervisor cannot decode the instruction and cannot
3848          * apply the workaround. Print an error and request that the guest
3849          * be killed.
3850          */
3851         if (smap && (!smep || is_user)) {
3852                 if (!sev_guest(vcpu->kvm))
3853                         return true;
3854
3855                 pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
3856                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3857         }
3858
3859         return false;
3860 }
3861
3862 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
3863 {
3864         struct vcpu_svm *svm = to_svm(vcpu);
3865
3866         /*
3867          * TODO: The last condition latches INIT signals on the vCPU when
3868          * the vCPU is in guest mode and vmcb12 defines an intercept on INIT.
3869          * To properly emulate the INIT intercept, SVM should implement
3870          * kvm_x86_ops.check_nested_events() and call nested_svm_vmexit()
3871          * there if an INIT signal is pending.
3872          */
3873         return !gif_set(svm) ||
3874                    (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
3875 }
3876
3877 static void svm_vm_destroy(struct kvm *kvm)
3878 {
3879         avic_vm_destroy(kvm);
3880         sev_vm_destroy(kvm);
3881 }
3882
3883 static int svm_vm_init(struct kvm *kvm)
3884 {
3885         if (avic) {
3886                 int ret = avic_vm_init(kvm);
3887                 if (ret)
3888                         return ret;
3889         }
3890
3891         kvm_apicv_init(kvm, avic);
3892         return 0;
3893 }
3894
3895 static struct kvm_x86_ops svm_x86_ops __initdata = {
3896         .hardware_unsetup = svm_hardware_teardown,
3897         .hardware_enable = svm_hardware_enable,
3898         .hardware_disable = svm_hardware_disable,
3899         .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
3900         .has_emulated_msr = svm_has_emulated_msr,
3901
3902         .vcpu_create = svm_create_vcpu,
3903         .vcpu_free = svm_free_vcpu,
3904         .vcpu_reset = svm_vcpu_reset,
3905
3906         .vm_size = sizeof(struct kvm_svm),
3907         .vm_init = svm_vm_init,
3908         .vm_destroy = svm_vm_destroy,
3909
3910         .prepare_guest_switch = svm_prepare_guest_switch,
3911         .vcpu_load = svm_vcpu_load,
3912         .vcpu_put = svm_vcpu_put,
3913         .vcpu_blocking = svm_vcpu_blocking,
3914         .vcpu_unblocking = svm_vcpu_unblocking,
3915
3916         .update_bp_intercept = update_bp_intercept,
3917         .get_msr_feature = svm_get_msr_feature,
3918         .get_msr = svm_get_msr,
3919         .set_msr = svm_set_msr,
3920         .get_segment_base = svm_get_segment_base,
3921         .get_segment = svm_get_segment,
3922         .set_segment = svm_set_segment,
3923         .get_cpl = svm_get_cpl,
3924         .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
3925         .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
3926         .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
3927         .set_cr0 = svm_set_cr0,
3928         .set_cr4 = svm_set_cr4,
3929         .set_efer = svm_set_efer,
3930         .get_idt = svm_get_idt,
3931         .set_idt = svm_set_idt,
3932         .get_gdt = svm_get_gdt,
3933         .set_gdt = svm_set_gdt,
3934         .get_dr6 = svm_get_dr6,
3935         .set_dr6 = svm_set_dr6,
3936         .set_dr7 = svm_set_dr7,
3937         .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
3938         .cache_reg = svm_cache_reg,
3939         .get_rflags = svm_get_rflags,
3940         .set_rflags = svm_set_rflags,
3941
3942         .tlb_flush = svm_flush_tlb,
3943         .tlb_flush_gva = svm_flush_tlb_gva,
3944
3945         .run = svm_vcpu_run,
3946         .handle_exit = handle_exit,
3947         .skip_emulated_instruction = skip_emulated_instruction,
3948         .update_emulated_instruction = NULL,
3949         .set_interrupt_shadow = svm_set_interrupt_shadow,
3950         .get_interrupt_shadow = svm_get_interrupt_shadow,
3951         .patch_hypercall = svm_patch_hypercall,
3952         .set_irq = svm_set_irq,
3953         .set_nmi = svm_inject_nmi,
3954         .queue_exception = svm_queue_exception,
3955         .cancel_injection = svm_cancel_injection,
3956         .interrupt_allowed = svm_interrupt_allowed,
3957         .nmi_allowed = svm_nmi_allowed,
3958         .get_nmi_mask = svm_get_nmi_mask,
3959         .set_nmi_mask = svm_set_nmi_mask,
3960         .enable_nmi_window = enable_nmi_window,
3961         .enable_irq_window = enable_irq_window,
3962         .update_cr8_intercept = update_cr8_intercept,
3963         .set_virtual_apic_mode = svm_set_virtual_apic_mode,
3964         .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
3965         .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
3966         .pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl,
3967         .load_eoi_exitmap = svm_load_eoi_exitmap,
3968         .hwapic_irr_update = svm_hwapic_irr_update,
3969         .hwapic_isr_update = svm_hwapic_isr_update,
3970         .sync_pir_to_irr = kvm_lapic_find_highest_irr,
3971         .apicv_post_state_restore = avic_post_state_restore,
3972
3973         .set_tss_addr = svm_set_tss_addr,
3974         .set_identity_map_addr = svm_set_identity_map_addr,
3975         .get_tdp_level = get_npt_level,
3976         .get_mt_mask = svm_get_mt_mask,
3977
3978         .get_exit_info = svm_get_exit_info,
3979
3980         .cpuid_update = svm_cpuid_update,
3981
3982         .has_wbinvd_exit = svm_has_wbinvd_exit,
3983
3984         .read_l1_tsc_offset = svm_read_l1_tsc_offset,
3985         .write_l1_tsc_offset = svm_write_l1_tsc_offset,
3986
3987         .load_mmu_pgd = svm_load_mmu_pgd,
3988
3989         .check_intercept = svm_check_intercept,
3990         .handle_exit_irqoff = svm_handle_exit_irqoff,
3991
3992         .request_immediate_exit = __kvm_request_immediate_exit,
3993
3994         .sched_in = svm_sched_in,
3995
3996         .pmu_ops = &amd_pmu_ops,
3997         .deliver_posted_interrupt = svm_deliver_avic_intr,
3998         .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
3999         .update_pi_irte = svm_update_pi_irte,
4000         .setup_mce = svm_setup_mce,
4001
4002         .smi_allowed = svm_smi_allowed,
4003         .pre_enter_smm = svm_pre_enter_smm,
4004         .pre_leave_smm = svm_pre_leave_smm,
4005         .enable_smi_window = enable_smi_window,
4006
4007         .mem_enc_op = svm_mem_enc_op,
4008         .mem_enc_reg_region = svm_register_enc_region,
4009         .mem_enc_unreg_region = svm_unregister_enc_region,
4010
4011         .nested_enable_evmcs = NULL,
4012         .nested_get_evmcs_version = NULL,
4013
4014         .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
4015
4016         .apic_init_signal_blocked = svm_apic_init_signal_blocked,
4017
4018         .check_nested_events = svm_check_nested_events,
4019 };
4020
4021 static struct kvm_x86_init_ops svm_init_ops __initdata = {
4022         .cpu_has_kvm_support = has_svm,
4023         .disabled_by_bios = is_disabled,
4024         .hardware_setup = svm_hardware_setup,
4025         .check_processor_compatibility = svm_check_processor_compat,
4026
4027         .runtime_ops = &svm_x86_ops,
4028 };
4029
4030 static int __init svm_init(void)
4031 {
4032         return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
4033                         __alignof__(struct vcpu_svm), THIS_MODULE);
4034 }
4035
4036 static void __exit svm_exit(void)
4037 {
4038         kvm_exit();
4039 }
4040
4041 module_init(svm_init)
4042 module_exit(svm_exit)