KVM: SVM: Add support for the SEV-ES VMSA
arch/x86/kvm/svm/svm.c
#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
        X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN           0x0000000000000001ULL
#define TSC_RATIO_MAX           0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
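/*
 * The TSC ratio MSR is an 8.32 fixed-point value (8 integer bits, as
 * implied by TSC_RATIO_MAX above, and 32 fractional bits, matching
 * kvm_tsc_scaling_ratio_frac_bits below), so 0x0100000000 is a 1:1 ratio.
 */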
#define TSC_RATIO_DEFAULT       0x0100000000ULL

static const struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
        bool always; /* True if intercept is always on */
} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
        { .index = MSR_STAR,                            .always = true  },
        { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
#ifdef CONFIG_X86_64
        { .index = MSR_GS_BASE,                         .always = true  },
        { .index = MSR_FS_BASE,                         .always = true  },
        { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
        { .index = MSR_LSTAR,                           .always = true  },
        { .index = MSR_CSTAR,                           .always = true  },
        { .index = MSR_SYSCALL_MASK,                    .always = true  },
#endif
        { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
        { .index = MSR_IA32_PRED_CMD,                   .always = false },
        { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
        { .index = MSR_IA32_LASTINTTOIP,                .always = false },
        { .index = MSR_INVALID,                         .always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
bool npt_enabled = true;
#else
bool npt_enabled;
#endif
/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 *
 * pause_filter_count: On processors that support Pause filtering (indicated
 *      by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
 *      count value. On VMRUN this value is loaded into an internal counter.
 *      Each time a pause instruction is executed, this counter is decremented
 *      until it reaches zero, at which time a #VMEXIT is generated if pause
 *      intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *      Intercept Filtering for more details.
 *      This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *      pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *      upper bound on the amount of time a guest is allowed to execute in a
 *      pause loop. In this mode, a 16-bit pause filter threshold field is
 *      added in the VMCB. The threshold value is a cycle count that is used
 *      to reset the pause counter. As with simple pause filtering, VMRUN
 *      loads the pause count value from the VMCB into an internal counter.
 *      Then, on each pause instruction the hardware checks the elapsed number
 *      of cycles since the most recent pause instruction against the pause
 *      filter threshold. If the elapsed cycle count is greater than the pause
 *      filter threshold, then the internal pause count is reloaded from the
 *      VMCB and execution continues. If the elapsed cycle count is less than
 *      the pause filter threshold, then the internal pause count is
 *      decremented. If the count value is less than zero and PAUSE intercept
 *      is enabled, a #VMEXIT is triggered. If advanced pause filtering is
 *      supported and the pause filter threshold field is set to zero, the
 *      filter will operate in the simpler, count only mode.
 */
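
/*
 * Worked example (illustrative numbers, not a statement of the defaults):
 * with pause_filter_count = 3000 and pause_filter_thresh = 128, VMRUN loads
 * 3000 into the internal counter. A PAUSE executed within 128 cycles of the
 * previous PAUSE decrements the counter; a PAUSE executed later than that
 * reloads the counter to 3000. Only a tight loop of roughly 3000
 * back-to-back PAUSEs therefore triggers a #VMEXIT.
 */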

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

/* enable/disable SEV-ES support */
int sev_es = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev_es, int, 0444);

static bool __read_mostly dump_invalid_vmcb = false;
module_param(dump_invalid_vmcb, bool, 0644);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_complete_interrupts(struct vcpu_svm *svm);

static unsigned long iopm_base;

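/*
 * Layout of the 16-byte LDT/TSS descriptor used in long mode; system
 * descriptors are expanded to 16 bytes there, hence the base3 field and
 * the reserved zero1 word.
 */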
struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1:8, type:5, dpl:2, p:1;
        unsigned limit1:4, zero0:3, g:1, base2:8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

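/*
 * The MSR permission map consists of three 2K regions, one per MSR range
 * above, with two permission bits per MSR (so 2048 * 8 / 2 = 8192 MSRs per
 * range). svm_msrpm_offset() returns the u32 index of the word holding an
 * MSR's bit pair. Example: for MSR 0xc0000081 (MSR_STAR), range 1 applies,
 * the byte offset is 0x81 / 4 + 2048 = 2080, and the u32 offset is
 * 2080 / 4 = 520.
 */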
u32 svm_msrpm_offset(u32 msr)
{
        u32 offset;
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr < msrpm_ranges[i] ||
                    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
                        continue;

                offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
                offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

                /* Now we have the u8 offset - but need the u32 offset */
                return offset / 4;
        }

        /* MSR not in any range */
        return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
        asm volatile (__ex("clgi"));
}

static inline void stgi(void)
{
        asm volatile (__ex("stgi"));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
}

static int get_max_npt_level(void)
{
#ifdef CONFIG_X86_64
        return PT64_ROOT_4LEVEL;
#else
        return PT32E_ROOT_LEVEL;
#endif
}

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 old_efer = vcpu->arch.efer;

        vcpu->arch.efer = efer;

        if (!npt_enabled) {
                /* Shadow paging assumes NX to be available.  */
                efer |= EFER_NX;

                if (!(efer & EFER_LMA))
                        efer &= ~EFER_LME;
        }

        if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
                if (!(efer & EFER_SVME)) {
                        svm_leave_nested(svm);
                        svm_set_gif(svm, true);

                        /*
                         * Free the nested guest state, unless we are in SMM.
                         * In this case we will return to the nested guest
                         * as soon as we leave SMM.
                         */
                        if (!is_smm(&svm->vcpu))
                                svm_free_nested(svm);

                } else {
                        int ret = svm_allocate_nested(svm);

                        if (ret) {
                                vcpu->arch.efer = old_efer;
                                return ret;
                        }
                }
        }

        svm->vmcb->save.efer = efer | EFER_SVME;
        vmcb_mark_dirty(svm->vmcb, VMCB_CR);
        return 0;
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (nrips && svm->vmcb->control.next_rip != 0) {
                WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
                svm->next_rip = svm->vmcb->control.next_rip;
        }

        if (!svm->next_rip) {
                if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
                        return 0;
        } else {
                kvm_rip_write(vcpu, svm->next_rip);
        }
        svm_set_interrupt_shadow(vcpu, 0);

        return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned nr = vcpu->arch.exception.nr;
        bool has_error_code = vcpu->arch.exception.has_error_code;
        u32 error_code = vcpu->arch.exception.error_code;

        kvm_deliver_exception_payload(&svm->vcpu);

        if (nr == BP_VECTOR && !nrips) {
                unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

                /*
                 * For guest debugging where we have to reinject #BP if some
                 * INT3 is guest-owned:
                 * Emulate nRIP by moving RIP forward. Will fail if injection
                 * raises a fault that is not intercepted. Still better than
                 * failing in all cases.
                 */
                (void)skip_emulated_instruction(&svm->vcpu);
                rip = kvm_rip_read(&svm->vcpu);
                svm->int3_rip = rip + svm->vmcb->save.cs.base;
                svm->int3_injected = rip - old_rip;
        }

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
        u32 low, high;
        int err;
        u64 val;

        if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
                return;

        /* Use _safe variants to not break nested virtualization */
        val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
        if (err)
                return;

        val |= (1ULL << 47);

        low  = lower_32_bits(val);
        high = upper_32_bits(val);

        native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

        erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
        /*
         * Guests should see errata 400 and 415 as fixed (assuming that
         * HLT and IO instructions are intercepted).
         */
        vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
        vcpu->arch.osvw.status = osvw_status & ~(6ULL);

        /*
         * By increasing VCPU's osvw.length to 3 we are telling the guest that
         * all osvw.status bits inside that length, including bit 0 (which is
         * reserved for erratum 298), are valid. However, if the host
         * processor's osvw_len is 0 then osvw_status[0] carries no
         * information. We need to be conservative here and therefore we tell
         * the guest that erratum 298 is present (because we really don't
         * know).
         */
        if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
                vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void)
{
        /* Make sure we clean up behind us */
        if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

        cpu_svm_disable();

        amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{
        struct svm_cpu_data *sd;
        uint64_t efer;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
                return -EINVAL;
        }
        sd = per_cpu(svm_data, me);
        if (!sd) {
                pr_err("%s: svm_data is NULL on %d\n", __func__, me);
                return -EINVAL;
        }

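        /*
         * ASIDs 1..max_sev_asid are reserved for SEV guests; this CPU hands
         * out ASIDs for non-SEV guests from min_asid upward.
         */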
        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;
        sd->min_asid = max_sev_asid + 1;

        gdt = get_current_gdt_rw();
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

        wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
                __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
        }

        /*
         * Get OSVW bits.
         *
         * Note that it is possible to have a system with mixed processor
         * revisions and therefore different OSVW bits. If bits are not the same
         * on different processors then choose the worst case (i.e. if erratum
         * is present on one processor and not on another then assume that the
         * erratum is present everywhere).
         */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
                uint64_t len, status = 0;
                int err;

                len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
                if (!err)
                        status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
                                                      &err);

                if (err) {
                        osvw_status = osvw_len = 0;
                } else {
                        if (len < osvw_len)
                                osvw_len = len;
                        osvw_status |= status;
                        osvw_status &= (1ULL << osvw_len) - 1;
                }
        } else {
                osvw_status = osvw_len = 0;
        }

        svm_init_erratum_383();

        amd_pmu_enable_virt();

        return 0;
}

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

        if (!sd)
                return;

        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
        kfree(sd->sev_vmcbs);
        __free_page(sd->save_area);
        kfree(sd);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *sd;

        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return -ENOMEM;
        sd->cpu = cpu;
        sd->save_area = alloc_page(GFP_KERNEL);
        if (!sd->save_area)
                goto free_cpu_data;

        if (svm_sev_enabled()) {
                sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
                                              sizeof(void *),
                                              GFP_KERNEL);
                if (!sd->sev_vmcbs)
                        goto free_save_area;
        }

        per_cpu(svm_data, cpu) = sd;

        return 0;

free_save_area:
        __free_page(sd->save_area);
free_cpu_data:
        kfree(sd);
        return -ENOMEM;
}

static int direct_access_msr_slot(u32 msr)
{
        u32 i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
                if (direct_access_msrs[i].index == msr)
                        return i;

        return -ENOENT;
}

static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
                                     int write)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int slot = direct_access_msr_slot(msr);

        if (slot == -ENOENT)
                return;

        /* Set the shadow bitmaps to the desired intercept states */
        if (read)
                set_bit(slot, svm->shadow_msr_intercept.read);
        else
                clear_bit(slot, svm->shadow_msr_intercept.read);

        if (write)
                set_bit(slot, svm->shadow_msr_intercept.write);
        else
                clear_bit(slot, svm->shadow_msr_intercept.write);
}

static bool valid_msr_intercept(u32 index)
{
        return direct_access_msr_slot(index) != -ENOENT;
}

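/*
 * Each MSR owns two consecutive bits in its permission-map word: the even
 * bit (2 * (msr & 0x0f)) intercepts reads and the odd bit intercepts
 * writes; a set bit means "intercept". Each u32 therefore covers 16 MSRs.
 */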
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
        u8 bit_write;
        unsigned long tmp;
        u32 offset;
        u32 *msrpm;

        msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
                                      to_svm(vcpu)->msrpm;

        offset    = svm_msrpm_offset(msr);
        bit_write = 2 * (msr & 0x0f) + 1;

        /* Check the offset before using it to index the bitmap. */
        BUG_ON(offset == MSR_INVALID);

        tmp = msrpm[offset];

        return !!test_bit(bit_write,  &tmp);
}

static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
                                        u32 msr, int read, int write)
{
        u8 bit_read, bit_write;
        unsigned long tmp;
        u32 offset;

        /*
         * If this warning triggers extend the direct_access_msrs list at the
         * beginning of the file
         */
        WARN_ON(!valid_msr_intercept(msr));

        /* Enforce non-allowed MSRs to trap */
        if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
                read = 0;

        if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
                write = 0;

        offset    = svm_msrpm_offset(msr);
        bit_read  = 2 * (msr & 0x0f);
        bit_write = 2 * (msr & 0x0f) + 1;

        /* Check the offset before using it to index the bitmap. */
        BUG_ON(offset == MSR_INVALID);

        tmp = msrpm[offset];

        read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
        write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

        msrpm[offset] = tmp;
}

static void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                                 int read, int write)
{
        set_shadow_msr_intercept(vcpu, msr, read, write);
        set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
}

u32 *svm_vcpu_alloc_msrpm(void)
{
        struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
        u32 *msrpm;

        if (!pages)
                return NULL;

        msrpm = page_address(pages);
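        /* A set bit means "intercept", so start with everything intercepted. */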
        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

        return msrpm;
}

void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
{
        int i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                if (!direct_access_msrs[i].always)
                        continue;
                set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
        }
}

void svm_vcpu_free_msrpm(u32 *msrpm)
{
        __free_pages(virt_to_page(msrpm), MSRPM_ALLOC_ORDER);
}

static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 i;

        /*
         * Set intercept permissions for all direct access MSRs again. They
         * will automatically get filtered through the MSR filter, so we are
         * back in sync after this.
         */
        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 msr = direct_access_msrs[i].index;
                u32 read = test_bit(i, svm->shadow_msr_intercept.read);
                u32 write = test_bit(i, svm->shadow_msr_intercept.write);

                set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
        }
}

static void add_msr_offset(u32 offset)
{
        int i;

        for (i = 0; i < MSRPM_OFFSETS; ++i) {

                /* Offset already in list? */
                if (msrpm_offsets[i] == offset)
                        return;

                /* Slot used by another offset? */
                if (msrpm_offsets[i] != MSR_INVALID)
                        continue;

                /* Add offset to list */
                msrpm_offsets[i] = offset;

                return;
        }

        /*
         * If this BUG triggers the msrpm_offsets table has an overflow. Just
         * increase MSRPM_OFFSETS in this case.
         */
        BUG();
}

static void init_msrpm_offsets(void)
{
        int i;

        memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 offset;

                offset = svm_msrpm_offset(direct_access_msrs[i].index);
                BUG_ON(offset == MSR_INVALID);

                add_msr_offset(offset);
        }
}

static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
        svm->nmi_singlestep = false;

        if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
                /* Clear our flags if they were not set by the guest */
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
        }
}

static void grow_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count = __grow_ple_window(old,
                                                        pause_filter_count,
                                                        pause_filter_count_grow,
                                                        pause_filter_count_max);

        if (control->pause_filter_count != old) {
                vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count =
                                __shrink_ple_window(old,
                                                    pause_filter_count,
                                                    pause_filter_count_shrink,
                                                    pause_filter_count);
        if (control->pause_filter_count != old) {
                vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
static __init void svm_adjust_mmio_mask(void)
{
        unsigned int enc_bit, mask_bit;
        u64 msr, mask;

        /* If there is no memory encryption support, use existing mask */
        if (cpuid_eax(0x80000000) < 0x8000001f)
                return;

        /* If memory encryption is not enabled, use existing mask */
        rdmsrl(MSR_K8_SYSCFG, msr);
        if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                return;

        enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
        mask_bit = boot_cpu_data.x86_phys_bits;

        /* Increment the mask bit if it is the same as the encryption bit */
        if (enc_bit == mask_bit)
                mask_bit++;

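        /*
         * For example (hypothetical values), with the C-bit at position 47
         * on a CPU reporting 48 physical address bits, mask_bit is 48 and
         * the MMIO mask below becomes rsvd_bits(48, 51) | PT_PRESENT_MASK.
         */
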
        /*
         * If the mask bit location is below 52, then some bits above the
         * physical addressing limit will always be reserved, so use the
         * rsvd_bits() function to generate the mask. This mask, along with
         * the present bit, will be used to generate a page fault with
         * PFER.RSV = 1.
         *
         * If the mask bit location is 52 (or above), then clear the mask.
         */
        mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

        kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
}

static void svm_hardware_teardown(void)
{
        int cpu;

        if (svm_sev_enabled())
                sev_hardware_teardown();

        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = 0;
}

static __init void svm_set_cpu_caps(void)
{
        kvm_set_cpu_caps();

        supported_xss = 0;

        /* CPUID 0x80000001 and 0x8000000A (SVM features) */
        if (nested) {
                kvm_cpu_cap_set(X86_FEATURE_SVM);

                if (nrips)
                        kvm_cpu_cap_set(X86_FEATURE_NRIPS);

                if (npt_enabled)
                        kvm_cpu_cap_set(X86_FEATURE_NPT);
        }

        /* CPUID 0x80000008 */
        if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
            boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

        /* Enable INVPCID feature */
        kvm_cpu_cap_check_and_set(X86_FEATURE_INVPCID);
}

static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        void *iopm_va;
        int r;

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
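        /* A set bit in the IOPM intercepts that I/O port, so intercept all. */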
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        init_msrpm_offsets();

        supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);

        if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
                kvm_enable_efer_bits(EFER_FFXSR);

        if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                kvm_has_tsc_control = true;
                kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
                kvm_tsc_scaling_ratio_frac_bits = 32;
        }

        /* Check for pause filtering support */
        if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
                pause_filter_count = 0;
                pause_filter_thresh = 0;
        } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
                pause_filter_thresh = 0;
        }

        if (nested) {
                printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
                kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
        }

        if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev) {
                sev_hardware_setup();
        } else {
                sev = false;
                sev_es = false;
        }

        svm_adjust_mmio_mask();

        for_each_possible_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err;
        }

        if (!boot_cpu_has(X86_FEATURE_NPT))
                npt_enabled = false;

        if (npt_enabled && !npt)
                npt_enabled = false;

        kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
        pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

        if (nrips) {
                if (!boot_cpu_has(X86_FEATURE_NRIPS))
                        nrips = false;
        }

        if (avic) {
                if (!npt_enabled ||
                    !boot_cpu_has(X86_FEATURE_AVIC) ||
                    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
                        avic = false;
                } else {
                        pr_info("AVIC enabled\n");

                        amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
                }
        }

        if (vls) {
                if (!npt_enabled ||
                    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
                    !IS_ENABLED(CONFIG_X86_64)) {
                        vls = false;
                } else {
                        pr_info("Virtual VMLOAD VMSAVE supported\n");
                }
        }

        if (vgif) {
                if (!boot_cpu_has(X86_FEATURE_VGIF))
                        vgif = false;
                else
                        pr_info("Virtual GIF supported\n");
        }

        svm_set_cpu_caps();

        /*
         * It seems that on AMD processors the PTE's accessed bit is
         * being set by the CPU hardware before the NPF vmexit.
         * This is not expected behaviour and our tests fail because
         * of it.
         * A workaround here is to disable support for
         * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
         * In this case userspace can know if there is support using
         * the KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to
         * handle it.
         * If future AMD CPU models change the behaviour described above,
         * this variable can be changed accordingly.
         */
        allow_smaller_maxphyaddr = !npt_enabled;

        return 0;

err:
        svm_hardware_teardown();
        return r;
}

static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}

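/*
 * Writes L1's TSC offset while a nested (L2) guest may be running: the
 * current L1->L2 delta (g_tsc_offset) is preserved, so the L2 offset
 * becomes the new L1 offset plus that delta.
 */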
static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 g_tsc_offset = 0;

        if (is_guest_mode(vcpu)) {
                /* Write L1's TSC offset.  */
                g_tsc_offset = svm->vmcb->control.tsc_offset -
                               svm->nested.hsave->control.tsc_offset;
                svm->nested.hsave->control.tsc_offset = offset;
        }

        trace_kvm_write_tsc_offset(vcpu->vcpu_id,
                                   svm->vmcb->control.tsc_offset - g_tsc_offset,
                                   offset);

        svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

        vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
        return svm->vmcb->control.tsc_offset;
}

static void svm_check_invpcid(struct vcpu_svm *svm)
{
        /*
         * Intercept INVPCID instruction only if shadow page table is
         * enabled. Interception is not required with nested page table
         * enabled.
         */
        if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
                if (!npt_enabled)
                        svm_set_intercept(svm, INTERCEPT_INVPCID);
                else
                        svm_clr_intercept(svm, INTERCEPT_INVPCID);
        }
}

static void init_vmcb(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct vmcb_save_area *save = &svm->vmcb->save;

        svm->vcpu.arch.hflags = 0;

        svm_set_intercept(svm, INTERCEPT_CR0_READ);
        svm_set_intercept(svm, INTERCEPT_CR3_READ);
        svm_set_intercept(svm, INTERCEPT_CR4_READ);
        svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
        svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
        svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
        if (!kvm_vcpu_apicv_active(&svm->vcpu))
                svm_set_intercept(svm, INTERCEPT_CR8_WRITE);

        set_dr_intercepts(svm);

        set_exception_intercept(svm, PF_VECTOR);
        set_exception_intercept(svm, UD_VECTOR);
        set_exception_intercept(svm, MC_VECTOR);
        set_exception_intercept(svm, AC_VECTOR);
        set_exception_intercept(svm, DB_VECTOR);
        /*
         * Guest access to VMware backdoor ports could legitimately
         * trigger #GP because of TSS I/O permission bitmap.
         * We intercept those #GP and allow access to them anyway
         * as VMware does.
         */
        if (enable_vmware_backdoor)
                set_exception_intercept(svm, GP_VECTOR);

        svm_set_intercept(svm, INTERCEPT_INTR);
        svm_set_intercept(svm, INTERCEPT_NMI);
        svm_set_intercept(svm, INTERCEPT_SMI);
        svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
        svm_set_intercept(svm, INTERCEPT_RDPMC);
        svm_set_intercept(svm, INTERCEPT_CPUID);
        svm_set_intercept(svm, INTERCEPT_INVD);
        svm_set_intercept(svm, INTERCEPT_INVLPG);
        svm_set_intercept(svm, INTERCEPT_INVLPGA);
        svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
        svm_set_intercept(svm, INTERCEPT_MSR_PROT);
        svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
        svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
        svm_set_intercept(svm, INTERCEPT_VMRUN);
        svm_set_intercept(svm, INTERCEPT_VMMCALL);
        svm_set_intercept(svm, INTERCEPT_VMLOAD);
        svm_set_intercept(svm, INTERCEPT_VMSAVE);
        svm_set_intercept(svm, INTERCEPT_STGI);
        svm_set_intercept(svm, INTERCEPT_CLGI);
        svm_set_intercept(svm, INTERCEPT_SKINIT);
        svm_set_intercept(svm, INTERCEPT_WBINVD);
        svm_set_intercept(svm, INTERCEPT_XSETBV);
        svm_set_intercept(svm, INTERCEPT_RDPRU);
        svm_set_intercept(svm, INTERCEPT_RSM);

        if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
                svm_set_intercept(svm, INTERCEPT_MONITOR);
                svm_set_intercept(svm, INTERCEPT_MWAIT);
        }

        if (!kvm_hlt_in_guest(svm->vcpu.kvm))
                svm_set_intercept(svm, INTERCEPT_HLT);

        control->iopm_base_pa = __sme_set(iopm_base);
        control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
        control->int_ctl = V_INTR_MASKING_MASK;

        init_seg(&save->es);
        init_seg(&save->ss);
        init_seg(&save->ds);
        init_seg(&save->fs);
        init_seg(&save->gs);

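        /*
         * CS.base 0xffff0000 plus RIP 0xfff0 points at 0xfffffff0, the x86
         * reset vector.
         */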
1176         save->cs.selector = 0xf000;
1177         save->cs.base = 0xffff0000;
1178         /* Executable/Readable Code Segment */
1179         save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1180                 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1181         save->cs.limit = 0xffff;
1182
1183         save->gdtr.limit = 0xffff;
1184         save->idtr.limit = 0xffff;
1185
1186         init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1187         init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1188
1189         svm_set_efer(&svm->vcpu, 0);
1190         save->dr6 = 0xffff0ff0;
1191         kvm_set_rflags(&svm->vcpu, 2);
1192         save->rip = 0x0000fff0;
1193         svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
1194
1195         /*
1196          * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
1197          * It also updates the guest-visible cr0 value.
1198          */
1199         svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
1200         kvm_mmu_reset_context(&svm->vcpu);
1201
1202         save->cr4 = X86_CR4_PAE;
1203         /* rdx = ?? */
1204
1205         if (npt_enabled) {
1206                 /* Setup VMCB for Nested Paging */
1207                 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
1208                 svm_clr_intercept(svm, INTERCEPT_INVLPG);
1209                 clr_exception_intercept(svm, PF_VECTOR);
1210                 svm_clr_intercept(svm, INTERCEPT_CR3_READ);
1211                 svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
1212                 save->g_pat = svm->vcpu.arch.pat;
1213                 save->cr3 = 0;
1214                 save->cr4 = 0;
1215         }
1216         svm->asid_generation = 0;
1217         svm->asid = 0;
1218
1219         svm->nested.vmcb12_gpa = 0;
1220         svm->vcpu.arch.hflags = 0;
1221
1222         if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
1223                 control->pause_filter_count = pause_filter_count;
1224                 if (pause_filter_thresh)
1225                         control->pause_filter_thresh = pause_filter_thresh;
1226                 svm_set_intercept(svm, INTERCEPT_PAUSE);
1227         } else {
1228                 svm_clr_intercept(svm, INTERCEPT_PAUSE);
1229         }
1230
1231         svm_check_invpcid(svm);
1232
1233         if (kvm_vcpu_apicv_active(&svm->vcpu))
1234                 avic_init_vmcb(svm);
1235
1236         /*
1237          * If hardware supports Virtual VMLOAD VMSAVE then enable it
1238          * in VMCB and clear intercepts to avoid #VMEXIT.
1239          */
1240         if (vls) {
1241                 svm_clr_intercept(svm, INTERCEPT_VMLOAD);
1242                 svm_clr_intercept(svm, INTERCEPT_VMSAVE);
1243                 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1244         }
1245
1246         if (vgif) {
1247                 svm_clr_intercept(svm, INTERCEPT_STGI);
1248                 svm_clr_intercept(svm, INTERCEPT_CLGI);
1249                 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1250         }
1251
1252         if (sev_guest(svm->vcpu.kvm)) {
1253                 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
1254                 clr_exception_intercept(svm, UD_VECTOR);
1255         }
1256
1257         vmcb_mark_all_dirty(svm->vmcb);
1258
1259         enable_gif(svm);
1260
1261 }
1262
1263 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1264 {
1265         struct vcpu_svm *svm = to_svm(vcpu);
1266         u32 dummy;
1267         u32 eax = 1;
1268
1269         svm->spec_ctrl = 0;
1270         svm->virt_spec_ctrl = 0;
1271
1272         if (!init_event) {
1273                 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
1274                                            MSR_IA32_APICBASE_ENABLE;
1275                 if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
1276                         svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
1277         }
1278         init_vmcb(svm);
1279
1280         kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false);
1281         kvm_rdx_write(vcpu, eax);
1282
1283         if (kvm_vcpu_apicv_active(vcpu) && !init_event)
1284                 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
1285 }
1286
1287 static int svm_create_vcpu(struct kvm_vcpu *vcpu)
1288 {
1289         struct vcpu_svm *svm;
1290         struct page *vmcb_page;
1291         struct page *vmsa_page = NULL;
1292         int err;
1293
1294         BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
1295         svm = to_svm(vcpu);
1296
1297         err = -ENOMEM;
1298         vmcb_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1299         if (!vmcb_page)
1300                 goto out;
1301
1302         if (sev_es_guest(svm->vcpu.kvm)) {
1303                 /*
1304                  * SEV-ES guests require a separate VMSA page used to contain
1305                  * the encrypted register state of the guest.
1306                  */
1307                 vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1308                 if (!vmsa_page)
1309                         goto error_free_vmcb_page;
1310         }
1311
1312         err = avic_init_vcpu(svm);
1313         if (err)
1314                 goto error_free_vmsa_page;
1315
1316         /* We initialize this flag to true to make sure that the is_running
1317          * bit would be set the first time the vcpu is loaded.
1318          */
1319         if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm))
1320                 svm->avic_is_running = true;
1321
1322         svm->msrpm = svm_vcpu_alloc_msrpm();
1323         if (!svm->msrpm)
1324                 goto error_free_vmsa_page;
1325
1326         svm_vcpu_init_msrpm(vcpu, svm->msrpm);
1327
1328         svm->vmcb = page_address(vmcb_page);
1329         svm->vmcb_pa = __sme_set(page_to_pfn(vmcb_page) << PAGE_SHIFT);
1330
1331         if (vmsa_page)
1332                 svm->vmsa = page_address(vmsa_page);
1333
1334         svm->asid_generation = 0;
1335         init_vmcb(svm);
1336
1337         svm_init_osvw(vcpu);
1338         vcpu->arch.microcode_version = 0x01000065;
1339
1340         return 0;
1341
1342 error_free_vmsa_page:
1343         if (vmsa_page)
1344                 __free_page(vmsa_page);
1345 error_free_vmcb_page:
1346         __free_page(vmcb_page);
1347 out:
1348         return err;
1349 }
1350
1351 static void svm_clear_current_vmcb(struct vmcb *vmcb)
1352 {
1353         int i;
1354
1355         for_each_online_cpu(i)
1356                 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
1357 }
1358
1359 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1360 {
1361         struct vcpu_svm *svm = to_svm(vcpu);
1362
1363         /*
1364          * The vmcb page can be recycled, causing a false negative in
1365          * svm_vcpu_load(). So, ensure that no logical CPU has this
1366          * vmcb page recorded as its current vmcb.
1367          */
1368         svm_clear_current_vmcb(svm->vmcb);
1369
1370         svm_free_nested(svm);
1371
1372         sev_free_vcpu(vcpu);
1373
1374         __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
1375         __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
1376 }
1377
1378 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1379 {
1380         struct vcpu_svm *svm = to_svm(vcpu);
1381         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
1382         int i;
1383
1384         if (unlikely(cpu != vcpu->cpu)) {
1385                 svm->asid_generation = 0;
1386                 vmcb_mark_all_dirty(svm->vmcb);
1387         }
1388
1389 #ifdef CONFIG_X86_64
1390         rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
1391 #endif
1392         savesegment(fs, svm->host.fs);
1393         savesegment(gs, svm->host.gs);
1394         svm->host.ldt = kvm_read_ldt();
1395
1396         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
1397                 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
1398
1399         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
1400                 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
1401                 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
1402                         __this_cpu_write(current_tsc_ratio, tsc_ratio);
1403                         wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
1404                 }
1405         }
1406         /* This assumes that the kernel never uses MSR_TSC_AUX */
1407         if (static_cpu_has(X86_FEATURE_RDTSCP))
1408                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
1409
1410         if (sd->current_vmcb != svm->vmcb) {
1411                 sd->current_vmcb = svm->vmcb;
1412                 indirect_branch_prediction_barrier();
1413         }
1414         avic_vcpu_load(vcpu, cpu);
1415 }
1416
1417 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1418 {
1419         struct vcpu_svm *svm = to_svm(vcpu);
1420         int i;
1421
1422         avic_vcpu_put(vcpu);
1423
1424         ++vcpu->stat.host_state_reload;
1425         kvm_load_ldt(svm->host.ldt);
1426 #ifdef CONFIG_X86_64
1427         loadsegment(fs, svm->host.fs);
1428         wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
1429         load_gs_index(svm->host.gs);
1430 #else
1431 #ifdef CONFIG_X86_32_LAZY_GS
1432         loadsegment(gs, svm->host.gs);
1433 #endif
1434 #endif
1435         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
1436                 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
1437 }
1438
1439 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1440 {
1441         struct vcpu_svm *svm = to_svm(vcpu);
1442         unsigned long rflags = svm->vmcb->save.rflags;
1443
1444         if (svm->nmi_singlestep) {
1445                 /* Hide our flags if they were not set by the guest */
1446                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1447                         rflags &= ~X86_EFLAGS_TF;
1448                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1449                         rflags &= ~X86_EFLAGS_RF;
1450         }
1451         return rflags;
1452 }
1453
1454 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1455 {
1456         if (to_svm(vcpu)->nmi_singlestep)
1457                 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
1458
1459        /*
1460         * Any change of EFLAGS.VM is accompanied by a reload of SS
1461         * (caused by either a task switch or an inter-privilege IRET),
1462         * so we do not need to update the CPL here.
1463         */
1464         to_svm(vcpu)->vmcb->save.rflags = rflags;
1465 }
1466
1467 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1468 {
1469         switch (reg) {
1470         case VCPU_EXREG_PDPTR:
1471                 BUG_ON(!npt_enabled);
1472                 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
1473                 break;
1474         default:
1475                 WARN_ON_ONCE(1);
1476         }
1477 }
1478
1479 static void svm_set_vintr(struct vcpu_svm *svm)
1480 {
1481         struct vmcb_control_area *control;
1482
1483         /* The following fields are ignored when AVIC is enabled */
1484         WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));
1485         svm_set_intercept(svm, INTERCEPT_VINTR);
1486
1487         /*
1488          * This is just a dummy VINTR to actually cause a vmexit to happen.
1489          * Actual injection of virtual interrupts happens through EVENTINJ.
1490          */
1491         control = &svm->vmcb->control;
1492         control->int_vector = 0x0;
1493         control->int_ctl &= ~V_INTR_PRIO_MASK;
1494         control->int_ctl |= V_IRQ_MASK |
1495                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1496         vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1497 }
1498
1499 static void svm_clear_vintr(struct vcpu_svm *svm)
1500 {
1501         const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
1502         svm_clr_intercept(svm, INTERCEPT_VINTR);
1503
1504         /* Drop int_ctl fields related to VINTR injection.  */
1505         svm->vmcb->control.int_ctl &= mask;
1506         if (is_guest_mode(&svm->vcpu)) {
1507                 svm->nested.hsave->control.int_ctl &= mask;
1508
1509                 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
1510                         (svm->nested.ctl.int_ctl & V_TPR_MASK));
1511                 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
1512         }
1513
1514         vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1515 }
1516
1517 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1518 {
1519         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1520
1521         switch (seg) {
1522         case VCPU_SREG_CS: return &save->cs;
1523         case VCPU_SREG_DS: return &save->ds;
1524         case VCPU_SREG_ES: return &save->es;
1525         case VCPU_SREG_FS: return &save->fs;
1526         case VCPU_SREG_GS: return &save->gs;
1527         case VCPU_SREG_SS: return &save->ss;
1528         case VCPU_SREG_TR: return &save->tr;
1529         case VCPU_SREG_LDTR: return &save->ldtr;
1530         }
1531         BUG();
1532         return NULL;
1533 }
1534
1535 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1536 {
1537         struct vmcb_seg *s = svm_seg(vcpu, seg);
1538
1539         return s->base;
1540 }
1541
1542 static void svm_get_segment(struct kvm_vcpu *vcpu,
1543                             struct kvm_segment *var, int seg)
1544 {
1545         struct vmcb_seg *s = svm_seg(vcpu, seg);
1546
1547         var->base = s->base;
1548         var->limit = s->limit;
1549         var->selector = s->selector;
1550         var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1551         var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1552         var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1553         var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1554         var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1555         var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1556         var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1557
1558         /*
1559          * AMD CPUs circa 2014 track the G bit for all segments except CS.
1560          * However, the SVM spec states that the G bit is not observed by the
1561          * CPU, and some VMware virtual CPUs drop the G bit for all segments.
1562          * So let's synthesize a legal G bit for all segments; this helps
1563          * when running KVM nested.  It also helps cross-vendor migration,
1564          * because Intel's vmentry has a check on the 'G' bit.
1565          */
1566         var->g = s->limit > 0xfffff;
1567
1568         /*
1569          * AMD's VMCB does not have an explicit unusable field, so emulate it
1570          * for cross-vendor migration purposes by treating a non-present
1571          * segment as unusable.
1572          */
1572         var->unusable = !var->present;
1573
1574         switch (seg) {
1575         case VCPU_SREG_TR:
1576                 /*
1577                  * Work around a bug where the busy flag in the tr selector
1578                  * isn't exposed
1579                  */
1580                 var->type |= 0x2;
1581                 break;
1582         case VCPU_SREG_DS:
1583         case VCPU_SREG_ES:
1584         case VCPU_SREG_FS:
1585         case VCPU_SREG_GS:
1586                 /*
1587                  * The accessed bit must always be set in the segment
1588                  * descriptor cache: although it can be cleared in the
1589                  * descriptor itself, the cached bit always remains 1.
1590                  * Since Intel has a check on this, set it here to support
1591                  * cross-vendor migration.
1592                  */
1593                 if (!var->unusable)
1594                         var->type |= 0x1;
1595                 break;
1596         case VCPU_SREG_SS:
1597                 /*
1598                  * On AMD CPUs sometimes the DB bit in the segment
1599                  * descriptor is left as 1, although the whole segment has
1600                  * been made unusable. Clear it here to pass an Intel VMX
1601                  * entry check when cross vendor migrating.
1602                  */
1603                 if (var->unusable)
1604                         var->db = 0;
1605                 /* This is symmetric with svm_set_segment() */
1606                 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1607                 break;
1608         }
1609 }
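
/*
 * Editorial note, illustrative sketch only: the VMCB "attrib" word unpacked
 * above is the architectural descriptor attribute byte pair with the
 * segment-limit bits squeezed out, i.e. descriptor bits 40-47 live in attrib
 * bits 0-7 and bits 52-55 in attrib bits 8-11.  Converting from a flat
 * 16-bit access-rights word (the VMX-style layout) therefore looks like:
 */
static inline u16 __maybe_unused example_ar_to_svm_attrib(u32 ar)
{
	return (ar & 0xff) | ((ar & 0xf000) >> 4);
}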
1610
1611 static int svm_get_cpl(struct kvm_vcpu *vcpu)
1612 {
1613         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1614
1615         return save->cpl;
1616 }
1617
1618 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1619 {
1620         struct vcpu_svm *svm = to_svm(vcpu);
1621
1622         dt->size = svm->vmcb->save.idtr.limit;
1623         dt->address = svm->vmcb->save.idtr.base;
1624 }
1625
1626 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1627 {
1628         struct vcpu_svm *svm = to_svm(vcpu);
1629
1630         svm->vmcb->save.idtr.limit = dt->size;
1631         svm->vmcb->save.idtr.base = dt->address;
1632         vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1633 }
1634
1635 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1636 {
1637         struct vcpu_svm *svm = to_svm(vcpu);
1638
1639         dt->size = svm->vmcb->save.gdtr.limit;
1640         dt->address = svm->vmcb->save.gdtr.base;
1641 }
1642
1643 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1644 {
1645         struct vcpu_svm *svm = to_svm(vcpu);
1646
1647         svm->vmcb->save.gdtr.limit = dt->size;
1648         svm->vmcb->save.gdtr.base = dt->address;
1649         vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1650 }
1651
1652 static void update_cr0_intercept(struct vcpu_svm *svm)
1653 {
1654         ulong gcr0 = svm->vcpu.arch.cr0;
1655         u64 *hcr0 = &svm->vmcb->save.cr0;
1656
1657         *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
1658                 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
1659
1660         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1661
1662         if (gcr0 == *hcr0) {
1663                 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
1664                 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
1665         } else {
1666                 svm_set_intercept(svm, INTERCEPT_CR0_READ);
1667                 svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1668         }
1669 }
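
/*
 * Editorial note, illustrative sketch only: SVM_CR0_SELECTIVE_MASK is just
 * CR0.TS | CR0.MP, which update_cr0_intercept() always copies from the
 * guest.  After that copy, an intercept is needed exactly when the remaining
 * bits still differ, e.g. when PG/WP were forced on by svm_set_cr0() because
 * NPT is disabled:
 */
static bool __maybe_unused example_cr0_needs_intercept(ulong gcr0, u64 hcr0)
{
	hcr0 = (hcr0 & ~SVM_CR0_SELECTIVE_MASK) |
	       (gcr0 & SVM_CR0_SELECTIVE_MASK);

	return gcr0 != hcr0;
}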
1670
1671 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1672 {
1673         struct vcpu_svm *svm = to_svm(vcpu);
1674
1675 #ifdef CONFIG_X86_64
1676         if (vcpu->arch.efer & EFER_LME) {
1677                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
1678                         vcpu->arch.efer |= EFER_LMA;
1679                         svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1680                 }
1681
1682                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
1683                         vcpu->arch.efer &= ~EFER_LMA;
1684                         svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1685                 }
1686         }
1687 #endif
1688         vcpu->arch.cr0 = cr0;
1689
1690         if (!npt_enabled)
1691                 cr0 |= X86_CR0_PG | X86_CR0_WP;
1692
1693         /*
1694          * Re-enable caching here because the QEMU BIOS does not do it;
1695          * leaving it disabled results in some delay at reboot.
1696          */
1698         if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1699                 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1700         svm->vmcb->save.cr0 = cr0;
1701         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1702         update_cr0_intercept(svm);
1703 }
1704
1705 static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1706 {
1707         return true;
1708 }
1709
1710 void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1711 {
1712         unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
1713         unsigned long old_cr4 = vcpu->arch.cr4;
1714
1715         if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
1716                 svm_flush_tlb(vcpu);
1717
1718         vcpu->arch.cr4 = cr4;
1719         if (!npt_enabled)
1720                 cr4 |= X86_CR4_PAE;
1721         cr4 |= host_cr4_mce;
1722         to_svm(vcpu)->vmcb->save.cr4 = cr4;
1723         vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1724
1725         if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1726                 kvm_update_cpuid_runtime(vcpu);
1727 }
1728
1729 static void svm_set_segment(struct kvm_vcpu *vcpu,
1730                             struct kvm_segment *var, int seg)
1731 {
1732         struct vcpu_svm *svm = to_svm(vcpu);
1733         struct vmcb_seg *s = svm_seg(vcpu, seg);
1734
1735         s->base = var->base;
1736         s->limit = var->limit;
1737         s->selector = var->selector;
1738         s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1739         s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1740         s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1741         s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1742         s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1743         s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1744         s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1745         s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1746
1747         /*
1748          * This is always accurate, except if SYSRET returned to a segment
1749          * with SS.DPL != 3.  Intel does not have this quirk, and always
1750          * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1751          * would entail passing the CPL to userspace and back.
1752          */
1753         if (seg == VCPU_SREG_SS)
1754                 /* This is symmetric with svm_get_segment() */
1755                 svm->vmcb->save.cpl = (var->dpl & 3);
1756
1757         vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
1758 }
1759
1760 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1761 {
1762         struct vcpu_svm *svm = to_svm(vcpu);
1763
1764         clr_exception_intercept(svm, BP_VECTOR);
1765
1766         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1767                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1768                         set_exception_intercept(svm, BP_VECTOR);
1769         }
1770 }
1771
1772 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1773 {
1774         if (sd->next_asid > sd->max_asid) {
1775                 ++sd->asid_generation;
1776                 sd->next_asid = sd->min_asid;
1777                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1778                 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
1779         }
1780
1781         svm->asid_generation = sd->asid_generation;
1782         svm->asid = sd->next_asid++;
1783 }
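
/*
 * Editorial note, illustrative sketch only: how the run path is expected to
 * consume new_asid().  The real check lives in the pre-run path; the shape
 * is simply:
 */
static void __maybe_unused example_pre_run_asid(struct vcpu_svm *svm,
						struct svm_cpu_data *sd)
{
	/*
	 * A generation mismatch means this pCPU wrapped its ASID space since
	 * the vCPU last ran here, so the cached ASID may have been handed to
	 * another guest and must not be reused.
	 */
	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}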
1784
1785 static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
1786 {
1787         struct vmcb *vmcb = svm->vmcb;
1788
1789         if (unlikely(value != vmcb->save.dr6)) {
1790                 vmcb->save.dr6 = value;
1791                 vmcb_mark_dirty(vmcb, VMCB_DR);
1792         }
1793 }
1794
1795 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1796 {
1797         struct vcpu_svm *svm = to_svm(vcpu);
1798
1799         get_debugreg(vcpu->arch.db[0], 0);
1800         get_debugreg(vcpu->arch.db[1], 1);
1801         get_debugreg(vcpu->arch.db[2], 2);
1802         get_debugreg(vcpu->arch.db[3], 3);
1803         /*
1804          * We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here,
1805          * because db_interception might need it.  We can do it before vmentry.
1806          */
1807         vcpu->arch.dr6 = svm->vmcb->save.dr6;
1808         vcpu->arch.dr7 = svm->vmcb->save.dr7;
1809         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1810         set_dr_intercepts(svm);
1811 }
1812
1813 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
1814 {
1815         struct vcpu_svm *svm = to_svm(vcpu);
1816
1817         svm->vmcb->save.dr7 = value;
1818         vmcb_mark_dirty(svm->vmcb, VMCB_DR);
1819 }
1820
1821 static int pf_interception(struct vcpu_svm *svm)
1822 {
1823         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
1824         u64 error_code = svm->vmcb->control.exit_info_1;
1825
1826         return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
1827                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1828                         svm->vmcb->control.insn_bytes : NULL,
1829                         svm->vmcb->control.insn_len);
1830 }
1831
1832 static int npf_interception(struct vcpu_svm *svm)
1833 {
1834         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
1835         u64 error_code = svm->vmcb->control.exit_info_1;
1836
1837         trace_kvm_page_fault(fault_address, error_code);
1838         return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
1839                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1840                         svm->vmcb->control.insn_bytes : NULL,
1841                         svm->vmcb->control.insn_len);
1842 }
1843
1844 static int db_interception(struct vcpu_svm *svm)
1845 {
1846         struct kvm_run *kvm_run = svm->vcpu.run;
1847         struct kvm_vcpu *vcpu = &svm->vcpu;
1848
1849         if (!(svm->vcpu.guest_debug &
1850               (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
1851             !svm->nmi_singlestep) {
1852                 u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
1853                 kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
1854                 return 1;
1855         }
1856
1857         if (svm->nmi_singlestep) {
1858                 disable_nmi_singlestep(svm);
1859                 /* Make sure we check for pending NMIs upon entry */
1860                 kvm_make_request(KVM_REQ_EVENT, vcpu);
1861         }
1862
1863         if (svm->vcpu.guest_debug &
1864             (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
1865                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1866                 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
1867                 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
1868                 kvm_run->debug.arch.pc =
1869                         svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1870                 kvm_run->debug.arch.exception = DB_VECTOR;
1871                 return 0;
1872         }
1873
1874         return 1;
1875 }
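
/*
 * Editorial note, illustrative sketch only: the payload passed to
 * kvm_queue_exception_p() above is DR6 with the architecturally fixed bits
 * stripped and the active-low RTM bit normalized.  For a plain single-step
 * trap, hardware leaves save.dr6 = DR6_FIXED_1 | DR6_RTM | DR6_BS and the
 * payload reduces to just the BS bit:
 */
static u64 __maybe_unused example_db_payload(void)
{
	u64 dr6 = DR6_FIXED_1 | DR6_RTM | DR6_BS;

	return (dr6 ^ DR6_RTM) & ~DR6_FIXED_1;	/* == DR6_BS */
}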
1876
1877 static int bp_interception(struct vcpu_svm *svm)
1878 {
1879         struct kvm_run *kvm_run = svm->vcpu.run;
1880
1881         kvm_run->exit_reason = KVM_EXIT_DEBUG;
1882         kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1883         kvm_run->debug.arch.exception = BP_VECTOR;
1884         return 0;
1885 }
1886
1887 static int ud_interception(struct vcpu_svm *svm)
1888 {
1889         return handle_ud(&svm->vcpu);
1890 }
1891
1892 static int ac_interception(struct vcpu_svm *svm)
1893 {
1894         kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
1895         return 1;
1896 }
1897
1898 static int gp_interception(struct vcpu_svm *svm)
1899 {
1900         struct kvm_vcpu *vcpu = &svm->vcpu;
1901         u32 error_code = svm->vmcb->control.exit_info_1;
1902
1903         WARN_ON_ONCE(!enable_vmware_backdoor);
1904
1905         /*
1906          * VMware backdoor emulation on #GP interception only handles IN{S},
1907          * OUT{S}, and RDPMC, none of which generate a non-zero error code.
1908          */
1909         if (error_code) {
1910                 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
1911                 return 1;
1912         }
1913         return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
1914 }
1915
1916 static bool is_erratum_383(void)
1917 {
1918         int err, i;
1919         u64 value;
1920
1921         if (!erratum_383_found)
1922                 return false;
1923
1924         value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
1925         if (err)
1926                 return false;
1927
1928         /* Bit 62 may or may not be set for this mce */
1929         value &= ~(1ULL << 62);
1930
1931         if (value != 0xb600000000010015ULL)
1932                 return false;
1933
1934         /* Clear MCi_STATUS registers */
1935         for (i = 0; i < 6; ++i)
1936                 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
1937
1938         value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
1939         if (!err) {
1940                 u32 low, high;
1941
1942                 value &= ~(1ULL << 2);
1943                 low    = lower_32_bits(value);
1944                 high   = upper_32_bits(value);
1945
1946                 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
1947         }
1948
1949         /* Flush tlb to evict multi-match entries */
1950         __flush_tlb_all();
1951
1952         return true;
1953 }
1954
1955 static void svm_handle_mce(struct vcpu_svm *svm)
1956 {
1957         if (is_erratum_383()) {
1958                 /*
1959                  * Erratum 383 triggered. Guest state is corrupt so kill the
1960                  * guest.
1961                  */
1962                 pr_err("KVM: Guest triggered AMD Erratum 383\n");
1963
1964                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
1965
1966                 return;
1967         }
1968
1969         /*
1970          * On an #MC intercept the MCE handler is not called automatically in
1971          * the host. So do it by hand here.
1972          */
1973         kvm_machine_check();
1974 }
1975
1976 static int mc_interception(struct vcpu_svm *svm)
1977 {
1978         return 1;
1979 }
1980
1981 static int shutdown_interception(struct vcpu_svm *svm)
1982 {
1983         struct kvm_run *kvm_run = svm->vcpu.run;
1984
1985         /*
1986          * VMCB is undefined after a SHUTDOWN intercept
1987          * so reinitialize it.
1988          */
1989         clear_page(svm->vmcb);
1990         init_vmcb(svm);
1991
1992         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1993         return 0;
1994 }
1995
1996 static int io_interception(struct vcpu_svm *svm)
1997 {
1998         struct kvm_vcpu *vcpu = &svm->vcpu;
1999         u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2000         int size, in, string;
2001         unsigned port;
2002
2003         ++svm->vcpu.stat.io_exits;
2004         string = (io_info & SVM_IOIO_STR_MASK) != 0;
2005         in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2006         if (string)
2007                 return kvm_emulate_instruction(vcpu, 0);
2008
2009         port = io_info >> 16;
2010         size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2011         svm->next_rip = svm->vmcb->control.exit_info_2;
2012
2013         return kvm_fast_pio(&svm->vcpu, size, port, in);
2014 }
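
/*
 * Editorial note, illustrative sketch only: decoding EXITINFO1 for a sample
 * "in al, 0x60".  Hardware reports 0x00600011: the port number in bits
 * 16-31, an 8-bit operand size in the SZ field (bits 4-6) and the TYPE bit
 * (bit 0) set to mark an IN:
 */
static void __maybe_unused example_decode_ioio(void)
{
	u32 io_info = 0x00600011;
	unsigned port = io_info >> 16;					  /* 0x60 */
	int size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; /* 1 */
	int in = (io_info & SVM_IOIO_TYPE_MASK) != 0;			  /* IN */
	bool string = (io_info & SVM_IOIO_STR_MASK) != 0;		  /* no */

	(void)port; (void)size; (void)in; (void)string;
}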
2015
2016 static int nmi_interception(struct vcpu_svm *svm)
2017 {
2018         return 1;
2019 }
2020
2021 static int intr_interception(struct vcpu_svm *svm)
2022 {
2023         ++svm->vcpu.stat.irq_exits;
2024         return 1;
2025 }
2026
2027 static int nop_on_interception(struct vcpu_svm *svm)
2028 {
2029         return 1;
2030 }
2031
2032 static int halt_interception(struct vcpu_svm *svm)
2033 {
2034         return kvm_emulate_halt(&svm->vcpu);
2035 }
2036
2037 static int vmmcall_interception(struct vcpu_svm *svm)
2038 {
2039         return kvm_emulate_hypercall(&svm->vcpu);
2040 }
2041
2042 static int vmload_interception(struct vcpu_svm *svm)
2043 {
2044         struct vmcb *nested_vmcb;
2045         struct kvm_host_map map;
2046         int ret;
2047
2048         if (nested_svm_check_permissions(svm))
2049                 return 1;
2050
2051         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
2052         if (ret) {
2053                 if (ret == -EINVAL)
2054                         kvm_inject_gp(&svm->vcpu, 0);
2055                 return 1;
2056         }
2057
2058         nested_vmcb = map.hva;
2059
2060         ret = kvm_skip_emulated_instruction(&svm->vcpu);
2061
2062         nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
2063         kvm_vcpu_unmap(&svm->vcpu, &map, true);
2064
2065         return ret;
2066 }
2067
2068 static int vmsave_interception(struct vcpu_svm *svm)
2069 {
2070         struct vmcb *nested_vmcb;
2071         struct kvm_host_map map;
2072         int ret;
2073
2074         if (nested_svm_check_permissions(svm))
2075                 return 1;
2076
2077         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
2078         if (ret) {
2079                 if (ret == -EINVAL)
2080                         kvm_inject_gp(&svm->vcpu, 0);
2081                 return 1;
2082         }
2083
2084         nested_vmcb = map.hva;
2085
2086         ret = kvm_skip_emulated_instruction(&svm->vcpu);
2087
2088         nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
2089         kvm_vcpu_unmap(&svm->vcpu, &map, true);
2090
2091         return ret;
2092 }
2093
2094 static int vmrun_interception(struct vcpu_svm *svm)
2095 {
2096         if (nested_svm_check_permissions(svm))
2097                 return 1;
2098
2099         return nested_svm_vmrun(svm);
2100 }
2101
2102 void svm_set_gif(struct vcpu_svm *svm, bool value)
2103 {
2104         if (value) {
2105                 /*
2106                  * If VGIF is enabled, the STGI intercept is only added to
2107                  * detect the opening of the SMI/NMI window; remove it now.
2108                  * Likewise, clear the VINTR intercept, we will set it
2109                  * again while processing KVM_REQ_EVENT if needed.
2110                  */
2111                 if (vgif_enabled(svm))
2112                         svm_clr_intercept(svm, INTERCEPT_STGI);
2113                 if (svm_is_intercept(svm, INTERCEPT_VINTR))
2114                         svm_clear_vintr(svm);
2115
2116                 enable_gif(svm);
2117                 if (svm->vcpu.arch.smi_pending ||
2118                     svm->vcpu.arch.nmi_pending ||
2119                     kvm_cpu_has_injectable_intr(&svm->vcpu))
2120                         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2121         } else {
2122                 disable_gif(svm);
2123
2124                 /*
2125                  * After a CLGI no interrupts should come.  But if vGIF is
2126                  * in use, we still rely on the VINTR intercept (rather than
2127                  * STGI) to detect an open interrupt window.
2128                  */
2129                 if (!vgif_enabled(svm))
2130                         svm_clear_vintr(svm);
2131         }
2132 }
2133
2134 static int stgi_interception(struct vcpu_svm *svm)
2135 {
2136         int ret;
2137
2138         if (nested_svm_check_permissions(svm))
2139                 return 1;
2140
2141         ret = kvm_skip_emulated_instruction(&svm->vcpu);
2142         svm_set_gif(svm, true);
2143         return ret;
2144 }
2145
2146 static int clgi_interception(struct vcpu_svm *svm)
2147 {
2148         int ret;
2149
2150         if (nested_svm_check_permissions(svm))
2151                 return 1;
2152
2153         ret = kvm_skip_emulated_instruction(&svm->vcpu);
2154         svm_set_gif(svm, false);
2155         return ret;
2156 }
2157
2158 static int invlpga_interception(struct vcpu_svm *svm)
2159 {
2160         struct kvm_vcpu *vcpu = &svm->vcpu;
2161
2162         trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
2163                           kvm_rax_read(&svm->vcpu));
2164
2165         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2166         kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
2167
2168         return kvm_skip_emulated_instruction(&svm->vcpu);
2169 }
2170
2171 static int skinit_interception(struct vcpu_svm *svm)
2172 {
2173         trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
2174
2175         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2176         return 1;
2177 }
2178
2179 static int wbinvd_interception(struct vcpu_svm *svm)
2180 {
2181         return kvm_emulate_wbinvd(&svm->vcpu);
2182 }
2183
2184 static int xsetbv_interception(struct vcpu_svm *svm)
2185 {
2186         u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
2187         u32 index = kvm_rcx_read(&svm->vcpu);
2188
2189         if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0)
2190                 return kvm_skip_emulated_instruction(&svm->vcpu);
2192
2193         return 1;
2194 }
2195
2196 static int rdpru_interception(struct vcpu_svm *svm)
2197 {
2198         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2199         return 1;
2200 }
2201
2202 static int task_switch_interception(struct vcpu_svm *svm)
2203 {
2204         u16 tss_selector;
2205         int reason;
2206         int int_type = svm->vmcb->control.exit_int_info &
2207                 SVM_EXITINTINFO_TYPE_MASK;
2208         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2209         u32 idt_v = svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2213         bool has_error_code = false;
2214         u32 error_code = 0;
2215
2216         tss_selector = (u16)svm->vmcb->control.exit_info_1;
2217
2218         if (svm->vmcb->control.exit_info_2 &
2219             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2220                 reason = TASK_SWITCH_IRET;
2221         else if (svm->vmcb->control.exit_info_2 &
2222                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2223                 reason = TASK_SWITCH_JMP;
2224         else if (idt_v)
2225                 reason = TASK_SWITCH_GATE;
2226         else
2227                 reason = TASK_SWITCH_CALL;
2228
2229         if (reason == TASK_SWITCH_GATE) {
2230                 switch (int_type) {
2231                 case SVM_EXITINTINFO_TYPE_NMI:
2232                         svm->vcpu.arch.nmi_injected = false;
2233                         break;
2234                 case SVM_EXITINTINFO_TYPE_EXEPT:
2235                         if (svm->vmcb->control.exit_info_2 &
2236                             (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2237                                 has_error_code = true;
2238                                 error_code =
2239                                         (u32)svm->vmcb->control.exit_info_2;
2240                         }
2241                         kvm_clear_exception_queue(&svm->vcpu);
2242                         break;
2243                 case SVM_EXITINTINFO_TYPE_INTR:
2244                         kvm_clear_interrupt_queue(&svm->vcpu);
2245                         break;
2246                 default:
2247                         break;
2248                 }
2249         }
2250
2251         if (reason != TASK_SWITCH_GATE ||
2252             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2253             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2254              (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
2255                 if (!skip_emulated_instruction(&svm->vcpu))
2256                         return 0;
2257         }
2258
2259         if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2260                 int_vec = -1;
2261
2262         return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
2263                                has_error_code, error_code);
2264 }
2265
2266 static int cpuid_interception(struct vcpu_svm *svm)
2267 {
2268         return kvm_emulate_cpuid(&svm->vcpu);
2269 }
2270
2271 static int iret_interception(struct vcpu_svm *svm)
2272 {
2273         ++svm->vcpu.stat.nmi_window_exits;
2274         svm_clr_intercept(svm, INTERCEPT_IRET);
2275         svm->vcpu.arch.hflags |= HF_IRET_MASK;
2276         svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
2277         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2278         return 1;
2279 }
2280
2281 static int invd_interception(struct vcpu_svm *svm)
2282 {
2283         /* Treat an INVD instruction as a NOP and just skip it. */
2284         return kvm_skip_emulated_instruction(&svm->vcpu);
2285 }
2286
2287 static int invlpg_interception(struct vcpu_svm *svm)
2288 {
2289         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2290                 return kvm_emulate_instruction(&svm->vcpu, 0);
2291
2292         kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
2293         return kvm_skip_emulated_instruction(&svm->vcpu);
2294 }
2295
2296 static int emulate_on_interception(struct vcpu_svm *svm)
2297 {
2298         return kvm_emulate_instruction(&svm->vcpu, 0);
2299 }
2300
2301 static int rsm_interception(struct vcpu_svm *svm)
2302 {
2303         return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2);
2304 }
2305
2306 static int rdpmc_interception(struct vcpu_svm *svm)
2307 {
2308         int err;
2309
2310         if (!nrips)
2311                 return emulate_on_interception(svm);
2312
2313         err = kvm_rdpmc(&svm->vcpu);
2314         return kvm_complete_insn_gp(&svm->vcpu, err);
2315 }
2316
2317 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
2318                                             unsigned long val)
2319 {
2320         unsigned long cr0 = svm->vcpu.arch.cr0;
2321         bool ret = false;
2322
2323         if (!is_guest_mode(&svm->vcpu) ||
2324             (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
2325                 return false;
2326
2327         cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2328         val &= ~SVM_CR0_SELECTIVE_MASK;
2329
2330         if (cr0 ^ val) {
2331                 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2332                 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2333         }
2334
2335         return ret;
2336 }
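
/*
 * Editorial note, illustrative sketch only: per the APM, CR0_SEL_WRITE fires
 * for writes that change any CR0 bit other than TS or MP, which is exactly
 * the comparison above once both values are masked.  A guest write that only
 * toggles CR0.TS is therefore never reflected to L1:
 */
static bool __maybe_unused example_sel_cr0_write_intercepted(unsigned long old_cr0,
							     unsigned long new_cr0)
{
	return ((old_cr0 ^ new_cr0) & ~SVM_CR0_SELECTIVE_MASK) != 0;
}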
2337
2338 #define CR_VALID (1ULL << 63)
2339
2340 static int cr_interception(struct vcpu_svm *svm)
2341 {
2342         int reg, cr;
2343         unsigned long val;
2344         int err;
2345
2346         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2347                 return emulate_on_interception(svm);
2348
2349         if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2350                 return emulate_on_interception(svm);
2351
2352         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2353         if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2354                 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2355         else
2356                 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2357
2358         err = 0;
2359         if (cr >= 16) { /* mov to cr */
2360                 cr -= 16;
2361                 val = kvm_register_read(&svm->vcpu, reg);
2362                 trace_kvm_cr_write(cr, val);
2363                 switch (cr) {
2364                 case 0:
2365                         if (!check_selective_cr0_intercepted(svm, val))
2366                                 err = kvm_set_cr0(&svm->vcpu, val);
2367                         else
2368                                 return 1;
2369
2370                         break;
2371                 case 3:
2372                         err = kvm_set_cr3(&svm->vcpu, val);
2373                         break;
2374                 case 4:
2375                         err = kvm_set_cr4(&svm->vcpu, val);
2376                         break;
2377                 case 8:
2378                         err = kvm_set_cr8(&svm->vcpu, val);
2379                         break;
2380                 default:
2381                         WARN(1, "unhandled write to CR%d", cr);
2382                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2383                         return 1;
2384                 }
2385         } else { /* mov from cr */
2386                 switch (cr) {
2387                 case 0:
2388                         val = kvm_read_cr0(&svm->vcpu);
2389                         break;
2390                 case 2:
2391                         val = svm->vcpu.arch.cr2;
2392                         break;
2393                 case 3:
2394                         val = kvm_read_cr3(&svm->vcpu);
2395                         break;
2396                 case 4:
2397                         val = kvm_read_cr4(&svm->vcpu);
2398                         break;
2399                 case 8:
2400                         val = kvm_get_cr8(&svm->vcpu);
2401                         break;
2402                 default:
2403                         WARN(1, "unhandled read from CR%d", cr);
2404                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2405                         return 1;
2406                 }
2407                 kvm_register_write(&svm->vcpu, reg, val);
2408                 trace_kvm_cr_read(cr, val);
2409         }
2410         return kvm_complete_insn_gp(&svm->vcpu, err);
2411 }
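
/*
 * Editorial note, illustrative sketch only: the "cr >= 16" test above relies
 * on the SVM exit-code layout, where the sixteen CR-read exits immediately
 * precede the sixteen CR-write exits.  Subtracting SVM_EXIT_READ_CR0 thus
 * yields 0-15 for reads and 16-31 for writes of the same register:
 */
static int __maybe_unused example_cr_number(u32 exit_code, bool *is_write)
{
	int cr = exit_code - SVM_EXIT_READ_CR0;

	*is_write = cr >= 16;
	return cr & 15;
}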
2412
2413 static int dr_interception(struct vcpu_svm *svm)
2414 {
2415         int reg, dr;
2416         unsigned long val;
2417
2418         if (svm->vcpu.guest_debug == 0) {
2419                 /*
2420                  * No more DR vmexits; force a reload of the debug registers
2421                  * and reenter on this instruction.  The next vmexit will
2422                  * retrieve the full state of the debug registers.
2423                  */
2424                 clr_dr_intercepts(svm);
2425                 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2426                 return 1;
2427         }
2428
2429         if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2430                 return emulate_on_interception(svm);
2431
2432         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2433         dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2434
2435         if (dr >= 16) { /* mov to DRn */
2436                 if (!kvm_require_dr(&svm->vcpu, dr - 16))
2437                         return 1;
2438                 val = kvm_register_read(&svm->vcpu, reg);
2439                 kvm_set_dr(&svm->vcpu, dr - 16, val);
2440         } else {
2441                 if (!kvm_require_dr(&svm->vcpu, dr))
2442                         return 1;
2443                 kvm_get_dr(&svm->vcpu, dr, &val);
2444                 kvm_register_write(&svm->vcpu, reg, val);
2445         }
2446
2447         return kvm_skip_emulated_instruction(&svm->vcpu);
2448 }
2449
2450 static int cr8_write_interception(struct vcpu_svm *svm)
2451 {
2452         struct kvm_run *kvm_run = svm->vcpu.run;
2453         int r;
2454
2455         u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
2456         /* instruction emulation calls kvm_set_cr8() */
2457         r = cr_interception(svm);
2458         if (lapic_in_kernel(&svm->vcpu))
2459                 return r;
2460         if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
2461                 return r;
2462         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2463         return 0;
2464 }
2465
2466 static int svm_get_msr_feature(struct kvm_msr_entry *msr)
2467 {
2468         msr->data = 0;
2469
2470         switch (msr->index) {
2471         case MSR_F10H_DECFG:
2472                 if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
2473                         msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
2474                 break;
2475         case MSR_IA32_PERF_CAPABILITIES:
2476                 return 0;
2477         default:
2478                 return KVM_MSR_RET_INVALID;
2479         }
2480
2481         return 0;
2482 }
2483
2484 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2485 {
2486         struct vcpu_svm *svm = to_svm(vcpu);
2487
2488         switch (msr_info->index) {
2489         case MSR_STAR:
2490                 msr_info->data = svm->vmcb->save.star;
2491                 break;
2492 #ifdef CONFIG_X86_64
2493         case MSR_LSTAR:
2494                 msr_info->data = svm->vmcb->save.lstar;
2495                 break;
2496         case MSR_CSTAR:
2497                 msr_info->data = svm->vmcb->save.cstar;
2498                 break;
2499         case MSR_KERNEL_GS_BASE:
2500                 msr_info->data = svm->vmcb->save.kernel_gs_base;
2501                 break;
2502         case MSR_SYSCALL_MASK:
2503                 msr_info->data = svm->vmcb->save.sfmask;
2504                 break;
2505 #endif
2506         case MSR_IA32_SYSENTER_CS:
2507                 msr_info->data = svm->vmcb->save.sysenter_cs;
2508                 break;
2509         case MSR_IA32_SYSENTER_EIP:
2510                 msr_info->data = svm->sysenter_eip;
2511                 break;
2512         case MSR_IA32_SYSENTER_ESP:
2513                 msr_info->data = svm->sysenter_esp;
2514                 break;
2515         case MSR_TSC_AUX:
2516                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
2517                         return 1;
2518                 msr_info->data = svm->tsc_aux;
2519                 break;
2520         /*
2521          * Nobody will change the following 5 values in the VMCB so we can
2522          * safely return them on rdmsr. They will always be 0 until LBRV is
2523          * implemented.
2524          */
2525         case MSR_IA32_DEBUGCTLMSR:
2526                 msr_info->data = svm->vmcb->save.dbgctl;
2527                 break;
2528         case MSR_IA32_LASTBRANCHFROMIP:
2529                 msr_info->data = svm->vmcb->save.br_from;
2530                 break;
2531         case MSR_IA32_LASTBRANCHTOIP:
2532                 msr_info->data = svm->vmcb->save.br_to;
2533                 break;
2534         case MSR_IA32_LASTINTFROMIP:
2535                 msr_info->data = svm->vmcb->save.last_excp_from;
2536                 break;
2537         case MSR_IA32_LASTINTTOIP:
2538                 msr_info->data = svm->vmcb->save.last_excp_to;
2539                 break;
2540         case MSR_VM_HSAVE_PA:
2541                 msr_info->data = svm->nested.hsave_msr;
2542                 break;
2543         case MSR_VM_CR:
2544                 msr_info->data = svm->nested.vm_cr_msr;
2545                 break;
2546         case MSR_IA32_SPEC_CTRL:
2547                 if (!msr_info->host_initiated &&
2548                     !guest_has_spec_ctrl_msr(vcpu))
2549                         return 1;
2550
2551                 msr_info->data = svm->spec_ctrl;
2552                 break;
2553         case MSR_AMD64_VIRT_SPEC_CTRL:
2554                 if (!msr_info->host_initiated &&
2555                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2556                         return 1;
2557
2558                 msr_info->data = svm->virt_spec_ctrl;
2559                 break;
2560         case MSR_F15H_IC_CFG: {
2561                 int family, model;
2563
2564                 family = guest_cpuid_family(vcpu);
2565                 model  = guest_cpuid_model(vcpu);
2566
2567                 if (family < 0 || model < 0)
2568                         return kvm_get_msr_common(vcpu, msr_info);
2569
2570                 msr_info->data = 0;
2571
2572                 if (family == 0x15 &&
2573                     (model >= 0x2 && model < 0x20))
2574                         msr_info->data = 0x1E;
2575                 }
2576                 break;
2577         case MSR_F10H_DECFG:
2578                 msr_info->data = svm->msr_decfg;
2579                 break;
2580         default:
2581                 return kvm_get_msr_common(vcpu, msr_info);
2582         }
2583         return 0;
2584 }
2585
2586 static int rdmsr_interception(struct vcpu_svm *svm)
2587 {
2588         return kvm_emulate_rdmsr(&svm->vcpu);
2589 }
2590
2591 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2592 {
2593         struct vcpu_svm *svm = to_svm(vcpu);
2594         int svm_dis, chg_mask;
2595
2596         if (data & ~SVM_VM_CR_VALID_MASK)
2597                 return 1;
2598
2599         chg_mask = SVM_VM_CR_VALID_MASK;
2600
2601         if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2602                 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2603
2604         svm->nested.vm_cr_msr &= ~chg_mask;
2605         svm->nested.vm_cr_msr |= (data & chg_mask);
2606
2607         svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2608
2609         /* check for svm_disable while efer.svme is set */
2610         if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2611                 return 1;
2612
2613         return 0;
2614 }
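
/*
 * Editorial note, illustrative sketch only: once the guest sets
 * VM_CR.SVMDIS, svm_set_vm_cr() above drops both SVM_LOCK and SVMDIS from
 * chg_mask, so neither bit can be flipped back by the guest; the latch is
 * one-way until reset.  The writable set is computed like this:
 */
static u32 __maybe_unused example_vm_cr_writable_bits(u32 cur_vm_cr)
{
	u32 chg_mask = SVM_VM_CR_VALID_MASK;

	if (cur_vm_cr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	return chg_mask;
}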
2615
2616 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2617 {
2618         struct vcpu_svm *svm = to_svm(vcpu);
2619
2620         u32 ecx = msr->index;
2621         u64 data = msr->data;
2622         switch (ecx) {
2623         case MSR_IA32_CR_PAT:
2624                 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
2625                         return 1;
2626                 vcpu->arch.pat = data;
2627                 svm->vmcb->save.g_pat = data;
2628                 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
2629                 break;
2630         case MSR_IA32_SPEC_CTRL:
2631                 if (!msr->host_initiated &&
2632                     !guest_has_spec_ctrl_msr(vcpu))
2633                         return 1;
2634
2635                 if (kvm_spec_ctrl_test_value(data))
2636                         return 1;
2637
2638                 svm->spec_ctrl = data;
2639                 if (!data)
2640                         break;
2641
2642                 /*
2643                  * For non-nested:
2644                  * When it's written (to non-zero) for the first time, pass
2645                  * it through.
2646                  *
2647                  * For nested:
2648                  * The handling of the MSR bitmap for L2 guests is done in
2649                  * nested_svm_vmrun_msrpm.
2650                  * We update the L1 MSR bit as well since it will end up
2651                  * touching the MSR anyway now.
2652                  */
2653                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
2654                 break;
2655         case MSR_IA32_PRED_CMD:
2656                 if (!msr->host_initiated &&
2657                     !guest_has_pred_cmd_msr(vcpu))
2658                         return 1;
2659
2660                 if (data & ~PRED_CMD_IBPB)
2661                         return 1;
2662                 if (!boot_cpu_has(X86_FEATURE_IBPB))
2663                         return 1;
2664                 if (!data)
2665                         break;
2666
2667                 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
2668                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
2669                 break;
2670         case MSR_AMD64_VIRT_SPEC_CTRL:
2671                 if (!msr->host_initiated &&
2672                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2673                         return 1;
2674
2675                 if (data & ~SPEC_CTRL_SSBD)
2676                         return 1;
2677
2678                 svm->virt_spec_ctrl = data;
2679                 break;
2680         case MSR_STAR:
2681                 svm->vmcb->save.star = data;
2682                 break;
2683 #ifdef CONFIG_X86_64
2684         case MSR_LSTAR:
2685                 svm->vmcb->save.lstar = data;
2686                 break;
2687         case MSR_CSTAR:
2688                 svm->vmcb->save.cstar = data;
2689                 break;
2690         case MSR_KERNEL_GS_BASE:
2691                 svm->vmcb->save.kernel_gs_base = data;
2692                 break;
2693         case MSR_SYSCALL_MASK:
2694                 svm->vmcb->save.sfmask = data;
2695                 break;
2696 #endif
2697         case MSR_IA32_SYSENTER_CS:
2698                 svm->vmcb->save.sysenter_cs = data;
2699                 break;
2700         case MSR_IA32_SYSENTER_EIP:
2701                 svm->sysenter_eip = data;
2702                 svm->vmcb->save.sysenter_eip = data;
2703                 break;
2704         case MSR_IA32_SYSENTER_ESP:
2705                 svm->sysenter_esp = data;
2706                 svm->vmcb->save.sysenter_esp = data;
2707                 break;
2708         case MSR_TSC_AUX:
2709                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
2710                         return 1;
2711
2712                 /*
2713                  * This is rare, so we update the MSR here instead of using
2714                  * direct_access_msrs.  Doing that would require a rdmsr in
2715                  * svm_vcpu_put.
2716                  */
2717                 svm->tsc_aux = data;
2718                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
2719                 break;
2720         case MSR_IA32_DEBUGCTLMSR:
2721                 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
2722                         vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
2723                                     __func__, data);
2724                         break;
2725                 }
2726                 if (data & DEBUGCTL_RESERVED_BITS)
2727                         return 1;
2728
2729                 svm->vmcb->save.dbgctl = data;
2730                 vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
2731                 if (data & (1ULL<<0))
2732                         svm_enable_lbrv(vcpu);
2733                 else
2734                         svm_disable_lbrv(vcpu);
2735                 break;
2736         case MSR_VM_HSAVE_PA:
2737                 svm->nested.hsave_msr = data;
2738                 break;
2739         case MSR_VM_CR:
2740                 return svm_set_vm_cr(vcpu, data);
2741         case MSR_VM_IGNNE:
2742                 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
2743                 break;
2744         case MSR_F10H_DECFG: {
2745                 struct kvm_msr_entry msr_entry;
2746
2747                 msr_entry.index = msr->index;
2748                 if (svm_get_msr_feature(&msr_entry))
2749                         return 1;
2750
2751                 /* Check the supported bits */
2752                 if (data & ~msr_entry.data)
2753                         return 1;
2754
2755                 /* Don't allow the guest to change a bit, #GP */
2756                 if (!msr->host_initiated && (data ^ msr_entry.data))
2757                         return 1;
2758
2759                 svm->msr_decfg = data;
2760                 break;
2761         }
2762         case MSR_IA32_APICBASE:
2763                 if (kvm_vcpu_apicv_active(vcpu))
2764                         avic_update_vapic_bar(to_svm(vcpu), data);
2765                 fallthrough;
2766         default:
2767                 return kvm_set_msr_common(vcpu, msr);
2768         }
2769         return 0;
2770 }
2771
2772 static int wrmsr_interception(struct vcpu_svm *svm)
2773 {
2774         return kvm_emulate_wrmsr(&svm->vcpu);
2775 }
2776
2777 static int msr_interception(struct vcpu_svm *svm)
2778 {
2779         if (svm->vmcb->control.exit_info_1)
2780                 return wrmsr_interception(svm);
2781         else
2782                 return rdmsr_interception(svm);
2783 }
2784
2785 static int interrupt_window_interception(struct vcpu_svm *svm)
2786 {
2787         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2788         svm_clear_vintr(svm);
2789
2790         /*
2791          * For AVIC, the only reason to end up here is ExtINTs.
2792          * In this case AVIC was temporarily disabled in order to
2793          * request the IRQ window, and we have to re-enable it.
2794          */
2795         svm_toggle_avic_for_irq_window(&svm->vcpu, true);
2796
2797         ++svm->vcpu.stat.irq_window_exits;
2798         return 1;
2799 }
2800
2801 static int pause_interception(struct vcpu_svm *svm)
2802 {
2803         struct kvm_vcpu *vcpu = &svm->vcpu;
2804         bool in_kernel = (svm_get_cpl(vcpu) == 0);
2805
2806         if (!kvm_pause_in_guest(vcpu->kvm))
2807                 grow_ple_window(vcpu);
2808
2809         kvm_vcpu_on_spin(vcpu, in_kernel);
2810         return 1;
2811 }
2812
2813 static int nop_interception(struct vcpu_svm *svm)
2814 {
2815         return kvm_skip_emulated_instruction(&(svm->vcpu));
2816 }
2817
2818 static int monitor_interception(struct vcpu_svm *svm)
2819 {
2820         printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
2821         return nop_interception(svm);
2822 }
2823
2824 static int mwait_interception(struct vcpu_svm *svm)
2825 {
2826         printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
2827         return nop_interception(svm);
2828 }
2829
2830 static int invpcid_interception(struct vcpu_svm *svm)
2831 {
2832         struct kvm_vcpu *vcpu = &svm->vcpu;
2833         unsigned long type;
2834         gva_t gva;
2835
2836         if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
2837                 kvm_queue_exception(vcpu, UD_VECTOR);
2838                 return 1;
2839         }
2840
2841         /*
2842          * For an INVPCID intercept:
2843          * EXITINFO1 provides the linear address of the memory operand.
2844          * EXITINFO2 provides the contents of the register operand.
2845          */
2846         type = svm->vmcb->control.exit_info_2;
2847         gva = svm->vmcb->control.exit_info_1;
2848
2849         if (type > 3) {
2850                 kvm_inject_gp(vcpu, 0);
2851                 return 1;
2852         }
2853
2854         return kvm_handle_invpcid(vcpu, type, gva);
2855 }
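
/*
 * Editorial note, illustrative sketch only: the four architectural INVPCID
 * types that survive the "type > 3" check above.  The enum names are made up
 * for this sketch; only the numeric values are architectural:
 */
enum example_invpcid_type {
	EXAMPLE_INVPCID_INDIV_ADDR	= 0,	/* single address, one PCID */
	EXAMPLE_INVPCID_SINGLE_CTXT	= 1,	/* all mappings of one PCID */
	EXAMPLE_INVPCID_ALL_GLOBAL	= 2,	/* everything, incl. globals */
	EXAMPLE_INVPCID_ALL_NON_GLOBAL	= 3,	/* everything but globals */
};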
2856
2857 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
2858         [SVM_EXIT_READ_CR0]                     = cr_interception,
2859         [SVM_EXIT_READ_CR3]                     = cr_interception,
2860         [SVM_EXIT_READ_CR4]                     = cr_interception,
2861         [SVM_EXIT_READ_CR8]                     = cr_interception,
2862         [SVM_EXIT_CR0_SEL_WRITE]                = cr_interception,
2863         [SVM_EXIT_WRITE_CR0]                    = cr_interception,
2864         [SVM_EXIT_WRITE_CR3]                    = cr_interception,
2865         [SVM_EXIT_WRITE_CR4]                    = cr_interception,
2866         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
2867         [SVM_EXIT_READ_DR0]                     = dr_interception,
2868         [SVM_EXIT_READ_DR1]                     = dr_interception,
2869         [SVM_EXIT_READ_DR2]                     = dr_interception,
2870         [SVM_EXIT_READ_DR3]                     = dr_interception,
2871         [SVM_EXIT_READ_DR4]                     = dr_interception,
2872         [SVM_EXIT_READ_DR5]                     = dr_interception,
2873         [SVM_EXIT_READ_DR6]                     = dr_interception,
2874         [SVM_EXIT_READ_DR7]                     = dr_interception,
2875         [SVM_EXIT_WRITE_DR0]                    = dr_interception,
2876         [SVM_EXIT_WRITE_DR1]                    = dr_interception,
2877         [SVM_EXIT_WRITE_DR2]                    = dr_interception,
2878         [SVM_EXIT_WRITE_DR3]                    = dr_interception,
2879         [SVM_EXIT_WRITE_DR4]                    = dr_interception,
2880         [SVM_EXIT_WRITE_DR5]                    = dr_interception,
2881         [SVM_EXIT_WRITE_DR6]                    = dr_interception,
2882         [SVM_EXIT_WRITE_DR7]                    = dr_interception,
2883         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
2884         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
2885         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
2886         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
2887         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
2888         [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
2889         [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
2890         [SVM_EXIT_INTR]                         = intr_interception,
2891         [SVM_EXIT_NMI]                          = nmi_interception,
2892         [SVM_EXIT_SMI]                          = nop_on_interception,
2893         [SVM_EXIT_INIT]                         = nop_on_interception,
2894         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
2895         [SVM_EXIT_RDPMC]                        = rdpmc_interception,
2896         [SVM_EXIT_CPUID]                        = cpuid_interception,
2897         [SVM_EXIT_IRET]                         = iret_interception,
2898         [SVM_EXIT_INVD]                         = invd_interception,
2899         [SVM_EXIT_PAUSE]                        = pause_interception,
2900         [SVM_EXIT_HLT]                          = halt_interception,
2901         [SVM_EXIT_INVLPG]                       = invlpg_interception,
2902         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
2903         [SVM_EXIT_IOIO]                         = io_interception,
2904         [SVM_EXIT_MSR]                          = msr_interception,
2905         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
2906         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
2907         [SVM_EXIT_VMRUN]                        = vmrun_interception,
2908         [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
2909         [SVM_EXIT_VMLOAD]                       = vmload_interception,
2910         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
2911         [SVM_EXIT_STGI]                         = stgi_interception,
2912         [SVM_EXIT_CLGI]                         = clgi_interception,
2913         [SVM_EXIT_SKINIT]                       = skinit_interception,
2914         [SVM_EXIT_WBINVD]                       = wbinvd_interception,
2915         [SVM_EXIT_MONITOR]                      = monitor_interception,
2916         [SVM_EXIT_MWAIT]                        = mwait_interception,
2917         [SVM_EXIT_XSETBV]                       = xsetbv_interception,
2918         [SVM_EXIT_RDPRU]                        = rdpru_interception,
2919         [SVM_EXIT_INVPCID]                      = invpcid_interception,
2920         [SVM_EXIT_NPF]                          = npf_interception,
2921         [SVM_EXIT_RSM]                          = rsm_interception,
2922         [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
2923         [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
2924 };
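
/*
 * Editorial note, illustrative sketch only: how an exit code is expected to
 * be dispatched through the table above.  The real logic, including the VMCB
 * dump on an unknown exit, lives in handle_exit() below; the core shape is:
 */
static int __maybe_unused example_dispatch_exit(struct vcpu_svm *svm,
						u32 exit_code)
{
	if (exit_code >= ARRAY_SIZE(svm_exit_handlers) ||
	    !svm_exit_handlers[exit_code])
		return 0;	/* unknown exit -> error out to userspace */

	return svm_exit_handlers[exit_code](svm);
}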
2925
2926 static void dump_vmcb(struct kvm_vcpu *vcpu)
2927 {
2928         struct vcpu_svm *svm = to_svm(vcpu);
2929         struct vmcb_control_area *control = &svm->vmcb->control;
2930         struct vmcb_save_area *save = &svm->vmcb->save;
2931
2932         if (!dump_invalid_vmcb) {
2933                 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
2934                 return;
2935         }
2936
2937         pr_err("VMCB Control Area:\n");
2938         pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
2939         pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
2940         pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
2941         pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
2942         pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
2943         pr_err("%-20s%08x %08x\n", "intercepts:",
2944                control->intercepts[INTERCEPT_WORD3],
2945                control->intercepts[INTERCEPT_WORD4]);
2946         pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
2947         pr_err("%-20s%d\n", "pause filter threshold:",
2948                control->pause_filter_thresh);
2949         pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
2950         pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
2951         pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
2952         pr_err("%-20s%d\n", "asid:", control->asid);
2953         pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
2954         pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
2955         pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
2956         pr_err("%-20s%08x\n", "int_state:", control->int_state);
2957         pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
2958         pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
2959         pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
2960         pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
2961         pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
2962         pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
2963         pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
2964         pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
2965         pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
2966         pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
2967         pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
2968         pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
2969         pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
2970         pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
2971         pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
2972         pr_err("VMCB State Save Area:\n");
2973         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2974                "es:",
2975                save->es.selector, save->es.attrib,
2976                save->es.limit, save->es.base);
2977         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2978                "cs:",
2979                save->cs.selector, save->cs.attrib,
2980                save->cs.limit, save->cs.base);
2981         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2982                "ss:",
2983                save->ss.selector, save->ss.attrib,
2984                save->ss.limit, save->ss.base);
2985         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2986                "ds:",
2987                save->ds.selector, save->ds.attrib,
2988                save->ds.limit, save->ds.base);
2989         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2990                "fs:",
2991                save->fs.selector, save->fs.attrib,
2992                save->fs.limit, save->fs.base);
2993         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2994                "gs:",
2995                save->gs.selector, save->gs.attrib,
2996                save->gs.limit, save->gs.base);
2997         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2998                "gdtr:",
2999                save->gdtr.selector, save->gdtr.attrib,
3000                save->gdtr.limit, save->gdtr.base);
3001         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3002                "ldtr:",
3003                save->ldtr.selector, save->ldtr.attrib,
3004                save->ldtr.limit, save->ldtr.base);
3005         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3006                "idtr:",
3007                save->idtr.selector, save->idtr.attrib,
3008                save->idtr.limit, save->idtr.base);
3009         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3010                "tr:",
3011                save->tr.selector, save->tr.attrib,
3012                save->tr.limit, save->tr.base);
3013         pr_err("cpl:            %d                efer:         %016llx\n",
3014                 save->cpl, save->efer);
3015         pr_err("%-15s %016llx %-13s %016llx\n",
3016                "cr0:", save->cr0, "cr2:", save->cr2);
3017         pr_err("%-15s %016llx %-13s %016llx\n",
3018                "cr3:", save->cr3, "cr4:", save->cr4);
3019         pr_err("%-15s %016llx %-13s %016llx\n",
3020                "dr6:", save->dr6, "dr7:", save->dr7);
3021         pr_err("%-15s %016llx %-13s %016llx\n",
3022                "rip:", save->rip, "rflags:", save->rflags);
3023         pr_err("%-15s %016llx %-13s %016llx\n",
3024                "rsp:", save->rsp, "rax:", save->rax);
3025         pr_err("%-15s %016llx %-13s %016llx\n",
3026                "star:", save->star, "lstar:", save->lstar);
3027         pr_err("%-15s %016llx %-13s %016llx\n",
3028                "cstar:", save->cstar, "sfmask:", save->sfmask);
3029         pr_err("%-15s %016llx %-13s %016llx\n",
3030                "kernel_gs_base:", save->kernel_gs_base,
3031                "sysenter_cs:", save->sysenter_cs);
3032         pr_err("%-15s %016llx %-13s %016llx\n",
3033                "sysenter_esp:", save->sysenter_esp,
3034                "sysenter_eip:", save->sysenter_eip);
3035         pr_err("%-15s %016llx %-13s %016llx\n",
3036                "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3037         pr_err("%-15s %016llx %-13s %016llx\n",
3038                "br_from:", save->br_from, "br_to:", save->br_to);
3039         pr_err("%-15s %016llx %-13s %016llx\n",
3040                "excp_from:", save->last_excp_from,
3041                "excp_to:", save->last_excp_to);
3042 }
3043
3044 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
3045                               u32 *intr_info, u32 *error_code)
3046 {
3047         struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3048
3049         *info1 = control->exit_info_1;
3050         *info2 = control->exit_info_2;
3051         *intr_info = control->exit_int_info;
3052         if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3053             (*intr_info & SVM_EXITINTINFO_VALID_ERR))
3054                 *error_code = control->exit_int_info_err;
3055         else
3056                 *error_code = 0;
3057 }
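
/*
 * For illustration only (not part of this file): a hypothetical caller
 * retrieves the decoded exit details like so.  A minimal sketch; only
 * svm_get_exit_info() above is real, trace_svm_exit_sketch() is our own
 * invented name.
 */
static inline void trace_svm_exit_sketch(struct kvm_vcpu *vcpu)
{
        u64 info1, info2;
        u32 intr_info, error_code;

        svm_get_exit_info(vcpu, &info1, &info2, &intr_info, &error_code);
        pr_debug("exit_info1=%016llx exit_info2=%016llx intr_info=%08x err=%08x\n",
                 info1, info2, intr_info, error_code);
}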
3058
3059 static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
3060 {
3061         struct vcpu_svm *svm = to_svm(vcpu);
3062         struct kvm_run *kvm_run = vcpu->run;
3063         u32 exit_code = svm->vmcb->control.exit_code;
3064
3065         trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
3066
3067         if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
3068                 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3069         if (npt_enabled)
3070                 vcpu->arch.cr3 = svm->vmcb->save.cr3;
3071
3072         if (is_guest_mode(vcpu)) {
3073                 int vmexit;
3074
3075                 trace_kvm_nested_vmexit(exit_code, vcpu, KVM_ISA_SVM);
3076
3077                 vmexit = nested_svm_exit_special(svm);
3078
3079                 if (vmexit == NESTED_EXIT_CONTINUE)
3080                         vmexit = nested_svm_exit_handled(svm);
3081
3082                 if (vmexit == NESTED_EXIT_DONE)
3083                         return 1;
3084         }
3085
3086         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3087                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3088                 kvm_run->fail_entry.hardware_entry_failure_reason
3089                         = svm->vmcb->control.exit_code;
3090                 kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
3091                 dump_vmcb(vcpu);
3092                 return 0;
3093         }
3094
3095         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
3096             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
3097             exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
3098             exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
3099                 printk(KERN_ERR
3100                        "%s: unexpected exit_int_info 0x%x exit_code 0x%x\n",
3101                        __func__, svm->vmcb->control.exit_int_info,
3102                        exit_code);
3103
3104         if (exit_fastpath != EXIT_FASTPATH_NONE)
3105                 return 1;
3106
3107         if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
3108             || !svm_exit_handlers[exit_code]) {
3109                 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
3110                 dump_vmcb(vcpu);
3111                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3112                 vcpu->run->internal.suberror =
3113                         KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
3114                 vcpu->run->internal.ndata = 2;
3115                 vcpu->run->internal.data[0] = exit_code;
3116                 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
3117                 return 0;
3118         }
3119
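        /*
         * With retpolines enabled, the indirect call through
         * svm_exit_handlers[] below is compiled into a (slow) retpoline
         * thunk, so dispatch the hottest exit reasons with direct calls
         * to avoid that cost.
         */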
3120 #ifdef CONFIG_RETPOLINE
3121         if (exit_code == SVM_EXIT_MSR)
3122                 return msr_interception(svm);
3123         else if (exit_code == SVM_EXIT_VINTR)
3124                 return interrupt_window_interception(svm);
3125         else if (exit_code == SVM_EXIT_INTR)
3126                 return intr_interception(svm);
3127         else if (exit_code == SVM_EXIT_HLT)
3128                 return halt_interception(svm);
3129         else if (exit_code == SVM_EXIT_NPF)
3130                 return npf_interception(svm);
3131 #endif
3132         return svm_exit_handlers[exit_code](svm);
3133 }
3134
3135 static void reload_tss(struct kvm_vcpu *vcpu)
3136 {
3137         struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
3138
3139         sd->tss_desc->type = 9; /* available 32/64-bit TSS */
3140         load_TR_desc();
3141 }
3142
3143 static void pre_svm_run(struct vcpu_svm *svm)
3144 {
3145         struct svm_cpu_data *sd = per_cpu(svm_data, svm->vcpu.cpu);
3146
3147         if (sev_guest(svm->vcpu.kvm))
3148                 return pre_sev_run(svm, svm->vcpu.cpu);
3149
3150         /* FIXME: handle wraparound of asid_generation */
3151         if (svm->asid_generation != sd->asid_generation)
3152                 new_asid(svm, sd);
3153 }
3154
3155 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3156 {
3157         struct vcpu_svm *svm = to_svm(vcpu);
3158
3159         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3160         vcpu->arch.hflags |= HF_NMI_MASK;
3161         svm_set_intercept(svm, INTERCEPT_IRET);
3162         ++vcpu->stat.nmi_injections;
3163 }
3164
3165 static void svm_set_irq(struct kvm_vcpu *vcpu)
3166 {
3167         struct vcpu_svm *svm = to_svm(vcpu);
3168
3169         BUG_ON(!(gif_set(svm)));
3170
3171         trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
3172         ++vcpu->stat.irq_injections;
3173
3174         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3175                 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
3176 }
3177
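/*
 * Intercept CR8 writes only while the highest pending interrupt (irr) is
 * masked by the current task priority (tpr): a TPR write is then the event
 * that can unblock delivery, so KVM must see it.
 */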
3178 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3179 {
3180         struct vcpu_svm *svm = to_svm(vcpu);
3181
3182         if (nested_svm_virtualize_tpr(vcpu))
3183                 return;
3184
3185         svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
3186
3187         if (irr == -1)
3188                 return;
3189
3190         if (tpr >= irr)
3191                 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
3192 }
3193
3194 bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
3195 {
3196         struct vcpu_svm *svm = to_svm(vcpu);
3197         struct vmcb *vmcb = svm->vmcb;
3198         bool ret;
3199
3200         if (!gif_set(svm))
3201                 return true;
3202
3203         if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3204                 return false;
3205
3206         ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
3207               (svm->vcpu.arch.hflags & HF_NMI_MASK);
3208
3209         return ret;
3210 }
3211
3212 static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3213 {
3214         struct vcpu_svm *svm = to_svm(vcpu);
3215         if (svm->nested.nested_run_pending)
3216                 return -EBUSY;
3217
3218         /* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
3219         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3220                 return -EBUSY;
3221
3222         return !svm_nmi_blocked(vcpu);
3223 }
3224
3225 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3226 {
3227         struct vcpu_svm *svm = to_svm(vcpu);
3228
3229         return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
3230 }
3231
3232 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3233 {
3234         struct vcpu_svm *svm = to_svm(vcpu);
3235
3236         if (masked) {
3237                 svm->vcpu.arch.hflags |= HF_NMI_MASK;
3238                 svm_set_intercept(svm, INTERCEPT_IRET);
3239         } else {
3240                 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
3241                 svm_clr_intercept(svm, INTERCEPT_IRET);
3242         }
3243 }
3244
3245 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
3246 {
3247         struct vcpu_svm *svm = to_svm(vcpu);
3248         struct vmcb *vmcb = svm->vmcb;
3249
3250         if (!gif_set(svm))
3251                 return true;
3252
3253         if (is_guest_mode(vcpu)) {
3254                 /* As long as interrupts are being delivered...  */
3255                 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
3256                     ? !(svm->nested.hsave->save.rflags & X86_EFLAGS_IF)
3257                     : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
3258                         return true;
3259
3260                 /* ... vmexits aren't blocked by the interrupt shadow  */
3261                 if (nested_exit_on_intr(svm))
3262                         return false;
3263         } else {
3264                 if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
3265                         return true;
3266         }
3267
3268         return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
3269 }
3270
3271 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3272 {
3273         struct vcpu_svm *svm = to_svm(vcpu);
3274         if (svm->nested.nested_run_pending)
3275                 return -EBUSY;
3276
3277         /*
3278          * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
3279          * e.g. if the IRQ arrived asynchronously after checking nested events.
3280          */
3281         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
3282                 return -EBUSY;
3283
3284         return !svm_interrupt_blocked(vcpu);
3285 }
3286
3287 static void enable_irq_window(struct kvm_vcpu *vcpu)
3288 {
3289         struct vcpu_svm *svm = to_svm(vcpu);
3290
3291         /*
3292          * When GIF=0 we can't rely on the CPU to tell us when GIF becomes
3293          * 1, because that's a separate STGI/VMRUN intercept.  The next time
3294          * we get that intercept, this function will be called again and
3295          * we'll set the VINTR intercept. However, if the vGIF feature is
3296          * enabled, STGI is not intercepted, so enable the IRQ window under
3297          * the assumption that the hardware will set GIF.
3298          */
3299         if (vgif_enabled(svm) || gif_set(svm)) {
3300                 /*
3301                  * An IRQ window is not needed when AVIC is enabled,
3302                  * unless there is a pending ExtINT, which cannot be injected
3303                  * via AVIC. In that case, temporarily disable AVIC and fall
3304                  * back to injecting the IRQ via V_IRQ.
3305                  */
3306                 svm_toggle_avic_for_irq_window(vcpu, false);
3307                 svm_set_vintr(svm);
3308         }
3309 }
3310
3311 static void enable_nmi_window(struct kvm_vcpu *vcpu)
3312 {
3313         struct vcpu_svm *svm = to_svm(vcpu);
3314
3315         if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
3316             == HF_NMI_MASK)
3317                 return; /* IRET will cause a vm exit */
3318
3319         if (!gif_set(svm)) {
3320                 if (vgif_enabled(svm))
3321                         svm_set_intercept(svm, INTERCEPT_STGI);
3322                 return; /* STGI will cause a vm exit */
3323         }
3324
3325         /*
3326          * Something prevents the NMI from being injected. Single step over
3327          * the blocking condition (IRET, exception injection or interrupt shadow).
3328          */
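        /*
         * RF is set along with TF so that an instruction breakpoint at the
         * current RIP is suppressed for one instruction and the single step
         * can make forward progress.
         */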
3329         svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
3330         svm->nmi_singlestep = true;
3331         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3332 }
3333
3334 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3335 {
3336         return 0;
3337 }
3338
3339 static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
3340 {
3341         return 0;
3342 }
3343
3344 void svm_flush_tlb(struct kvm_vcpu *vcpu)
3345 {
3346         struct vcpu_svm *svm = to_svm(vcpu);
3347
3348         /*
3349          * Flush only the current ASID even if the TLB flush was invoked via
3350          * kvm_flush_remote_tlbs().  Although flushing remote TLBs requires all
3351          * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
3352          * unconditionally does a TLB flush on both nested VM-Enter and nested
3353          * VM-Exit (via kvm_mmu_reset_context()).
3354          */
3355         if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3356                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3357         else
3358                 svm->asid_generation--;
3359 }
3360
3361 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
3362 {
3363         struct vcpu_svm *svm = to_svm(vcpu);
3364
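        /* INVLPGA flushes the mappings of @gva for the given ASID only. */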
3365         invlpga(gva, svm->vmcb->control.asid);
3366 }
3367
3368 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
3369 {
3370 }
3371
3372 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3373 {
3374         struct vcpu_svm *svm = to_svm(vcpu);
3375
3376         if (nested_svm_virtualize_tpr(vcpu))
3377                 return;
3378
3379         if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
3380                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3381                 kvm_set_cr8(vcpu, cr8);
3382         }
3383 }
3384
3385 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3386 {
3387         struct vcpu_svm *svm = to_svm(vcpu);
3388         u64 cr8;
3389
3390         if (nested_svm_virtualize_tpr(vcpu) ||
3391             kvm_vcpu_apicv_active(vcpu))
3392                 return;
3393
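        /* V_TPR occupies the low four bits of int_ctl and mirrors CR8. */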
3394         cr8 = kvm_get_cr8(vcpu);
3395         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3396         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3397 }
3398
3399 static void svm_complete_interrupts(struct vcpu_svm *svm)
3400 {
3401         u8 vector;
3402         int type;
3403         u32 exitintinfo = svm->vmcb->control.exit_int_info;
3404         unsigned int3_injected = svm->int3_injected;
3405
3406         svm->int3_injected = 0;
3407
3408         /*
3409          * If we've made progress since setting HF_IRET_MASK, we've
3410          * executed an IRET and can allow NMI injection.
3411          */
3412         if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
3413             && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
3414                 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3415                 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3416         }
3417
3418         svm->vcpu.arch.nmi_injected = false;
3419         kvm_clear_exception_queue(&svm->vcpu);
3420         kvm_clear_interrupt_queue(&svm->vcpu);
3421
3422         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3423                 return;
3424
3425         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3426
3427         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3428         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3429
3430         switch (type) {
3431         case SVM_EXITINTINFO_TYPE_NMI:
3432                 svm->vcpu.arch.nmi_injected = true;
3433                 break;
3434         case SVM_EXITINTINFO_TYPE_EXEPT:
3435                 /*
3436                  * In case of software exceptions, do not reinject the vector,
3437                  * but re-execute the instruction instead. Rewind RIP first
3438                  * if we emulated INT3 before.
3439                  */
3440                 if (kvm_exception_is_soft(vector)) {
3441                         if (vector == BP_VECTOR && int3_injected &&
3442                             kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
3443                                 kvm_rip_write(&svm->vcpu,
3444                                               kvm_rip_read(&svm->vcpu) -
3445                                               int3_injected);
3446                         break;
3447                 }
3448                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3449                         u32 err = svm->vmcb->control.exit_int_info_err;
3450                         kvm_requeue_exception_e(&svm->vcpu, vector, err);
3451
3452                 } else
3453                         kvm_requeue_exception(&svm->vcpu, vector);
3454                 break;
3455         case SVM_EXITINTINFO_TYPE_INTR:
3456                 kvm_queue_interrupt(&svm->vcpu, vector, false);
3457                 break;
3458         default:
3459                 break;
3460         }
3461 }
3462
3463 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3464 {
3465         struct vcpu_svm *svm = to_svm(vcpu);
3466         struct vmcb_control_area *control = &svm->vmcb->control;
3467
3468         control->exit_int_info = control->event_inj;
3469         control->exit_int_info_err = control->event_inj_err;
3470         control->event_inj = 0;
3471         svm_complete_interrupts(svm);
3472 }
3473
3474 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
3475 {
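        /*
         * exit_info_1 is nonzero for WRMSR and zero for RDMSR; only MSR
         * writes have an irqoff fastpath.
         */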
3476         if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
3477             to_svm(vcpu)->vmcb->control.exit_info_1)
3478                 return handle_fastpath_set_msr_irqoff(vcpu);
3479
3480         return EXIT_FASTPATH_NONE;
3481 }
3482
3483 void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
3484
3485 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
3486                                         struct vcpu_svm *svm)
3487 {
3488         /*
3489          * VMENTER enables interrupts (host state), but the kernel state is
3490          * interrupts disabled when this is invoked. Also tell RCU about
3491          * it. This is the same logic as for exit_to_user_mode().
3492          *
3493          * This ensures that e.g. latency analysis on the host observes
3494          * guest mode as interrupt enabled.
3495          *
3496          * guest_enter_irqoff() informs context tracking about the
3497          * transition to guest mode and if enabled adjusts RCU state
3498          * accordingly.
3499          */
3500         instrumentation_begin();
3501         trace_hardirqs_on_prepare();
3502         lockdep_hardirqs_on_prepare(CALLER_ADDR0);
3503         instrumentation_end();
3504
3505         guest_enter_irqoff();
3506         lockdep_hardirqs_on(CALLER_ADDR0);
3507
3508         __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
3509
3510 #ifdef CONFIG_X86_64
3511         native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
3512 #else
3513         loadsegment(fs, svm->host.fs);
3514 #ifndef CONFIG_X86_32_LAZY_GS
3515         loadsegment(gs, svm->host.gs);
3516 #endif
3517 #endif
3518
3519         /*
3520          * VMEXIT disables interrupts (host state), but tracing and lockdep
3521          * have them in state 'on' as recorded before entering guest mode.
3522          * Same as enter_from_user_mode().
3523          *
3524          * guest_exit_irqoff() restores host context and reinstates RCU if
3525          * enabled and required.
3526          *
3527          * This needs to be done before the code below, as native_read_msr()
3528          * contains a tracepoint and x86_spec_ctrl_restore_host() calls into
3529          * other instrumentable code as well.
3530          */
3531         lockdep_hardirqs_off(CALLER_ADDR0);
3532         guest_exit_irqoff();
3533
3534         instrumentation_begin();
3535         trace_hardirqs_off_finish();
3536         instrumentation_end();
3537 }
3538
3539 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
3540 {
3541         struct vcpu_svm *svm = to_svm(vcpu);
3542
3543         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3544         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3545         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3546
3547         /*
3548          * Disable singlestep if we're injecting an interrupt/exception.
3549          * We don't want our modified rflags to be pushed on the stack where
3550          * we might not be able to easily reset them if we disabled NMI
3551          * singlestep later.
3552          */
3553         if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
3554                 /*
3555                  * Event injection happens before external interrupts cause a
3556                  * vmexit and interrupts are disabled here, so smp_send_reschedule
3557                  * is enough to force an immediate vmexit.
3558                  */
3559                 disable_nmi_singlestep(svm);
3560                 smp_send_reschedule(vcpu->cpu);
3561         }
3562
3563         pre_svm_run(svm);
3564
3565         sync_lapic_to_cr8(vcpu);
3566
3567         if (unlikely(svm->asid != svm->vmcb->control.asid)) {
3568                 svm->vmcb->control.asid = svm->asid;
3569                 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
3570         }
3571         svm->vmcb->save.cr2 = vcpu->arch.cr2;
3572
3573         /*
3574          * Run with all-zero DR6 unless needed, so that we can get the exact cause
3575          * of a #DB.
3576          */
3577         if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
3578                 svm_set_dr6(svm, vcpu->arch.dr6);
3579         else
3580                 svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);
3581
3582         clgi();
3583         kvm_load_guest_xsave_state(vcpu);
3584
3585         kvm_wait_lapic_expire(vcpu);
3586
3587         /*
3588          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
3589          * it's non-zero. Since vmentry is serialising on affected CPUs, there
3590          * is no need to worry about the conditional branch over the wrmsr
3591          * being speculatively taken.
3592          */
3593         x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
3594
3595         svm_vcpu_enter_exit(vcpu, svm);
3596
3597         /*
3598          * We do not use IBRS in the kernel. If this vCPU has used the
3599          * SPEC_CTRL MSR it may have left it on; save the value and
3600          * turn it off. This is much more efficient than blindly adding
3601          * it to the atomic save/restore list. Especially as the former
3602          * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
3603          *
3604          * For non-nested case:
3605          * If the L01 MSR bitmap does not intercept the MSR, then we need to
3606          * save it.
3607          *
3608          * For nested case:
3609          * If the L02 MSR bitmap does not intercept the MSR, then we need to
3610          * save it.
3611          */
3612         if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
3613                 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
3614
3615         reload_tss(vcpu);
3616
3617         x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
3618
3619         vcpu->arch.cr2 = svm->vmcb->save.cr2;
3620         vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3621         vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3622         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3623
3624         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3625                 kvm_before_interrupt(&svm->vcpu);
3626
3627         kvm_load_host_xsave_state(vcpu);
3628         stgi();
3629
3630         /* Any pending NMI will happen here */
3631
3632         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3633                 kvm_after_interrupt(&svm->vcpu);
3634
3635         sync_cr8_to_lapic(vcpu);
3636
3637         svm->next_rip = 0;
3638         if (is_guest_mode(&svm->vcpu)) {
3639                 sync_nested_vmcb_control(svm);
3640                 svm->nested.nested_run_pending = 0;
3641         }
3642
3643         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3644         vmcb_mark_all_clean(svm->vmcb);
3645
3646         /* If the exit was due to a #PF, check for an async PF. */
3647         if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
3648                 svm->vcpu.arch.apf.host_apf_flags =
3649                         kvm_read_and_reset_apf_flags();
3650
3651         if (npt_enabled) {
3652                 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
3653                 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
3654         }
3655
3656         /*
3657          * We need to handle MC intercepts here before the vcpu has a chance to
3658          * change the physical cpu
3659          */
3660         if (unlikely(svm->vmcb->control.exit_code ==
3661                      SVM_EXIT_EXCP_BASE + MC_VECTOR))
3662                 svm_handle_mce(svm);
3663
3664         svm_complete_interrupts(svm);
3665
3666         if (is_guest_mode(vcpu))
3667                 return EXIT_FASTPATH_NONE;
3668
3669         return svm_exit_handlers_fastpath(vcpu);
3670 }
3671
3672 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root,
3673                              int root_level)
3674 {
3675         struct vcpu_svm *svm = to_svm(vcpu);
3676         unsigned long cr3;
3677
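        /* Tag the root with the SME encryption bit, if memory encryption is active. */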
3678         cr3 = __sme_set(root);
3679         if (npt_enabled) {
3680                 svm->vmcb->control.nested_cr3 = cr3;
3681                 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
3682
3683                 /* Loading L2's CR3 is handled by enter_svm_guest_mode.  */
3684                 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3685                         return;
3686                 cr3 = vcpu->arch.cr3;
3687         }
3688
3689         svm->vmcb->save.cr3 = cr3;
3690         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
3691 }
3692
3693 static int is_disabled(void)
3694 {
3695         u64 vm_cr;
3696
3697         rdmsrl(MSR_VM_CR, vm_cr);
3698         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
3699                 return 1;
3700
3701         return 0;
3702 }
3703
3704 static void
3705 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
3706 {
3707         /*
3708          * Patch in the VMMCALL instruction:
3709          */
3710         hypercall[0] = 0x0f;
3711         hypercall[1] = 0x01;
3712         hypercall[2] = 0xd9;
3713 }
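
/*
 * For illustration only (not part of this file): a guest invokes a KVM
 * hypercall by executing the VMMCALL instruction patched in above
 * (opcode 0f 01 d9).  A minimal sketch assuming the standard KVM
 * hypercall ABI (number in RAX, first argument in RBX, result in RAX);
 * kvm_hypercall1_sketch() is a hypothetical name.
 */
static inline long kvm_hypercall1_sketch(unsigned int nr, unsigned long p1)
{
        long ret;

        asm volatile("vmmcall"
                     : "=a"(ret)
                     : "a"(nr), "b"(p1)
                     : "memory");
        return ret;
}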
3714
3715 static int __init svm_check_processor_compat(void)
3716 {
3717         return 0;
3718 }
3719
3720 static bool svm_cpu_has_accelerated_tpr(void)
3721 {
3722         return false;
3723 }
3724
3725 static bool svm_has_emulated_msr(u32 index)
3726 {
3727         switch (index) {
3728         case MSR_IA32_MCG_EXT_CTL:
3729         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3730                 return false;
3731         default:
3732                 break;
3733         }
3734
3735         return true;
3736 }
3737
3738 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
3739 {
3740         return 0;
3741 }
3742
3743 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
3744 {
3745         struct vcpu_svm *svm = to_svm(vcpu);
3746         struct kvm_cpuid_entry2 *best;
3747
3748         vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
3749                                     boot_cpu_has(X86_FEATURE_XSAVE) &&
3750                                     boot_cpu_has(X86_FEATURE_XSAVES);
3751
3752         /* Update nrips enabled cache */
3753         svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
3754                              guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
3755
3756         /* Check again whether INVPCID interception is required. */
3757         svm_check_invpcid(svm);
3758
3759         /* For SEV guests, the memory encryption (C) bit is not reserved in CR3. */
3760         if (sev_guest(vcpu->kvm)) {
3761                 best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
3762                 if (best)
3763                         vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f));
3764         }
3765
3766         if (!kvm_vcpu_apicv_active(vcpu))
3767                 return;
3768
3769         /*
3770          * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
3771          * is exposed to the guest, disable AVIC.
3772          */
3773         if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
3774                 kvm_request_apicv_update(vcpu->kvm, false,
3775                                          APICV_INHIBIT_REASON_X2APIC);
3776
3777         /*
3778          * Currently, AVIC does not work with nested virtualization.
3779          * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
3780          */
3781         if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
3782                 kvm_request_apicv_update(vcpu->kvm, false,
3783                                          APICV_INHIBIT_REASON_NESTED);
3784 }
3785
3786 static bool svm_has_wbinvd_exit(void)
3787 {
3788         return true;
3789 }
3790
3791 #define PRE_EX(exit)  { .exit_code = (exit), \
3792                         .stage = X86_ICPT_PRE_EXCEPT, }
3793 #define POST_EX(exit) { .exit_code = (exit), \
3794                         .stage = X86_ICPT_POST_EXCEPT, }
3795 #define POST_MEM(exit) { .exit_code = (exit), \
3796                         .stage = X86_ICPT_POST_MEMACCESS, }
3797
3798 static const struct __x86_intercept {
3799         u32 exit_code;
3800         enum x86_intercept_stage stage;
3801 } x86_intercept_map[] = {
3802         [x86_intercept_cr_read]         = POST_EX(SVM_EXIT_READ_CR0),
3803         [x86_intercept_cr_write]        = POST_EX(SVM_EXIT_WRITE_CR0),
3804         [x86_intercept_clts]            = POST_EX(SVM_EXIT_WRITE_CR0),
3805         [x86_intercept_lmsw]            = POST_EX(SVM_EXIT_WRITE_CR0),
3806         [x86_intercept_smsw]            = POST_EX(SVM_EXIT_READ_CR0),
3807         [x86_intercept_dr_read]         = POST_EX(SVM_EXIT_READ_DR0),
3808         [x86_intercept_dr_write]        = POST_EX(SVM_EXIT_WRITE_DR0),
3809         [x86_intercept_sldt]            = POST_EX(SVM_EXIT_LDTR_READ),
3810         [x86_intercept_str]             = POST_EX(SVM_EXIT_TR_READ),
3811         [x86_intercept_lldt]            = POST_EX(SVM_EXIT_LDTR_WRITE),
3812         [x86_intercept_ltr]             = POST_EX(SVM_EXIT_TR_WRITE),
3813         [x86_intercept_sgdt]            = POST_EX(SVM_EXIT_GDTR_READ),
3814         [x86_intercept_sidt]            = POST_EX(SVM_EXIT_IDTR_READ),
3815         [x86_intercept_lgdt]            = POST_EX(SVM_EXIT_GDTR_WRITE),
3816         [x86_intercept_lidt]            = POST_EX(SVM_EXIT_IDTR_WRITE),
3817         [x86_intercept_vmrun]           = POST_EX(SVM_EXIT_VMRUN),
3818         [x86_intercept_vmmcall]         = POST_EX(SVM_EXIT_VMMCALL),
3819         [x86_intercept_vmload]          = POST_EX(SVM_EXIT_VMLOAD),
3820         [x86_intercept_vmsave]          = POST_EX(SVM_EXIT_VMSAVE),
3821         [x86_intercept_stgi]            = POST_EX(SVM_EXIT_STGI),
3822         [x86_intercept_clgi]            = POST_EX(SVM_EXIT_CLGI),
3823         [x86_intercept_skinit]          = POST_EX(SVM_EXIT_SKINIT),
3824         [x86_intercept_invlpga]         = POST_EX(SVM_EXIT_INVLPGA),
3825         [x86_intercept_rdtscp]          = POST_EX(SVM_EXIT_RDTSCP),
3826         [x86_intercept_monitor]         = POST_MEM(SVM_EXIT_MONITOR),
3827         [x86_intercept_mwait]           = POST_EX(SVM_EXIT_MWAIT),
3828         [x86_intercept_invlpg]          = POST_EX(SVM_EXIT_INVLPG),
3829         [x86_intercept_invd]            = POST_EX(SVM_EXIT_INVD),
3830         [x86_intercept_wbinvd]          = POST_EX(SVM_EXIT_WBINVD),
3831         [x86_intercept_wrmsr]           = POST_EX(SVM_EXIT_MSR),
3832         [x86_intercept_rdtsc]           = POST_EX(SVM_EXIT_RDTSC),
3833         [x86_intercept_rdmsr]           = POST_EX(SVM_EXIT_MSR),
3834         [x86_intercept_rdpmc]           = POST_EX(SVM_EXIT_RDPMC),
3835         [x86_intercept_cpuid]           = PRE_EX(SVM_EXIT_CPUID),
3836         [x86_intercept_rsm]             = PRE_EX(SVM_EXIT_RSM),
3837         [x86_intercept_pause]           = PRE_EX(SVM_EXIT_PAUSE),
3838         [x86_intercept_pushf]           = PRE_EX(SVM_EXIT_PUSHF),
3839         [x86_intercept_popf]            = PRE_EX(SVM_EXIT_POPF),
3840         [x86_intercept_intn]            = PRE_EX(SVM_EXIT_SWINT),
3841         [x86_intercept_iret]            = PRE_EX(SVM_EXIT_IRET),
3842         [x86_intercept_icebp]           = PRE_EX(SVM_EXIT_ICEBP),
3843         [x86_intercept_hlt]             = POST_EX(SVM_EXIT_HLT),
3844         [x86_intercept_in]              = POST_EX(SVM_EXIT_IOIO),
3845         [x86_intercept_ins]             = POST_EX(SVM_EXIT_IOIO),
3846         [x86_intercept_out]             = POST_EX(SVM_EXIT_IOIO),
3847         [x86_intercept_outs]            = POST_EX(SVM_EXIT_IOIO),
3848         [x86_intercept_xsetbv]          = PRE_EX(SVM_EXIT_XSETBV),
3849 };
3850
3851 #undef PRE_EX
3852 #undef POST_EX
3853 #undef POST_MEM
3854
3855 static int svm_check_intercept(struct kvm_vcpu *vcpu,
3856                                struct x86_instruction_info *info,
3857                                enum x86_intercept_stage stage,
3858                                struct x86_exception *exception)
3859 {
3860         struct vcpu_svm *svm = to_svm(vcpu);
3861         int vmexit, ret = X86EMUL_CONTINUE;
3862         struct __x86_intercept icpt_info;
3863         struct vmcb *vmcb = svm->vmcb;
3864
3865         if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
3866                 goto out;
3867
3868         icpt_info = x86_intercept_map[info->intercept];
3869
3870         if (stage != icpt_info.stage)
3871                 goto out;
3872
3873         switch (icpt_info.exit_code) {
3874         case SVM_EXIT_READ_CR0:
3875                 if (info->intercept == x86_intercept_cr_read)
3876                         icpt_info.exit_code += info->modrm_reg;
3877                 break;
3878         case SVM_EXIT_WRITE_CR0: {
3879                 unsigned long cr0, val;
3880
3881                 if (info->intercept == x86_intercept_cr_write)
3882                         icpt_info.exit_code += info->modrm_reg;
3883
3884                 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
3885                     info->intercept == x86_intercept_clts)
3886                         break;
3887
3888                 if (!(vmcb_is_intercept(&svm->nested.ctl,
3889                                         INTERCEPT_SELECTIVE_CR0)))
3890                         break;
3891
3892                 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
3893                 val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
3894
3895                 if (info->intercept == x86_intercept_lmsw) {
3896                         cr0 &= 0xfUL;
3897                         val &= 0xfUL;
3898                         /* lmsw can't clear PE - catch this here */
3899                         if (cr0 & X86_CR0_PE)
3900                                 val |= X86_CR0_PE;
3901                 }
3902
3903                 if (cr0 ^ val)
3904                         icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3905
3906                 break;
3907         }
3908         case SVM_EXIT_READ_DR0:
3909         case SVM_EXIT_WRITE_DR0:
3910                 icpt_info.exit_code += info->modrm_reg;
3911                 break;
3912         case SVM_EXIT_MSR:
3913                 if (info->intercept == x86_intercept_wrmsr)
3914                         vmcb->control.exit_info_1 = 1;
3915                 else
3916                         vmcb->control.exit_info_1 = 0;
3917                 break;
3918         case SVM_EXIT_PAUSE:
3919                 /*
3920                  * PAUSE is REP NOP: the emulator reports this intercept
3921                  * for a plain NOP too, so check for the REP prefix here.
3922                  */
3923                 if (info->rep_prefix != REPE_PREFIX)
3924                         goto out;
3925                 break;
3926         case SVM_EXIT_IOIO: {
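                /*
                 * Build exit_info_1 in the hardware IOIO format: port number
                 * in bits 31:16, bit 0 = IN, bit 2 = string op, bit 3 = REP,
                 * operand size in bits 6:4, address size in bits 9:7.
                 */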
3927                 u64 exit_info;
3928                 u32 bytes;
3929
3930                 if (info->intercept == x86_intercept_in ||
3931                     info->intercept == x86_intercept_ins) {
3932                         exit_info = ((info->src_val & 0xffff) << 16) |
3933                                 SVM_IOIO_TYPE_MASK;
3934                         bytes = info->dst_bytes;
3935                 } else {
3936                         exit_info = (info->dst_val & 0xffff) << 16;
3937                         bytes = info->src_bytes;
3938                 }
3939
3940                 if (info->intercept == x86_intercept_outs ||
3941                     info->intercept == x86_intercept_ins)
3942                         exit_info |= SVM_IOIO_STR_MASK;
3943
3944                 if (info->rep_prefix)
3945                         exit_info |= SVM_IOIO_REP_MASK;
3946
3947                 bytes = min(bytes, 4u);
3948
3949                 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
3950
3951                 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
3952
3953                 vmcb->control.exit_info_1 = exit_info;
3954                 vmcb->control.exit_info_2 = info->next_rip;
3955
3956                 break;
3957         }
3958         default:
3959                 break;
3960         }
3961
3962         /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
3963         if (static_cpu_has(X86_FEATURE_NRIPS))
3964                 vmcb->control.next_rip  = info->next_rip;
3965         vmcb->control.exit_code = icpt_info.exit_code;
3966         vmexit = nested_svm_exit_handled(svm);
3967
3968         ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
3969                                            : X86EMUL_CONTINUE;
3970
3971 out:
3972         return ret;
3973 }
3974
3975 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
3976 {
3977 }
3978
3979 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
3980 {
3981         if (!kvm_pause_in_guest(vcpu->kvm))
3982                 shrink_ple_window(vcpu);
3983 }
3984
3985 static void svm_setup_mce(struct kvm_vcpu *vcpu)
3986 {
3987         /* [63:9] are reserved. */
3988         vcpu->arch.mcg_cap &= 0x1ff;
3989 }
3990
3991 bool svm_smi_blocked(struct kvm_vcpu *vcpu)
3992 {
3993         struct vcpu_svm *svm = to_svm(vcpu);
3994
3995         /* Per APM Vol.2 15.22.2 "Response to SMI" */
3996         if (!gif_set(svm))
3997                 return true;
3998
3999         return is_smm(vcpu);
4000 }
4001
4002 static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
4003 {
4004         struct vcpu_svm *svm = to_svm(vcpu);
4005         if (svm->nested.nested_run_pending)
4006                 return -EBUSY;
4007
4008         /* An SMI must not be injected into L2 if it's supposed to VM-Exit.  */
4009         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
4010                 return -EBUSY;
4011
4012         return !svm_smi_blocked(vcpu);
4013 }
4014
4015 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
4016 {
4017         struct vcpu_svm *svm = to_svm(vcpu);
4018         int ret;
4019
4020         if (is_guest_mode(vcpu)) {
4021                 /* FED8h - SVM Guest */
4022                 put_smstate(u64, smstate, 0x7ed8, 1);
4023                 /* FEE0h - SVM Guest VMCB Physical Address */
4024                 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
4025
4026                 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4027                 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4028                 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4029
4030                 ret = nested_svm_vmexit(svm);
4031                 if (ret)
4032                         return ret;
4033         }
4034         return 0;
4035 }
4036
4037 static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
4038 {
4039         struct vcpu_svm *svm = to_svm(vcpu);
4040         struct kvm_host_map map;
4041         int ret = 0;
4042
4043         if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
4044                 u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
4045                 u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
4046                 u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
4047
4048                 if (guest) {
4049                         if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
4050                                 return 1;
4051
4052                         if (!(saved_efer & EFER_SVME))
4053                                 return 1;
4054
4055                         if (kvm_vcpu_map(&svm->vcpu,
4056                                          gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
4057                                 return 1;
4058
4059                         if (svm_allocate_nested(svm))
4060                                 return 1;
4061
4062                         ret = enter_svm_guest_mode(svm, vmcb12_gpa, map.hva);
4063                         kvm_vcpu_unmap(&svm->vcpu, &map, true);
4064                 }
4065         }
4066
4067         return ret;
4068 }
4069
4070 static void enable_smi_window(struct kvm_vcpu *vcpu)
4071 {
4072         struct vcpu_svm *svm = to_svm(vcpu);
4073
4074         if (!gif_set(svm)) {
4075                 if (vgif_enabled(svm))
4076                         svm_set_intercept(svm, INTERCEPT_STGI);
4077                 /* STGI will cause a vm exit */
4078         } else {
4079                 /* We must be in SMM; RSM will cause a vmexit anyway.  */
4080         }
4081 }
4082
4083 static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
4084 {
4085         bool smep, smap, is_user;
4086         unsigned long cr4;
4087
4088         /*
4089          * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
4090          *
4091          * Errata:
4092          * When the CPU raises a #NPF on a guest data access and vCPU CR4.SMAP=1,
4093          * it is possible that the CPU microcode implementing DecodeAssist will
4094          * fail to read the bytes of the instruction which caused the #NPF. In
4095          * this case, the GuestIntrBytes field of the VMCB on a VMEXIT will
4096          * incorrectly return 0 instead of the correct guest instruction bytes.
4097          *
4098          * This happens because the CPU microcode that reads the instruction
4099          * bytes uses a special opcode which attempts to read data using CPL=0
4100          * privileges. The microcode reads CS:RIP and if it hits an SMAP
4101          * fault, it gives up and returns no instruction bytes.
4102          *
4103          * Detection:
4104          * We reach here when the CPU supports DecodeAssist, raised a #NPF and
4105          * returned 0 in the GuestIntrBytes field of the VMCB.
4106          * First, the erratum can only be triggered when vCPU CR4.SMAP=1.
4107          * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered when
4108          * vCPU CPL==3 (because otherwise the guest would have taken a SMEP
4109          * fault instead of the #NPF).
4110          * Otherwise, with vCPU CR4.SMEP=0, the erratum can be triggered at any
4111          * vCPU CPL. As most guests enable SMAP if they have also enabled SMEP,
4112          * use the above logic to minimize false positives while still detecting
4113          * the erratum in all cases where it matters.
4114          *
4115          * Workaround:
4116          * To determine what instruction the guest was executing, the hypervisor
4117          * has to decode the instruction at the instruction pointer.
4118          *
4119          * For a non-SEV guest, the hypervisor can read guest memory to decode
4120          * the instruction when insn_len is zero, so return true to indicate
4121          * that decoding is possible.
4122          *
4123          * But for an SEV guest, guest memory is encrypted with a guest-specific
4124          * key and the hypervisor cannot decode the instruction, so the erratum
4125          * cannot be worked around. Print an error and request that the guest
4126          * be killed.
4127          */
4128         if (likely(!insn || insn_len))
4129                 return true;
4130
4131         /*
4132          * If RIP is invalid, go ahead with emulation which will cause an
4133          * internal error exit.
4134          */
4135         if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT))
4136                 return true;
4137
4138         cr4 = kvm_read_cr4(vcpu);
4139         smep = cr4 & X86_CR4_SMEP;
4140         smap = cr4 & X86_CR4_SMAP;
4141         is_user = svm_get_cpl(vcpu) == 3;
4142         if (smap && (!smep || is_user)) {
4143                 if (!sev_guest(vcpu->kvm))
4144                         return true;
4145
4146                 pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
4147                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4148         }
4149
4150         return false;
4151 }
4152
4153 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
4154 {
4155         struct vcpu_svm *svm = to_svm(vcpu);
4156
4157         /*
4158          * TODO: The last condition latches INIT signals on the vCPU when
4159          * the vCPU is in guest mode and vmcb12 defines an INIT intercept.
4160          * To properly emulate the INIT intercept,
4161          * svm_check_nested_events() should call nested_svm_vmexit()
4162          * if an INIT signal is pending.
4163          */
4164         return !gif_set(svm) ||
4165                    (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
4166 }
4167
4168 static void svm_vm_destroy(struct kvm *kvm)
4169 {
4170         avic_vm_destroy(kvm);
4171         sev_vm_destroy(kvm);
4172 }
4173
4174 static int svm_vm_init(struct kvm *kvm)
4175 {
4176         if (!pause_filter_count || !pause_filter_thresh)
4177                 kvm->arch.pause_in_guest = true;
4178
4179         if (avic) {
4180                 int ret = avic_vm_init(kvm);
4181                 if (ret)
4182                         return ret;
4183         }
4184
4185         kvm_apicv_init(kvm, avic);
4186         return 0;
4187 }
4188
4189 static struct kvm_x86_ops svm_x86_ops __initdata = {
4190         .hardware_unsetup = svm_hardware_teardown,
4191         .hardware_enable = svm_hardware_enable,
4192         .hardware_disable = svm_hardware_disable,
4193         .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
4194         .has_emulated_msr = svm_has_emulated_msr,
4195
4196         .vcpu_create = svm_create_vcpu,
4197         .vcpu_free = svm_free_vcpu,
4198         .vcpu_reset = svm_vcpu_reset,
4199
4200         .vm_size = sizeof(struct kvm_svm),
4201         .vm_init = svm_vm_init,
4202         .vm_destroy = svm_vm_destroy,
4203
4204         .prepare_guest_switch = svm_prepare_guest_switch,
4205         .vcpu_load = svm_vcpu_load,
4206         .vcpu_put = svm_vcpu_put,
4207         .vcpu_blocking = svm_vcpu_blocking,
4208         .vcpu_unblocking = svm_vcpu_unblocking,
4209
4210         .update_exception_bitmap = update_exception_bitmap,
4211         .get_msr_feature = svm_get_msr_feature,
4212         .get_msr = svm_get_msr,
4213         .set_msr = svm_set_msr,
4214         .get_segment_base = svm_get_segment_base,
4215         .get_segment = svm_get_segment,
4216         .set_segment = svm_set_segment,
4217         .get_cpl = svm_get_cpl,
4218         .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
4219         .set_cr0 = svm_set_cr0,
4220         .is_valid_cr4 = svm_is_valid_cr4,
4221         .set_cr4 = svm_set_cr4,
4222         .set_efer = svm_set_efer,
4223         .get_idt = svm_get_idt,
4224         .set_idt = svm_set_idt,
4225         .get_gdt = svm_get_gdt,
4226         .set_gdt = svm_set_gdt,
4227         .set_dr7 = svm_set_dr7,
4228         .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
4229         .cache_reg = svm_cache_reg,
4230         .get_rflags = svm_get_rflags,
4231         .set_rflags = svm_set_rflags,
4232
4233         .tlb_flush_all = svm_flush_tlb,
4234         .tlb_flush_current = svm_flush_tlb,
4235         .tlb_flush_gva = svm_flush_tlb_gva,
4236         .tlb_flush_guest = svm_flush_tlb,
4237
4238         .run = svm_vcpu_run,
4239         .handle_exit = handle_exit,
4240         .skip_emulated_instruction = skip_emulated_instruction,
4241         .update_emulated_instruction = NULL,
4242         .set_interrupt_shadow = svm_set_interrupt_shadow,
4243         .get_interrupt_shadow = svm_get_interrupt_shadow,
4244         .patch_hypercall = svm_patch_hypercall,
4245         .set_irq = svm_set_irq,
4246         .set_nmi = svm_inject_nmi,
4247         .queue_exception = svm_queue_exception,
4248         .cancel_injection = svm_cancel_injection,
4249         .interrupt_allowed = svm_interrupt_allowed,
4250         .nmi_allowed = svm_nmi_allowed,
4251         .get_nmi_mask = svm_get_nmi_mask,
4252         .set_nmi_mask = svm_set_nmi_mask,
4253         .enable_nmi_window = enable_nmi_window,
4254         .enable_irq_window = enable_irq_window,
4255         .update_cr8_intercept = update_cr8_intercept,
4256         .set_virtual_apic_mode = svm_set_virtual_apic_mode,
4257         .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
4258         .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
4259         .pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl,
4260         .load_eoi_exitmap = svm_load_eoi_exitmap,
4261         .hwapic_irr_update = svm_hwapic_irr_update,
4262         .hwapic_isr_update = svm_hwapic_isr_update,
4263         .sync_pir_to_irr = kvm_lapic_find_highest_irr,
4264         .apicv_post_state_restore = avic_post_state_restore,
4265
4266         .set_tss_addr = svm_set_tss_addr,
4267         .set_identity_map_addr = svm_set_identity_map_addr,
4268         .get_mt_mask = svm_get_mt_mask,
4269
4270         .get_exit_info = svm_get_exit_info,
4271
4272         .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
4273
4274         .has_wbinvd_exit = svm_has_wbinvd_exit,
4275
4276         .write_l1_tsc_offset = svm_write_l1_tsc_offset,
4277
4278         .load_mmu_pgd = svm_load_mmu_pgd,
4279
4280         .check_intercept = svm_check_intercept,
4281         .handle_exit_irqoff = svm_handle_exit_irqoff,
4282
4283         .request_immediate_exit = __kvm_request_immediate_exit,
4284
4285         .sched_in = svm_sched_in,
4286
4287         .pmu_ops = &amd_pmu_ops,
4288         .nested_ops = &svm_nested_ops,
4289
4290         .deliver_posted_interrupt = svm_deliver_avic_intr,
4291         .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
4292         .update_pi_irte = svm_update_pi_irte,
4293         .setup_mce = svm_setup_mce,
4294
4295         .smi_allowed = svm_smi_allowed,
4296         .pre_enter_smm = svm_pre_enter_smm,
4297         .pre_leave_smm = svm_pre_leave_smm,
4298         .enable_smi_window = enable_smi_window,
4299
4300         .mem_enc_op = svm_mem_enc_op,
4301         .mem_enc_reg_region = svm_register_enc_region,
4302         .mem_enc_unreg_region = svm_unregister_enc_region,
4303
4304         .can_emulate_instruction = svm_can_emulate_instruction,
4305
4306         .apic_init_signal_blocked = svm_apic_init_signal_blocked,
4307
4308         .msr_filter_changed = svm_msr_filter_changed,
4309 };
4310
4311 static struct kvm_x86_init_ops svm_init_ops __initdata = {
4312         .cpu_has_kvm_support = has_svm,
4313         .disabled_by_bios = is_disabled,
4314         .hardware_setup = svm_hardware_setup,
4315         .check_processor_compatibility = svm_check_processor_compat,
4316
4317         .runtime_ops = &svm_x86_ops,
4318 };
4319
4320 static int __init svm_init(void)
4321 {
4322         __unused_size_checks();
4323
4324         return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
4325                         __alignof__(struct vcpu_svm), THIS_MODULE);
4326 }
4327
4328 static void __exit svm_exit(void)
4329 {
4330         kvm_exit();
4331 }
4332
4333 module_init(svm_init)
4334 module_exit(svm_exit)