1c381c6a7b513b01cc45c00968104fd3cdbd3018
[linux-2.6-microblaze.git] / arch/x86/kvm/svm/nested.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * AMD SVM support
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *   Avi Kivity   <avi@qumranet.com>
13  */
14
15 #define pr_fmt(fmt) "SVM: " fmt
16
17 #include <linux/kvm_types.h>
18 #include <linux/kvm_host.h>
19 #include <linux/kernel.h>
20
21 #include <asm/msr-index.h>
22 #include <asm/debugreg.h>
23
24 #include "kvm_emulate.h"
25 #include "trace.h"
26 #include "mmu.h"
27 #include "x86.h"
28 #include "cpuid.h"
29 #include "lapic.h"
30 #include "svm.h"
31 #include "hyperv.h"
32
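/*
 * CC() wraps nested VM-Enter consistency checks; when a check fails, the
 * common macro records the failing condition so the caller can bail out of
 * the emulated VMRUN with a traceable reason.
 */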
33 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
34
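/*
 * inject_page_fault hook for the nested NPT MMU: reflect a fault hit while
 * walking L1's nested page tables back to L1 as an SVM_EXIT_NPF vmexit,
 * with the fault's error code and address in exit_info_1/exit_info_2.
 */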
35 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
36                                        struct x86_exception *fault)
37 {
38         struct vcpu_svm *svm = to_svm(vcpu);
39         struct vmcb *vmcb = svm->vmcb;
40
41         if (vmcb->control.exit_code != SVM_EXIT_NPF) {
42                 /*
43                  * TODO: track the cause of the nested page fault, and
44                  * correctly fill in the high bits of exit_info_1.
45                  */
46                 vmcb->control.exit_code = SVM_EXIT_NPF;
47                 vmcb->control.exit_code_hi = 0;
48                 vmcb->control.exit_info_1 = (1ULL << 32);
49                 vmcb->control.exit_info_2 = fault->address;
50         }
51
52         vmcb->control.exit_info_1 &= ~0xffffffffULL;
53         vmcb->control.exit_info_1 |= fault->error_code;
54
55         nested_svm_vmexit(svm);
56 }
57
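/*
 * Deliver a page fault destined for L2: if L1 intercepts #PF (and no nested
 * VMRUN is pending), reflect it to L1 as a vmexit; otherwise inject the
 * fault directly into L2.
 */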
58 static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
59 {
60         struct vcpu_svm *svm = to_svm(vcpu);
61         struct vmcb *vmcb = svm->vmcb;
62
63         WARN_ON(!is_guest_mode(vcpu));
64
65         if (vmcb12_is_intercept(&svm->nested.ctl,
66                                 INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
67                                 !svm->nested.nested_run_pending) {
68                 vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
69                 vmcb->control.exit_code_hi = 0;
70                 vmcb->control.exit_info_1 = fault->error_code;
71                 vmcb->control.exit_info_2 = fault->address;
72                 nested_svm_vmexit(svm);
73         } else {
74                 kvm_inject_page_fault(vcpu, fault);
75         }
76 }
77
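/*
 * get_pdptr hook for the nested NPT MMU: read the index'th PDPTE from the
 * page addressed by L1's nested_cr3.  A failed read is reported as a zero
 * (not-present) entry.
 */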
78 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
79 {
80         struct vcpu_svm *svm = to_svm(vcpu);
81         u64 cr3 = svm->nested.ctl.nested_cr3;
82         u64 pdpte;
83         int ret;
84
85         ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
86                                        offset_in_page(cr3) + index * 8, 8);
87         if (ret)
88                 return 0;
89         return pdpte;
90 }
91
92 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
93 {
94         struct vcpu_svm *svm = to_svm(vcpu);
95
96         return svm->nested.ctl.nested_cr3;
97 }
98
99 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
100 {
101         struct vcpu_svm *svm = to_svm(vcpu);
102
103         WARN_ON(mmu_is_nested(vcpu));
104
105         vcpu->arch.mmu = &vcpu->arch.guest_mmu;
106
107         /*
108          * The NPT format depends on L1's CR4 and EFER, which is in vmcb01.  Note,
109          * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
110          * vCPU state.  CR0.WP is explicitly ignored, while CR0.PG is required.
111          */
112         kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
113                                 svm->vmcb01.ptr->save.efer,
114                                 svm->nested.ctl.nested_cr3);
115         vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
116         vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
117         vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
118         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
119 }
120
121 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
122 {
123         vcpu->arch.mmu = &vcpu->arch.root_mmu;
124         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
125 }
126
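/*
 * Virtual VMLOAD/VMSAVE can be left enabled for L2 only if the feature is
 * available to this vCPU, L2 runs with nested paging, and L1 enabled it in
 * virt_ext; otherwise VMLOAD/VMSAVE must be intercepted and emulated.
 */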
127 static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
128 {
129         if (!svm->v_vmload_vmsave_enabled)
130                 return true;
131
132         if (!nested_npt_enabled(svm))
133                 return true;
134
135         if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
136                 return true;
137
138         return false;
139 }
140
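/*
 * Recompute the active intercepts: start from the intercepts KVM uses for
 * L1 (vmcb01), OR in the cached vmcb12 intercepts when running L2, and
 * apply the KVM-specific adjustments (CR8/VINTR, VMMCALL, SMI and
 * VMLOAD/VMSAVE) handled below.
 */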
141 void recalc_intercepts(struct vcpu_svm *svm)
142 {
143         struct vmcb_control_area *c, *h;
144         struct vmcb_ctrl_area_cached *g;
145         unsigned int i;
146
147         vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
148
149         if (!is_guest_mode(&svm->vcpu))
150                 return;
151
152         c = &svm->vmcb->control;
153         h = &svm->vmcb01.ptr->control;
154         g = &svm->nested.ctl;
155
156         for (i = 0; i < MAX_INTERCEPT; i++)
157                 c->intercepts[i] = h->intercepts[i];
158
159         if (g->int_ctl & V_INTR_MASKING_MASK) {
160                 /* We only want the cr8 intercept bits of L1 */
161                 vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
162                 vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
163
164                 /*
165                  * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
166                  * affect any interrupt we may want to inject; therefore,
167                  * interrupt window vmexits are irrelevant to L0.
168                  */
169                 vmcb_clr_intercept(c, INTERCEPT_VINTR);
170         }
171
172         /* We don't want to see VMMCALLs from a nested guest */
173         vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
174
175         for (i = 0; i < MAX_INTERCEPT; i++)
176                 c->intercepts[i] |= g->intercepts[i];
177
178         /* If SMI is not intercepted, ignore guest SMI intercept as well  */
179         if (!intercept_smi)
180                 vmcb_clr_intercept(c, INTERCEPT_SMI);
181
182         if (nested_vmcb_needs_vls_intercept(svm)) {
183                 /*
184                  * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
185                  * we must intercept these instructions to correctly
186                  * emulate them in case L1 doesn't intercept them.
187                  */
188                 vmcb_set_intercept(c, INTERCEPT_VMLOAD);
189                 vmcb_set_intercept(c, INTERCEPT_VMSAVE);
190         } else {
191                 WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
192         }
193 }
194
195 /*
196  * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
197  * is optimized in that it only merges the parts where the KVM MSR permission
198  * bitmap may contain zero bits.
199  */
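/*
 * A set bit in the MSR permission bitmap means the access is intercepted,
 * so OR-ing KVM's bitmap with L1's yields a bitmap that exits whenever
 * either L0 or L1 wants to intercept the MSR.
 */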
200 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
201 {
202         struct hv_enlightenments *hve =
203                 (struct hv_enlightenments *)svm->nested.ctl.reserved_sw;
204         int i;
205
206         /*
207          * MSR bitmap update can be skipped when:
208          * - MSR bitmap for L1 hasn't changed.
209          * - Nested hypervisor (L1) is attempting to launch the same L2 as
210          *   before.
211          * - Nested hypervisor (L1) is using the Hyper-V emulation interface and
212          *   tells KVM (L0) there were no changes in the MSR bitmap for L2.
213          */
214         if (!svm->nested.force_msr_bitmap_recalc &&
215             kvm_hv_hypercall_enabled(&svm->vcpu) &&
216             hve->hv_enlightenments_control.msr_bitmap &&
217             (svm->nested.ctl.clean & BIT(VMCB_HV_NESTED_ENLIGHTENMENTS)))
218                 goto set_msrpm_base_pa;
219
220         if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
221                 return true;
222
223         for (i = 0; i < MSRPM_OFFSETS; i++) {
224                 u32 value, p;
225                 u64 offset;
226
227                 if (msrpm_offsets[i] == 0xffffffff)
228                         break;
229
230                 p      = msrpm_offsets[i];
231                 offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
232
233                 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
234                         return false;
235
236                 svm->nested.msrpm[p] = svm->msrpm[p] | value;
237         }
238
239         svm->nested.force_msr_bitmap_recalc = false;
240
241 set_msrpm_base_pa:
242         svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
243
244         return true;
245 }
246
247 /*
248  * Bits 11:0 of the bitmap address are ignored by hardware
249  */
250 static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
251 {
252         u64 addr = PAGE_ALIGN(pa);
253
254         return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
255             kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
256 }
257
258 static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
259 {
260         /* Nested FLUSHBYASID is not supported yet.  */
261         switch(tlb_ctl) {
262                 case TLB_CONTROL_DO_NOTHING:
263                 case TLB_CONTROL_FLUSH_ALL_ASID:
264                         return true;
265                 default:
266                         return false;
267         }
268 }
269
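/*
 * Sanity checks on the vmcb12 control area: VMRUN must be intercepted, the
 * ASID must be non-zero, nested paging may only be enabled when the host
 * uses NPT, the MSR/IO permission bitmaps must be legal GPAs, and tlb_ctl
 * must be a value KVM can handle.
 */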
270 static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
271                                          struct vmcb_ctrl_area_cached *control)
272 {
273         if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
274                 return false;
275
276         if (CC(control->asid == 0))
277                 return false;
278
279         if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
280                 return false;
281
282         if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
283                                            MSRPM_SIZE)))
284                 return false;
285         if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
286                                            IOPM_SIZE)))
287                 return false;
288
289         if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
290                 return false;
291
292         return true;
293 }
294
295 /* Common checks that apply to both L1 and L2 state.  */
296 static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
297                                      struct vmcb_save_area_cached *save)
298 {
299         if (CC(!(save->efer & EFER_SVME)))
300                 return false;
301
302         if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
303             CC(save->cr0 & ~0xffffffffULL))
304                 return false;
305
306         if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
307                 return false;
308
309         /*
310          * These checks are also performed by KVM_SET_SREGS,
311          * except that EFER.LMA is not checked by SVM against
312          * CR0.PG && EFER.LME.
313          */
314         if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
315                 if (CC(!(save->cr4 & X86_CR4_PAE)) ||
316                     CC(!(save->cr0 & X86_CR0_PE)) ||
317                     CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
318                         return false;
319         }
320
321         if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
322                 return false;
323
324         if (CC(!kvm_valid_efer(vcpu, save->efer)))
325                 return false;
326
327         return true;
328 }
329
330 static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
331 {
332         struct vcpu_svm *svm = to_svm(vcpu);
333         struct vmcb_save_area_cached *save = &svm->nested.save;
334
335         return __nested_vmcb_check_save(vcpu, save);
336 }
337
338 static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
339 {
340         struct vcpu_svm *svm = to_svm(vcpu);
341         struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;
342
343         return __nested_vmcb_check_controls(vcpu, ctl);
344 }
345
346 static
347 void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
348                                          struct vmcb_ctrl_area_cached *to,
349                                          struct vmcb_control_area *from)
350 {
351         unsigned int i;
352
353         for (i = 0; i < MAX_INTERCEPT; i++)
354                 to->intercepts[i] = from->intercepts[i];
355
356         to->iopm_base_pa        = from->iopm_base_pa;
357         to->msrpm_base_pa       = from->msrpm_base_pa;
358         to->tsc_offset          = from->tsc_offset;
359         to->tlb_ctl             = from->tlb_ctl;
360         to->int_ctl             = from->int_ctl;
361         to->int_vector          = from->int_vector;
362         to->int_state           = from->int_state;
363         to->exit_code           = from->exit_code;
364         to->exit_code_hi        = from->exit_code_hi;
365         to->exit_info_1         = from->exit_info_1;
366         to->exit_info_2         = from->exit_info_2;
367         to->exit_int_info       = from->exit_int_info;
368         to->exit_int_info_err   = from->exit_int_info_err;
369         to->nested_ctl          = from->nested_ctl;
370         to->event_inj           = from->event_inj;
371         to->event_inj_err       = from->event_inj_err;
372         to->nested_cr3          = from->nested_cr3;
373         to->virt_ext            = from->virt_ext;
374         to->pause_filter_count  = from->pause_filter_count;
375         to->pause_filter_thresh = from->pause_filter_thresh;
376
377         /* Copy asid here because nested_vmcb_check_controls will check it.  */
378         to->asid           = from->asid;
379         to->msrpm_base_pa &= ~0x0fffULL;
380         to->iopm_base_pa  &= ~0x0fffULL;
381
382         /* Hyper-V extensions (Enlightened VMCB) */
383         if (kvm_hv_hypercall_enabled(vcpu)) {
384                 to->clean = from->clean;
385                 memcpy(to->reserved_sw, from->reserved_sw,
386                        sizeof(struct hv_enlightenments));
387         }
388 }
389
390 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
391                                        struct vmcb_control_area *control)
392 {
393         __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
394 }
395
396 static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
397                                              struct vmcb_save_area *from)
398 {
399         /*
400          * Copy only fields that are validated, as we need them
401          * to avoid TOC/TOU (time-of-check/time-of-use) races.
402          */
403         to->efer = from->efer;
404         to->cr0 = from->cr0;
405         to->cr3 = from->cr3;
406         to->cr4 = from->cr4;
407
408         to->dr6 = from->dr6;
409         to->dr7 = from->dr7;
410 }
411
412 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
413                                     struct vmcb_save_area *save)
414 {
415         __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
416 }
417
418 /*
419  * Synchronize fields that are written by the processor, so that
420  * they can be copied back into the vmcb12.
421  */
422 void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
423 {
424         u32 mask;
425         svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
426         svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;
427
428         /* Only a few fields of int_ctl are written by the processor.  */
429         mask = V_IRQ_MASK | V_TPR_MASK;
430         if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
431             svm_is_intercept(svm, INTERCEPT_VINTR)) {
432                 /*
433                  * In order to request an interrupt window, L0 is usurping
434                  * svm->vmcb->control.int_ctl and possibly setting V_IRQ
435                  * even if it was clear in L1's VMCB.  Restoring it would be
436                  * wrong.  However, in this case V_IRQ will remain true until
437                  * interrupt_window_interception calls svm_clear_vintr and
438                  * restores int_ctl.  We can just leave it aside.
439                  */
440                 mask &= ~V_IRQ_MASK;
441         }
442         svm->nested.ctl.int_ctl        &= ~mask;
443         svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
444 }
445
446 /*
447  * Transfer any event that L0 or L1 wanted to inject into L2 to
448  * EXIT_INT_INFO.
449  */
450 static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
451                                                 struct vmcb *vmcb12)
452 {
453         struct kvm_vcpu *vcpu = &svm->vcpu;
454         u32 exit_int_info = 0;
455         unsigned int nr;
456
457         if (vcpu->arch.exception.injected) {
458                 nr = vcpu->arch.exception.nr;
459                 exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
460
461                 if (vcpu->arch.exception.has_error_code) {
462                         exit_int_info |= SVM_EVTINJ_VALID_ERR;
463                         vmcb12->control.exit_int_info_err =
464                                 vcpu->arch.exception.error_code;
465                 }
466
467         } else if (vcpu->arch.nmi_injected) {
468                 exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
469
470         } else if (vcpu->arch.interrupt.injected) {
471                 nr = vcpu->arch.interrupt.nr;
472                 exit_int_info = nr | SVM_EVTINJ_VALID;
473
474                 if (vcpu->arch.interrupt.soft)
475                         exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
476                 else
477                         exit_int_info |= SVM_EVTINJ_TYPE_INTR;
478         }
479
480         vmcb12->control.exit_int_info = exit_int_info;
481 }
482
483 static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
484 {
485         /*
486          * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
487          * things to fix before this can be conditional:
488          *
489          *  - Flush TLBs for both L1 and L2 remote TLB flush
490          *  - Honor L1's request to flush an ASID on nested VMRUN
491          *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
492          *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
493          *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
494          *
495          * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
496          *     NPT guest-physical mappings on VMRUN.
497          */
498         kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
499         kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
500 }
501
502 /*
503  * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
504  * if we are emulating VM-Entry into a guest with NPT enabled.
505  */
506 static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
507                                bool nested_npt, bool reload_pdptrs)
508 {
509         if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
510                 return -EINVAL;
511
512         if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
513             CC(!load_pdptrs(vcpu, cr3)))
514                 return -EINVAL;
515
516         vcpu->arch.cr3 = cr3;
517
518         /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
519         kvm_init_mmu(vcpu);
520
521         if (!nested_npt)
522                 kvm_mmu_new_pgd(vcpu, cr3);
523
524         return 0;
525 }
526
527 void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
528 {
529         if (!svm->nested.vmcb02.ptr)
530                 return;
531
532         /* FIXME: merge g_pat from vmcb01 and vmcb12.  */
533         svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
534 }
535
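/*
 * Load the L2 guest state from vmcb12 into vmcb02 and the vCPU.  Segment,
 * descriptor-table and debug-register state is skipped when the same vmcb12
 * is re-entered and its clean bits indicate those fields are unchanged.
 */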
536 static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
537 {
538         bool new_vmcb12 = false;
539         struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
540
541         nested_vmcb02_compute_g_pat(svm);
542
543         /* Load the nested guest state */
544         if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
545                 new_vmcb12 = true;
546                 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
547                 svm->nested.force_msr_bitmap_recalc = true;
548         }
549
550         if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
551                 vmcb02->save.es = vmcb12->save.es;
552                 vmcb02->save.cs = vmcb12->save.cs;
553                 vmcb02->save.ss = vmcb12->save.ss;
554                 vmcb02->save.ds = vmcb12->save.ds;
555                 vmcb02->save.cpl = vmcb12->save.cpl;
556                 vmcb_mark_dirty(vmcb02, VMCB_SEG);
557         }
558
559         if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
560                 vmcb02->save.gdtr = vmcb12->save.gdtr;
561                 vmcb02->save.idtr = vmcb12->save.idtr;
562                 vmcb_mark_dirty(vmcb02, VMCB_DT);
563         }
564
565         kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
566
567         svm_set_efer(&svm->vcpu, svm->nested.save.efer);
568
569         svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
570         svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);
571
572         svm->vcpu.arch.cr2 = vmcb12->save.cr2;
573
574         kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
575         kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
576         kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
577
578         /* In case we don't even reach vcpu_run, the fields are not updated */
579         vmcb02->save.rax = vmcb12->save.rax;
580         vmcb02->save.rsp = vmcb12->save.rsp;
581         vmcb02->save.rip = vmcb12->save.rip;
582
583         /* These bits will be set properly on the first execution when new_vmcb12 is true */
584         if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
585                 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
586                 svm->vcpu.arch.dr6  = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
587                 vmcb_mark_dirty(vmcb02, VMCB_DR);
588         }
589 }
590
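/*
 * Build the vmcb02 control area: fields owned by KVM and L1 come from
 * vmcb01, fields owned by the nested guest come from the cached vmcb12
 * controls; nested NPT and the combined TSC offset are set up before the
 * vCPU enters guest mode and the intercepts are recalculated.
 */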
591 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
592 {
593         const u32 int_ctl_vmcb01_bits =
594                 V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
595
596         const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
597
598         struct kvm_vcpu *vcpu = &svm->vcpu;
599         struct vmcb *vmcb01 = svm->vmcb01.ptr;
600         struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
601
602         /*
603          * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
604          * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
605          */
606
607         /*
608          * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
609          * avic_physical_id.
610          */
611         WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
612
613         /* Copied from vmcb01.  msrpm_base can be overwritten later.  */
614         vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
615         vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
616         vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
617
618         /* Done at vmrun: asid.  */
619
620         /* Also overwritten later if necessary.  */
621         vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
622
623         /* nested_cr3.  */
624         if (nested_npt_enabled(svm))
625                 nested_svm_init_mmu_context(vcpu);
626
627         vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
628                         vcpu->arch.l1_tsc_offset,
629                         svm->nested.ctl.tsc_offset,
630                         svm->tsc_ratio_msr);
631
632         vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
633
634         if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
635                 WARN_ON(!svm->tsc_scaling_enabled);
636                 nested_svm_update_tsc_ratio_msr(vcpu);
637         }
638
639         vmcb02->control.int_ctl             =
640                 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
641                 (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);
642
643         vmcb02->control.int_vector          = svm->nested.ctl.int_vector;
644         vmcb02->control.int_state           = svm->nested.ctl.int_state;
645         vmcb02->control.event_inj           = svm->nested.ctl.event_inj;
646         vmcb02->control.event_inj_err       = svm->nested.ctl.event_inj_err;
647
648         if (!nested_vmcb_needs_vls_intercept(svm))
649                 vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
650
651         nested_svm_transition_tlb_flush(vcpu);
652
653         /* Enter Guest-Mode */
654         enter_guest_mode(vcpu);
655
656         /*
657          * Merge guest and host intercepts - must be called with vcpu in
658          * guest-mode to take effect.
659          */
660         recalc_intercepts(svm);
661 }
662
663 static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
664 {
665         /*
666          * Some VMCB state is shared between L1 and L2 and thus has to be
667          * moved at the time of nested vmrun and vmexit.
668          *
669          * VMLOAD/VMSAVE state would also belong in this category, but KVM
670          * always performs VMLOAD and VMSAVE from the VMCB01.
671          */
672         to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
673 }
674
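/*
 * Common part of VMRUN emulation and nested state restore: switch to
 * vmcb02, prepare its control and save areas from the cached vmcb12 state,
 * load L2's CR3 and set GIF.  When @from_vmrun is false (KVM_SET_NESTED_STATE),
 * MSR bitmap merging is deferred via KVM_REQ_GET_NESTED_STATE_PAGES.
 */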
675 int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
676                          struct vmcb *vmcb12, bool from_vmrun)
677 {
678         struct vcpu_svm *svm = to_svm(vcpu);
679         int ret;
680
681         trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
682                                vmcb12->save.rip,
683                                vmcb12->control.int_ctl,
684                                vmcb12->control.event_inj,
685                                vmcb12->control.nested_ctl);
686
687         trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
688                                     vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
689                                     vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
690                                     vmcb12->control.intercepts[INTERCEPT_WORD3],
691                                     vmcb12->control.intercepts[INTERCEPT_WORD4],
692                                     vmcb12->control.intercepts[INTERCEPT_WORD5]);
693
694
695         svm->nested.vmcb12_gpa = vmcb12_gpa;
696
697         WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
698
699         nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
700
701         svm_switch_vmcb(svm, &svm->nested.vmcb02);
702         nested_vmcb02_prepare_control(svm);
703         nested_vmcb02_prepare_save(svm, vmcb12);
704
705         ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
706                                   nested_npt_enabled(svm), from_vmrun);
707         if (ret)
708                 return ret;
709
710         if (!npt_enabled)
711                 vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
712
713         if (!from_vmrun)
714                 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
715
716         svm_set_gif(svm, true);
717
718         return 0;
719 }
720
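/*
 * Emulate VMRUN for L1: map vmcb12 (whose GPA is taken from RAX), cache and
 * validate its control and save areas, stash L1 state in vmcb01 and enter
 * guest mode.  Invalid state is reported to L1 as an SVM_EXIT_ERR vmexit.
 */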
721 int nested_svm_vmrun(struct kvm_vcpu *vcpu)
722 {
723         struct vcpu_svm *svm = to_svm(vcpu);
724         int ret;
725         struct vmcb *vmcb12;
726         struct kvm_host_map map;
727         u64 vmcb12_gpa;
728         struct vmcb *vmcb01 = svm->vmcb01.ptr;
729
730         if (!svm->nested.hsave_msr) {
731                 kvm_inject_gp(vcpu, 0);
732                 return 1;
733         }
734
735         if (is_smm(vcpu)) {
736                 kvm_queue_exception(vcpu, UD_VECTOR);
737                 return 1;
738         }
739
740         vmcb12_gpa = svm->vmcb->save.rax;
741         ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
742         if (ret == -EINVAL) {
743                 kvm_inject_gp(vcpu, 0);
744                 return 1;
745         } else if (ret) {
746                 return kvm_skip_emulated_instruction(vcpu);
747         }
748
749         ret = kvm_skip_emulated_instruction(vcpu);
750
751         vmcb12 = map.hva;
752
753         if (WARN_ON_ONCE(!svm->nested.initialized))
754                 return -EINVAL;
755
756         nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
757         nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
758
759         if (!nested_vmcb_check_save(vcpu) ||
760             !nested_vmcb_check_controls(vcpu)) {
761                 vmcb12->control.exit_code    = SVM_EXIT_ERR;
762                 vmcb12->control.exit_code_hi = 0;
763                 vmcb12->control.exit_info_1  = 0;
764                 vmcb12->control.exit_info_2  = 0;
765                 goto out;
766         }
767
768         /*
769          * Since vmcb01 is not in use, we can use it to store some of the L1
770          * state.
771          */
772         vmcb01->save.efer   = vcpu->arch.efer;
773         vmcb01->save.cr0    = kvm_read_cr0(vcpu);
774         vmcb01->save.cr4    = vcpu->arch.cr4;
775         vmcb01->save.rflags = kvm_get_rflags(vcpu);
776         vmcb01->save.rip    = kvm_rip_read(vcpu);
777
778         if (!npt_enabled)
779                 vmcb01->save.cr3 = kvm_read_cr3(vcpu);
780
781         svm->nested.nested_run_pending = 1;
782
783         if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
784                 goto out_exit_err;
785
786         if (nested_svm_vmrun_msrpm(svm))
787                 goto out;
788
789 out_exit_err:
790         svm->nested.nested_run_pending = 0;
791
792         svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
793         svm->vmcb->control.exit_code_hi = 0;
794         svm->vmcb->control.exit_info_1  = 0;
795         svm->vmcb->control.exit_info_2  = 0;
796
797         nested_svm_vmexit(svm);
798
799 out:
800         kvm_vcpu_unmap(vcpu, &map, true);
801
802         return ret;
803 }
804
805 /* Copy state save area fields which are handled by VMRUN */
806 void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
807                           struct vmcb_save_area *from_save)
808 {
809         to_save->es = from_save->es;
810         to_save->cs = from_save->cs;
811         to_save->ss = from_save->ss;
812         to_save->ds = from_save->ds;
813         to_save->gdtr = from_save->gdtr;
814         to_save->idtr = from_save->idtr;
815         to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
816         to_save->efer = from_save->efer;
817         to_save->cr0 = from_save->cr0;
818         to_save->cr3 = from_save->cr3;
819         to_save->cr4 = from_save->cr4;
820         to_save->rax = from_save->rax;
821         to_save->rsp = from_save->rsp;
822         to_save->rip = from_save->rip;
823         to_save->cpl = 0;
824 }
825
826 void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
827 {
828         to_vmcb->save.fs = from_vmcb->save.fs;
829         to_vmcb->save.gs = from_vmcb->save.gs;
830         to_vmcb->save.tr = from_vmcb->save.tr;
831         to_vmcb->save.ldtr = from_vmcb->save.ldtr;
832         to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
833         to_vmcb->save.star = from_vmcb->save.star;
834         to_vmcb->save.lstar = from_vmcb->save.lstar;
835         to_vmcb->save.cstar = from_vmcb->save.cstar;
836         to_vmcb->save.sfmask = from_vmcb->save.sfmask;
837         to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
838         to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
839         to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
840 }
841
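/*
 * Emulate #VMEXIT from L2 to L1: copy the L2 state and exit information
 * into vmcb12, switch back to vmcb01, restore the L1 state that was saved
 * at VMRUN time and reload L1's CR3 and MMU context.
 */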
842 int nested_svm_vmexit(struct vcpu_svm *svm)
843 {
844         struct kvm_vcpu *vcpu = &svm->vcpu;
845         struct vmcb *vmcb01 = svm->vmcb01.ptr;
846         struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
847         struct vmcb *vmcb12;
848         struct kvm_host_map map;
849         int rc;
850
851         /* Triple faults in L2 should never escape. */
852         WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
853
854         rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
855         if (rc) {
856                 if (rc == -EINVAL)
857                         kvm_inject_gp(vcpu, 0);
858                 return 1;
859         }
860
861         vmcb12 = map.hva;
862
863         /* Exit Guest-Mode */
864         leave_guest_mode(vcpu);
865         svm->nested.vmcb12_gpa = 0;
866         WARN_ON_ONCE(svm->nested.nested_run_pending);
867
868         kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
869
870         /* in case we halted in L2 */
871         svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
872
873         /* Give the current vmcb to the guest */
874
875         vmcb12->save.es     = vmcb02->save.es;
876         vmcb12->save.cs     = vmcb02->save.cs;
877         vmcb12->save.ss     = vmcb02->save.ss;
878         vmcb12->save.ds     = vmcb02->save.ds;
879         vmcb12->save.gdtr   = vmcb02->save.gdtr;
880         vmcb12->save.idtr   = vmcb02->save.idtr;
881         vmcb12->save.efer   = svm->vcpu.arch.efer;
882         vmcb12->save.cr0    = kvm_read_cr0(vcpu);
883         vmcb12->save.cr3    = kvm_read_cr3(vcpu);
884         vmcb12->save.cr2    = vmcb02->save.cr2;
885         vmcb12->save.cr4    = svm->vcpu.arch.cr4;
886         vmcb12->save.rflags = kvm_get_rflags(vcpu);
887         vmcb12->save.rip    = kvm_rip_read(vcpu);
888         vmcb12->save.rsp    = kvm_rsp_read(vcpu);
889         vmcb12->save.rax    = kvm_rax_read(vcpu);
890         vmcb12->save.dr7    = vmcb02->save.dr7;
891         vmcb12->save.dr6    = svm->vcpu.arch.dr6;
892         vmcb12->save.cpl    = vmcb02->save.cpl;
893
894         vmcb12->control.int_state         = vmcb02->control.int_state;
895         vmcb12->control.exit_code         = vmcb02->control.exit_code;
896         vmcb12->control.exit_code_hi      = vmcb02->control.exit_code_hi;
897         vmcb12->control.exit_info_1       = vmcb02->control.exit_info_1;
898         vmcb12->control.exit_info_2       = vmcb02->control.exit_info_2;
899
900         if (vmcb12->control.exit_code != SVM_EXIT_ERR)
901                 nested_save_pending_event_to_vmcb12(svm, vmcb12);
902
903         if (svm->nrips_enabled)
904                 vmcb12->control.next_rip  = vmcb02->control.next_rip;
905
906         vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
907         vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
908         vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
909         vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;
910
911         nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
912
913         svm_switch_vmcb(svm, &svm->vmcb01);
914
915         /*
916          * On vmexit, GIF is set to false and
917          * no event can be injected into L1.
918          */
919         svm_set_gif(svm, false);
920         vmcb01->control.exit_int_info = 0;
921
922         svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
923         if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
924                 vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
925                 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
926         }
927
928         if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
929                 WARN_ON(!svm->tsc_scaling_enabled);
930                 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
931                 svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
932         }
933
934         svm->nested.ctl.nested_cr3 = 0;
935
936         /*
937          * Restore processor state that had been saved in vmcb01
938          */
939         kvm_set_rflags(vcpu, vmcb01->save.rflags);
940         svm_set_efer(vcpu, vmcb01->save.efer);
941         svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
942         svm_set_cr4(vcpu, vmcb01->save.cr4);
943         kvm_rax_write(vcpu, vmcb01->save.rax);
944         kvm_rsp_write(vcpu, vmcb01->save.rsp);
945         kvm_rip_write(vcpu, vmcb01->save.rip);
946
947         svm->vcpu.arch.dr7 = DR7_FIXED_1;
948         kvm_update_dr7(&svm->vcpu);
949
950         trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
951                                        vmcb12->control.exit_info_1,
952                                        vmcb12->control.exit_info_2,
953                                        vmcb12->control.exit_int_info,
954                                        vmcb12->control.exit_int_info_err,
955                                        KVM_ISA_SVM);
956
957         kvm_vcpu_unmap(vcpu, &map, true);
958
959         nested_svm_transition_tlb_flush(vcpu);
960
961         nested_svm_uninit_mmu_context(vcpu);
962
963         rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
964         if (rc)
965                 return 1;
966
967         /*
968          * Drop what we picked up for L2 via svm_complete_interrupts() so it
969          * doesn't end up in L1.
970          */
971         svm->vcpu.arch.nmi_injected = false;
972         kvm_clear_exception_queue(vcpu);
973         kvm_clear_interrupt_queue(vcpu);
974
975         /*
976          * If we are here following the completion of a VMRUN that
977          * is being single-stepped, queue the pending #DB intercept
978          * right now so that it can be accounted for before we execute
979          * L1's next instruction.
980          */
981         if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
982                 kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
983
984         return 0;
985 }
986
987 static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
988 {
989         nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
990 }
991
992 int svm_allocate_nested(struct vcpu_svm *svm)
993 {
994         struct page *vmcb02_page;
995
996         if (svm->nested.initialized)
997                 return 0;
998
999         vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1000         if (!vmcb02_page)
1001                 return -ENOMEM;
1002         svm->nested.vmcb02.ptr = page_address(vmcb02_page);
1003         svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
1004
1005         svm->nested.msrpm = svm_vcpu_alloc_msrpm();
1006         if (!svm->nested.msrpm)
1007                 goto err_free_vmcb02;
1008         svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
1009
1010         svm->nested.initialized = true;
1011         return 0;
1012
1013 err_free_vmcb02:
1014         __free_page(vmcb02_page);
1015         return -ENOMEM;
1016 }
1017
1018 void svm_free_nested(struct vcpu_svm *svm)
1019 {
1020         if (!svm->nested.initialized)
1021                 return;
1022
1023         svm_vcpu_free_msrpm(svm->nested.msrpm);
1024         svm->nested.msrpm = NULL;
1025
1026         __free_page(virt_to_page(svm->nested.vmcb02.ptr));
1027         svm->nested.vmcb02.ptr = NULL;
1028
1029         /*
1030          * When last_vmcb12_gpa matches the current vmcb12 gpa,
1031          * some vmcb12 fields are not loaded if they are marked clean
1032          * in the vmcb12, since in this case they are up to date already.
1033          *
1034          * When the vmcb02 is freed, this optimization becomes invalid.
1035          */
1036         svm->nested.last_vmcb12_gpa = INVALID_GPA;
1037
1038         svm->nested.initialized = false;
1039 }
1040
1041 /*
1042  * Forcibly leave nested mode in order to be able to reset the VCPU later on.
1043  */
1044 void svm_leave_nested(struct kvm_vcpu *vcpu)
1045 {
1046         struct vcpu_svm *svm = to_svm(vcpu);
1047
1048         if (is_guest_mode(vcpu)) {
1049                 svm->nested.nested_run_pending = 0;
1050                 svm->nested.vmcb12_gpa = INVALID_GPA;
1051
1052                 leave_guest_mode(vcpu);
1053
1054                 svm_switch_vmcb(svm, &svm->vmcb01);
1055
1056                 nested_svm_uninit_mmu_context(vcpu);
1057                 vmcb_mark_all_dirty(svm->vmcb);
1058         }
1059
1060         kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1061 }
1062
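/*
 * Decide whether an MSR access that exited to L0 should be forwarded to L1.
 * Each MSR occupies two consecutive bits in the permission bitmap, read
 * access first and write access second, hence the "2 * (msr & 0xf) + write"
 * bit index within the 32-bit chunk read from L1's bitmap.
 */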
1063 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1064 {
1065         u32 offset, msr, value;
1066         int write, mask;
1067
1068         if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1069                 return NESTED_EXIT_HOST;
1070
1071         msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1072         offset = svm_msrpm_offset(msr);
1073         write  = svm->vmcb->control.exit_info_1 & 1;
1074         mask   = 1 << ((2 * (msr & 0xf)) + write);
1075
1076         if (offset == MSR_INVALID)
1077                 return NESTED_EXIT_DONE;
1078
1079         /* Offset is in 32-bit units but we need it in 8-bit units */
1080         offset *= 4;
1081
1082         if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
1083                 return NESTED_EXIT_DONE;
1084
1085         return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1086 }
1087
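/*
 * Decide whether an intercepted IN/OUT should be forwarded to L1 by
 * consulting L1's IO permission bitmap: one bit per port, so the byte
 * offset is port / 8 and the first bit is port % 8; two bytes are read when
 * a multi-byte access crosses a byte boundary.  Illustrative example (not
 * from the original source): port 0x3f8 with size 1 reads IOPM byte 0x7f
 * and tests bit 0.
 */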
1088 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
1089 {
1090         unsigned port, size, iopm_len;
1091         u16 val, mask;
1092         u8 start_bit;
1093         u64 gpa;
1094
1095         if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1096                 return NESTED_EXIT_HOST;
1097
1098         port = svm->vmcb->control.exit_info_1 >> 16;
1099         size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1100                 SVM_IOIO_SIZE_SHIFT;
1101         gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
1102         start_bit = port % 8;
1103         iopm_len = (start_bit + size > 8) ? 2 : 1;
1104         mask = (0xf >> (4 - size)) << start_bit;
1105         val = 0;
1106
1107         if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1108                 return NESTED_EXIT_DONE;
1109
1110         return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1111 }
1112
1113 static int nested_svm_intercept(struct vcpu_svm *svm)
1114 {
1115         u32 exit_code = svm->vmcb->control.exit_code;
1116         int vmexit = NESTED_EXIT_HOST;
1117
1118         switch (exit_code) {
1119         case SVM_EXIT_MSR:
1120                 vmexit = nested_svm_exit_handled_msr(svm);
1121                 break;
1122         case SVM_EXIT_IOIO:
1123                 vmexit = nested_svm_intercept_ioio(svm);
1124                 break;
1125         case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
1126                 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1127                         vmexit = NESTED_EXIT_DONE;
1128                 break;
1129         }
1130         case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
1131                 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1132                         vmexit = NESTED_EXIT_DONE;
1133                 break;
1134         }
1135         case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1136                 /*
1137                  * Host-intercepted exceptions have been checked already in
1138                  * nested_svm_exit_special.  There is nothing to do here,
1139                  * the vmexit is injected by svm_check_nested_events.
1140                  */
1141                 vmexit = NESTED_EXIT_DONE;
1142                 break;
1143         }
1144         case SVM_EXIT_ERR: {
1145                 vmexit = NESTED_EXIT_DONE;
1146                 break;
1147         }
1148         default: {
1149                 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1150                         vmexit = NESTED_EXIT_DONE;
1151         }
1152         }
1153
1154         return vmexit;
1155 }
1156
1157 int nested_svm_exit_handled(struct vcpu_svm *svm)
1158 {
1159         int vmexit;
1160
1161         vmexit = nested_svm_intercept(svm);
1162
1163         if (vmexit == NESTED_EXIT_DONE)
1164                 nested_svm_vmexit(svm);
1165
1166         return vmexit;
1167 }
1168
1169 int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1170 {
1171         if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
1172                 kvm_queue_exception(vcpu, UD_VECTOR);
1173                 return 1;
1174         }
1175
1176         if (to_svm(vcpu)->vmcb->save.cpl) {
1177                 kvm_inject_gp(vcpu, 0);
1178                 return 1;
1179         }
1180
1181         return 0;
1182 }
1183
1184 static bool nested_exit_on_exception(struct vcpu_svm *svm)
1185 {
1186         unsigned int nr = svm->vcpu.arch.exception.nr;
1187
1188         return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
1189 }
1190
1191 static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
1192 {
1193         unsigned int nr = svm->vcpu.arch.exception.nr;
1194         struct vmcb *vmcb = svm->vmcb;
1195
1196         vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1197         vmcb->control.exit_code_hi = 0;
1198
1199         if (svm->vcpu.arch.exception.has_error_code)
1200                 vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
1201
1202         /*
1203          * EXITINFO2 is undefined for all exception intercepts other
1204          * than #PF.
1205          */
1206         if (nr == PF_VECTOR) {
1207                 if (svm->vcpu.arch.exception.nested_apf)
1208                         vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
1209                 else if (svm->vcpu.arch.exception.has_payload)
1210                         vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
1211                 else
1212                         vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1213         } else if (nr == DB_VECTOR) {
1214                 /* See inject_pending_event.  */
1215                 kvm_deliver_exception_payload(&svm->vcpu);
1216                 if (svm->vcpu.arch.dr7 & DR7_GD) {
1217                         svm->vcpu.arch.dr7 &= ~DR7_GD;
1218                         kvm_update_dr7(&svm->vcpu);
1219                 }
1220         } else
1221                 WARN_ON(svm->vcpu.arch.exception.has_payload);
1222
1223         nested_svm_vmexit(svm);
1224 }
1225
1226 static inline bool nested_exit_on_init(struct vcpu_svm *svm)
1227 {
1228         return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
1229 }
1230
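/*
 * Check pending INIT, exception, SMI, NMI and interrupt events in that
 * order and reflect each one to L1 as a synthetic vmexit when L1 intercepts
 * it.  -EBUSY tells the caller to retry once event injection is no longer
 * blocked, e.g. when no nested VMRUN or event reinjection is pending.
 */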
1231 static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1232 {
1233         struct vcpu_svm *svm = to_svm(vcpu);
1234         bool block_nested_events =
1235                 kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
1236         struct kvm_lapic *apic = vcpu->arch.apic;
1237
1238         if (lapic_in_kernel(vcpu) &&
1239             test_bit(KVM_APIC_INIT, &apic->pending_events)) {
1240                 if (block_nested_events)
1241                         return -EBUSY;
1242                 if (!nested_exit_on_init(svm))
1243                         return 0;
1244                 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
1245                 return 0;
1246         }
1247
1248         if (vcpu->arch.exception.pending) {
1249                 /*
1250                  * Only a pending nested run can block a pending exception.
1251                  * Otherwise an injected NMI/interrupt should either be
1252                  * lost or delivered to the nested hypervisor in the EXITINTINFO
1253                  * vmcb field, while delivering the pending exception.
1254                  */
1255                 if (svm->nested.nested_run_pending)
1256                         return -EBUSY;
1257                 if (!nested_exit_on_exception(svm))
1258                         return 0;
1259                 nested_svm_inject_exception_vmexit(svm);
1260                 return 0;
1261         }
1262
1263         if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
1264                 if (block_nested_events)
1265                         return -EBUSY;
1266                 if (!nested_exit_on_smi(svm))
1267                         return 0;
1268                 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
1269                 return 0;
1270         }
1271
1272         if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
1273                 if (block_nested_events)
1274                         return -EBUSY;
1275                 if (!nested_exit_on_nmi(svm))
1276                         return 0;
1277                 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
1278                 return 0;
1279         }
1280
1281         if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1282                 if (block_nested_events)
1283                         return -EBUSY;
1284                 if (!nested_exit_on_intr(svm))
1285                         return 0;
1286                 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1287                 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1288                 return 0;
1289         }
1290
1291         return 0;
1292 }
1293
1294 int nested_svm_exit_special(struct vcpu_svm *svm)
1295 {
1296         u32 exit_code = svm->vmcb->control.exit_code;
1297
1298         switch (exit_code) {
1299         case SVM_EXIT_INTR:
1300         case SVM_EXIT_NMI:
1301         case SVM_EXIT_NPF:
1302                 return NESTED_EXIT_HOST;
1303         case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1304                 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1305
1306                 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
1307                     excp_bits)
1308                         return NESTED_EXIT_HOST;
1309                 else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1310                          svm->vcpu.arch.apf.host_apf_flags)
1311                         /* Trap async PF even if not shadowing */
1312                         return NESTED_EXIT_HOST;
1313                 break;
1314         }
1315         default:
1316                 break;
1317         }
1318
1319         return NESTED_EXIT_CONTINUE;
1320 }
1321
1322 void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
1323 {
1324         struct vcpu_svm *svm = to_svm(vcpu);
1325
1326         vcpu->arch.tsc_scaling_ratio =
1327                 kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
1328                                                svm->tsc_ratio_msr);
1329         svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
1330 }
1331
1332 /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
1333 static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
1334                                               struct vmcb_ctrl_area_cached *from)
1335 {
1336         unsigned int i;
1337
1338         memset(dst, 0, sizeof(struct vmcb_control_area));
1339
1340         for (i = 0; i < MAX_INTERCEPT; i++)
1341                 dst->intercepts[i] = from->intercepts[i];
1342
1343         dst->iopm_base_pa         = from->iopm_base_pa;
1344         dst->msrpm_base_pa        = from->msrpm_base_pa;
1345         dst->tsc_offset           = from->tsc_offset;
1346         dst->asid                 = from->asid;
1347         dst->tlb_ctl              = from->tlb_ctl;
1348         dst->int_ctl              = from->int_ctl;
1349         dst->int_vector           = from->int_vector;
1350         dst->int_state            = from->int_state;
1351         dst->exit_code            = from->exit_code;
1352         dst->exit_code_hi         = from->exit_code_hi;
1353         dst->exit_info_1          = from->exit_info_1;
1354         dst->exit_info_2          = from->exit_info_2;
1355         dst->exit_int_info        = from->exit_int_info;
1356         dst->exit_int_info_err    = from->exit_int_info_err;
1357         dst->nested_ctl           = from->nested_ctl;
1358         dst->event_inj            = from->event_inj;
1359         dst->event_inj_err        = from->event_inj_err;
1360         dst->nested_cr3           = from->nested_cr3;
1361         dst->virt_ext              = from->virt_ext;
1362         dst->pause_filter_count   = from->pause_filter_count;
1363         dst->pause_filter_thresh  = from->pause_filter_thresh;
1364         /* 'clean' and 'reserved_sw' are not changed by KVM */
1365 }
1366
1367 static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1368                                 struct kvm_nested_state __user *user_kvm_nested_state,
1369                                 u32 user_data_size)
1370 {
1371         struct vcpu_svm *svm;
1372         struct vmcb_control_area *ctl;
1373         unsigned long r;
1374         struct kvm_nested_state kvm_state = {
1375                 .flags = 0,
1376                 .format = KVM_STATE_NESTED_FORMAT_SVM,
1377                 .size = sizeof(kvm_state),
1378         };
1379         struct vmcb __user *user_vmcb = (struct vmcb __user *)
1380                 &user_kvm_nested_state->data.svm[0];
1381
1382         if (!vcpu)
1383                 return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1384
1385         svm = to_svm(vcpu);
1386
1387         if (user_data_size < kvm_state.size)
1388                 goto out;
1389
1390         /* First fill in the header and copy it out.  */
1391         if (is_guest_mode(vcpu)) {
1392                 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1393                 kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1394                 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1395
1396                 if (svm->nested.nested_run_pending)
1397                         kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1398         }
1399
1400         if (gif_set(svm))
1401                 kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1402
1403         if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1404                 return -EFAULT;
1405
1406         if (!is_guest_mode(vcpu))
1407                 goto out;
1408
1409         /*
1410          * Copy over the full size of the VMCB rather than just the size
1411          * of the structs.
1412          */
1413         if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1414                 return -EFAULT;
1415
1416         ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
1417         if (!ctl)
1418                 return -ENOMEM;
1419
1420         nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
1421         r = copy_to_user(&user_vmcb->control, ctl,
1422                          sizeof(user_vmcb->control));
1423         kfree(ctl);
1424         if (r)
1425                 return -EFAULT;
1426
1427         if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1428                          sizeof(user_vmcb->save)))
1429                 return -EFAULT;
1430 out:
1431         return kvm_state.size;
1432 }
1433
1434 static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1435                                 struct kvm_nested_state __user *user_kvm_nested_state,
1436                                 struct kvm_nested_state *kvm_state)
1437 {
1438         struct vcpu_svm *svm = to_svm(vcpu);
1439         struct vmcb __user *user_vmcb = (struct vmcb __user *)
1440                 &user_kvm_nested_state->data.svm[0];
1441         struct vmcb_control_area *ctl;
1442         struct vmcb_save_area *save;
1443         struct vmcb_save_area_cached save_cached;
1444         struct vmcb_ctrl_area_cached ctl_cached;
1445         unsigned long cr0;
1446         int ret;
1447
1448         BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
1449                      KVM_STATE_NESTED_SVM_VMCB_SIZE);
1450
1451         if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1452                 return -EINVAL;
1453
1454         if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1455                                  KVM_STATE_NESTED_RUN_PENDING |
1456                                  KVM_STATE_NESTED_GIF_SET))
1457                 return -EINVAL;
1458
1459         /*
1460          * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1461          * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1462          */
1463         if (!(vcpu->arch.efer & EFER_SVME)) {
1464                 /* GIF=1 and no guest mode are required if SVME=0.  */
1465                 if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1466                         return -EINVAL;
1467         }
1468
1469         /* SMM temporarily disables SVM, so we cannot be in guest mode.  */
1470         if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1471                 return -EINVAL;
1472
1473         if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1474                 svm_leave_nested(vcpu);
1475                 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1476                 return 0;
1477         }
1478
1479         if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1480                 return -EINVAL;
1481         if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1482                 return -EINVAL;
1483
1484         ret  = -ENOMEM;
1485         ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL_ACCOUNT);
1486         save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
1487         if (!ctl || !save)
1488                 goto out_free;
1489
1490         ret = -EFAULT;
1491         if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
1492                 goto out_free;
1493         if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1494                 goto out_free;
1495
1496         ret = -EINVAL;
1497         __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
1498         if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
1499                 goto out_free;
1500
1501         /*
1502          * Processor state contains L2 state.  Check that it is
1503          * valid for guest mode (see nested_vmcb_check_save).
1504          */
1505         cr0 = kvm_read_cr0(vcpu);
1506         if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
1507                 goto out_free;
1508
1509         /*
1510          * Validate host state saved from before VMRUN (see
1511          * nested_svm_check_permissions).
1512          */
1513         __nested_copy_vmcb_save_to_cache(&save_cached, save);
1514         if (!(save->cr0 & X86_CR0_PG) ||
1515             !(save->cr0 & X86_CR0_PE) ||
1516             (save->rflags & X86_EFLAGS_VM) ||
1517             !__nested_vmcb_check_save(vcpu, &save_cached))
1518                 goto out_free;
1519
1520
1521         /*
1522          * All checks done, we can enter guest mode. Userspace provides
1523          * vmcb12.control, which will be combined with L1 and stored into
1524          * vmcb02, and the L1 save state which we store in vmcb01.
1525          * L2 registers if needed are moved from the current VMCB to VMCB02.
1526          * The L2 registers, if needed, are moved from the current VMCB to VMCB02.
1527
1528         if (is_guest_mode(vcpu))
1529                 svm_leave_nested(vcpu);
1530         else
1531                 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1532
1533         svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1534
1535         svm->nested.nested_run_pending =
1536                 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
1537
1538         svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1539
1540         svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
1541         nested_copy_vmcb_control_to_cache(svm, ctl);
1542
1543         svm_switch_vmcb(svm, &svm->nested.vmcb02);
1544         nested_vmcb02_prepare_control(svm);
1545
1546         /*
1547          * While the nested guest CR3 is already checked and set by
1548          * KVM_SET_SREGS, it was set before the nested state was loaded,
1549          * thus the MMU might not be initialized correctly.
1550          * Set it again to fix this.
1551          */
1552
1553         ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1554                                   nested_npt_enabled(svm), false);
1555         if (WARN_ON_ONCE(ret))
1556                 goto out_free;
1557
1558         svm->nested.force_msr_bitmap_recalc = true;
1559
1560         kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1561         ret = 0;
1562 out_free:
1563         kfree(save);
1564         kfree(ctl);
1565
1566         return ret;
1567 }
1568
1569 static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1570 {
1571         struct vcpu_svm *svm = to_svm(vcpu);
1572
1573         if (WARN_ON(!is_guest_mode(vcpu)))
1574                 return true;
1575
1576         if (!vcpu->arch.pdptrs_from_userspace &&
1577             !nested_npt_enabled(svm) && is_pae_paging(vcpu))
1578                 /*
1579                  * Reload the guest's PDPTRs since after a migration
1580                  * the guest CR3 might be restored prior to setting the nested
1581                  * state, which can lead to loading the wrong PDPTRs.
1582                  */
1583                 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
1584                         return false;
1585
1586         if (!nested_svm_vmrun_msrpm(svm)) {
1587                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1588                 vcpu->run->internal.suberror =
1589                         KVM_INTERNAL_ERROR_EMULATION;
1590                 vcpu->run->internal.ndata = 0;
1591                 return false;
1592         }
1593
1594         return true;
1595 }
1596
1597 struct kvm_x86_nested_ops svm_nested_ops = {
1598         .leave_nested = svm_leave_nested,
1599         .check_events = svm_check_nested_events,
1600         .triple_fault = nested_svm_triple_fault,
1601         .get_nested_state_pages = svm_get_nested_state_pages,
1602         .get_state = svm_get_nested_state,
1603         .set_state = svm_set_nested_state,
1604 };