// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

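/*
 * Reflect a nested page fault into L1 as an NPF vmexit.  If the current
 * exit is not already SVM_EXIT_NPF, synthesize one; either way the low
 * 32 bits of exit_info_1 carry the fault's error code.
 */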
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
                                       struct x86_exception *fault)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
                /*
                 * TODO: track the cause of the nested page fault, and
                 * correctly fill in the high bits of exit_info_1.
                 */
                svm->vmcb->control.exit_code = SVM_EXIT_NPF;
                svm->vmcb->control.exit_code_hi = 0;
                svm->vmcb->control.exit_info_1 = (1ULL << 32);
                svm->vmcb->control.exit_info_2 = fault->address;
        }

        svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
        svm->vmcb->control.exit_info_1 |= fault->error_code;

        nested_svm_vmexit(svm);
}

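/*
 * If L1 intercepts #PF and no nested VMRUN is pending, turn the page
 * fault into a #PF vmexit to L1; otherwise inject it directly into L2.
 */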
static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        WARN_ON(!is_guest_mode(vcpu));

        if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
            !svm->nested.nested_run_pending) {
                svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
                svm->vmcb->control.exit_code_hi = 0;
                svm->vmcb->control.exit_info_1 = fault->error_code;
                svm->vmcb->control.exit_info_2 = fault->address;
                nested_svm_vmexit(svm);
        } else {
                kvm_inject_page_fault(vcpu, fault);
        }
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr3 = svm->nested.ctl.nested_cr3;
        u64 pdpte;
        int ret;

        ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
                                       offset_in_page(cr3) + index * 8, 8);
        if (ret)
                return 0;
        return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *hsave = svm->nested.hsave;

        WARN_ON(mmu_is_nested(vcpu));

        vcpu->arch.mmu = &vcpu->arch.guest_mmu;
        kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
                                svm->nested.ctl.nested_cr3);
        vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
        vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
        reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
        vcpu->arch.walk_mmu               = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

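/*
 * Recompute the active VMCB's intercept bitmaps as the union of the host
 * (L1) intercepts saved in hsave and the guest (L2) intercepts from the
 * nested control area, with the adjustments below for CR8, VINTR and
 * VMMCALL.  Only meaningful while the vcpu is in guest mode.
 */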
void recalc_intercepts(struct vcpu_svm *svm)
{
        struct vmcb_control_area *c, *h, *g;
        unsigned int i;

        vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

        if (!is_guest_mode(&svm->vcpu))
                return;

        c = &svm->vmcb->control;
        h = &svm->nested.hsave->control;
        g = &svm->nested.ctl;

        for (i = 0; i < MAX_INTERCEPT; i++)
                c->intercepts[i] = h->intercepts[i];

        if (g->int_ctl & V_INTR_MASKING_MASK) {
                /* We only want the cr8 intercept bits of L1 */
                vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
                vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

                /*
                 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
                 * affect any interrupt we may want to inject; therefore,
                 * interrupt window vmexits are irrelevant to L0.
                 */
                vmcb_clr_intercept(c, INTERCEPT_VINTR);
        }

        /* We don't want to see VMMCALLs from a nested guest */
        vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

        for (i = 0; i < MAX_INTERCEPT; i++)
                c->intercepts[i] |= g->intercepts[i];
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
                                   struct vmcb_control_area *from)
{
        unsigned int i;

        for (i = 0; i < MAX_INTERCEPT; i++)
                dst->intercepts[i] = from->intercepts[i];

        dst->iopm_base_pa         = from->iopm_base_pa;
        dst->msrpm_base_pa        = from->msrpm_base_pa;
        dst->tsc_offset           = from->tsc_offset;
        /* asid not copied, it is handled manually for svm->vmcb.  */
        dst->tlb_ctl              = from->tlb_ctl;
        dst->int_ctl              = from->int_ctl;
        dst->int_vector           = from->int_vector;
        dst->int_state            = from->int_state;
        dst->exit_code            = from->exit_code;
        dst->exit_code_hi         = from->exit_code_hi;
        dst->exit_info_1          = from->exit_info_1;
        dst->exit_info_2          = from->exit_info_2;
        dst->exit_int_info        = from->exit_int_info;
        dst->exit_int_info_err    = from->exit_int_info_err;
        dst->nested_ctl           = from->nested_ctl;
        dst->event_inj            = from->event_inj;
        dst->event_inj_err        = from->event_inj_err;
        dst->nested_cr3           = from->nested_cr3;
        dst->virt_ext             = from->virt_ext;
        dst->pause_filter_count   = from->pause_filter_count;
        dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
        /*
         * This function merges the msr permission bitmaps of kvm and the
         * nested vmcb.  It is optimized in that it only merges the parts
         * where the kvm msr permission bitmap may contain zero bits.
         */
        int i;

        if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return true;

        for (i = 0; i < MSRPM_OFFSETS; i++) {
                u32 value, p;
                u64 offset;

                if (msrpm_offsets[i] == 0xffffffff)
                        break;

                p      = msrpm_offsets[i];
                offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

                if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
                        return false;

                svm->nested.msrpm[p] = svm->msrpm[p] | value;
        }

        svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

        return true;
}

static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (WARN_ON(!is_guest_mode(vcpu)))
                return true;

        if (!nested_svm_vmrun_msrpm(svm)) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror =
                        KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
                return false;
        }

        return true;
}

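/*
 * Consistency checks on the vmcb12 control area: the VMRUN intercept
 * must be set, the ASID must be non-zero, and nested paging may only
 * be requested if NPT is enabled on the host.
 */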
static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
        if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
                return false;

        if (control->asid == 0)
                return false;

        if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
            !npt_enabled)
                return false;

        return true;
}

static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        bool vmcb12_lma;

        /*
         * FIXME: these should be done after copying the fields,
         * to avoid TOC/TOU races.  For these save area checks
         * the possible damage is limited since kvm_set_cr0 and
         * kvm_set_cr4 handle failure; EFER_SVME is an exception
         * so it is force-set later in nested_prepare_vmcb_save.
         */
        if ((vmcb12->save.efer & EFER_SVME) == 0)
                return false;

        if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
                return false;

        if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
                return false;

        vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);

        if (vmcb12_lma) {
                if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
                    !(vmcb12->save.cr0 & X86_CR0_PE) ||
                    kvm_vcpu_is_illegal_gpa(vcpu, vmcb12->save.cr3))
                        return false;
        }
        if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
                return false;

        return true;
}

static void load_nested_vmcb_control(struct vcpu_svm *svm,
                                     struct vmcb_control_area *control)
{
        copy_vmcb_control_area(&svm->nested.ctl, control);

        /* Copy it here because nested_vmcb_check_controls will check it.  */
        svm->nested.ctl.asid           = control->asid;
        svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
        svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the nested_vmcb.
 */
void sync_nested_vmcb_control(struct vcpu_svm *svm)
{
        u32 mask;

        svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
        svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

        /* Only a few fields of int_ctl are written by the processor.  */
        mask = V_IRQ_MASK | V_TPR_MASK;
        if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
            svm_is_intercept(svm, INTERCEPT_VINTR)) {
                /*
                 * In order to request an interrupt window, L0 is usurping
                 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
                 * even if it was clear in L1's VMCB.  Restoring it would be
                 * wrong.  However, in this case V_IRQ will remain true until
                 * interrupt_window_interception calls svm_clear_vintr and
                 * restores int_ctl.  We can just leave it aside.
                 */
                mask &= ~V_IRQ_MASK;
        }
        svm->nested.ctl.int_ctl        &= ~mask;
        svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
                                           struct vmcb *vmcb12)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        u32 exit_int_info = 0;
        unsigned int nr;

        if (vcpu->arch.exception.injected) {
                nr = vcpu->arch.exception.nr;
                exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

                if (vcpu->arch.exception.has_error_code) {
                        exit_int_info |= SVM_EVTINJ_VALID_ERR;
                        vmcb12->control.exit_int_info_err =
                                vcpu->arch.exception.error_code;
                }

        } else if (vcpu->arch.nmi_injected) {
                exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

        } else if (vcpu->arch.interrupt.injected) {
                nr = vcpu->arch.interrupt.nr;
                exit_int_info = nr | SVM_EVTINJ_VALID;

                if (vcpu->arch.interrupt.soft)
                        exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
                else
                        exit_int_info |= SVM_EVTINJ_TYPE_INTR;
        }

        vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
        return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
                               bool nested_npt)
{
        if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
                return -EINVAL;

        if (!nested_npt && is_pae_paging(vcpu) &&
            (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
                if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
                        return -EINVAL;
        }

        /*
         * TODO: optimize unconditional TLB flush/MMU sync here and in
         * kvm_init_shadow_npt_mmu().
         */
        if (!nested_npt)
                kvm_mmu_new_pgd(vcpu, cr3, false, false);

        vcpu->arch.cr3 = cr3;
        kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

        kvm_init_mmu(vcpu, false);

        return 0;
}

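/*
 * Copy the L2 guest state from vmcb12 into the current VMCB and the
 * vcpu's architectural registers in preparation for running L2.
 */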
static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
        /* Load the nested guest state */
        svm->vmcb->save.es = vmcb12->save.es;
        svm->vmcb->save.cs = vmcb12->save.cs;
        svm->vmcb->save.ss = vmcb12->save.ss;
        svm->vmcb->save.ds = vmcb12->save.ds;
        svm->vmcb->save.gdtr = vmcb12->save.gdtr;
        svm->vmcb->save.idtr = vmcb12->save.idtr;
        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

        /*
         * Force-set EFER_SVME even though it is checked earlier on the
         * VMCB12, because the guest can flip the bit between the check
         * and now.  Clearing EFER_SVME would call svm_free_nested.
         */
        svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);

        svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
        svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
        kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
        kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
        kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

        /* In case we don't even reach vcpu_run, the fields are not updated */
        svm->vmcb->save.rax = vmcb12->save.rax;
        svm->vmcb->save.rsp = vmcb12->save.rsp;
        svm->vmcb->save.rip = vmcb12->save.rip;
        svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
        svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
        svm->vmcb->save.cpl = vmcb12->save.cpl;
}

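/*
 * Set up the current VMCB's control area for running L2: program the
 * nested MMU if NPT is requested, combine L1's and L2's TSC offsets,
 * take the V_INTR_MASKING and virtual-GIF bits of int_ctl from the
 * saved L1 controls, and enter guest mode.
 */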
static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
{
        const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;

        if (nested_npt_enabled(svm))
                nested_svm_init_mmu_context(&svm->vcpu);

        svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
                svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

        svm->vmcb->control.int_ctl             =
                (svm->nested.ctl.int_ctl & ~mask) |
                (svm->nested.hsave->control.int_ctl & mask);

        svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
        svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
        svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
        svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
        svm->vmcb->control.event_inj_err       = svm->nested.ctl.event_inj_err;

        svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
        svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

        /* Enter Guest-Mode */
        enter_guest_mode(&svm->vcpu);

        /*
         * Merge guest and host intercepts - must be called with vcpu in
         * guest mode to take effect here.
         */
        recalc_intercepts(svm);

        vmcb_mark_all_dirty(svm->vmcb);
}

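/*
 * Emulate the world switch of VMRUN: record the vmcb12 address, load
 * the L2 control and save state, switch CR3, and finally set GIF.
 */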
int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
                         struct vmcb *vmcb12)
{
        int ret;

        trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
                               vmcb12->save.rip,
                               vmcb12->control.int_ctl,
                               vmcb12->control.event_inj,
                               vmcb12->control.nested_ctl);

        trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
                                    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
                                    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
                                    vmcb12->control.intercepts[INTERCEPT_WORD3],
                                    vmcb12->control.intercepts[INTERCEPT_WORD4],
                                    vmcb12->control.intercepts[INTERCEPT_WORD5]);

        svm->nested.vmcb12_gpa = vmcb12_gpa;
        nested_prepare_vmcb_control(svm);
        nested_prepare_vmcb_save(svm, vmcb12);

        ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
                                  nested_npt_enabled(svm));
        if (ret)
                return ret;

        if (!npt_enabled)
                svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

        svm_set_gif(svm, true);

        return 0;
}

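/*
 * Handle the VMRUN instruction: map vmcb12 from guest memory, validate
 * it, save the L1 state that must survive the switch into hsave, and
 * enter guest mode.  Failed checks report SVM_EXIT_ERR to the guest.
 */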
int nested_svm_vmrun(struct vcpu_svm *svm)
{
        int ret;
        struct vmcb *vmcb12;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_host_map map;
        u64 vmcb12_gpa;

        if (is_smm(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        vmcb12_gpa = svm->vmcb->save.rax;
        ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
        if (ret == -EINVAL) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        } else if (ret) {
                return kvm_skip_emulated_instruction(&svm->vcpu);
        }

        ret = kvm_skip_emulated_instruction(&svm->vcpu);

        vmcb12 = map.hva;

        if (WARN_ON_ONCE(!svm->nested.initialized))
                return -EINVAL;

        load_nested_vmcb_control(svm, &vmcb12->control);

        if (!nested_vmcb_check_save(svm, vmcb12) ||
            !nested_vmcb_check_controls(&svm->nested.ctl)) {
                vmcb12->control.exit_code    = SVM_EXIT_ERR;
                vmcb12->control.exit_code_hi = 0;
                vmcb12->control.exit_info_1  = 0;
                vmcb12->control.exit_info_2  = 0;
                goto out;
        }

        /* Clear internal status */
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        /*
         * Save the old vmcb, so we don't need to pick what we save, but can
         * restore everything when a VMEXIT occurs
         */
        hsave->save.es     = vmcb->save.es;
        hsave->save.cs     = vmcb->save.cs;
        hsave->save.ss     = vmcb->save.ss;
        hsave->save.ds     = vmcb->save.ds;
        hsave->save.gdtr   = vmcb->save.gdtr;
        hsave->save.idtr   = vmcb->save.idtr;
        hsave->save.efer   = svm->vcpu.arch.efer;
        hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
        hsave->save.cr4    = svm->vcpu.arch.cr4;
        hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
        hsave->save.rip    = kvm_rip_read(&svm->vcpu);
        hsave->save.rsp    = vmcb->save.rsp;
        hsave->save.rax    = vmcb->save.rax;
        if (npt_enabled)
                hsave->save.cr3    = vmcb->save.cr3;
        else
                hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

        copy_vmcb_control_area(&hsave->control, &vmcb->control);

        svm->nested.nested_run_pending = 1;

        if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
                goto out_exit_err;

        if (nested_svm_vmrun_msrpm(svm))
                goto out;

out_exit_err:
        svm->nested.nested_run_pending = 0;

        svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1  = 0;
        svm->vmcb->control.exit_info_2  = 0;

        nested_svm_vmexit(svm);

out:
        kvm_vcpu_unmap(&svm->vcpu, &map, true);

        return ret;
}

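/*
 * Copy the register set handled by VMLOAD/VMSAVE (FS/GS/TR/LDTR plus
 * the SYSCALL and SYSENTER MSR state) between two VMCBs.
 */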
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
        to_vmcb->save.tr = from_vmcb->save.tr;
        to_vmcb->save.ldtr = from_vmcb->save.ldtr;
        to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
        to_vmcb->save.star = from_vmcb->save.star;
        to_vmcb->save.lstar = from_vmcb->save.lstar;
        to_vmcb->save.cstar = from_vmcb->save.cstar;
        to_vmcb->save.sfmask = from_vmcb->save.sfmask;
        to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
        to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
        to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

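/*
 * Emulate #VMEXIT: copy the L2 state and exit information back into
 * vmcb12, restore the L1 state saved in hsave, and leave guest mode.
 */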
int nested_svm_vmexit(struct vcpu_svm *svm)
{
        int rc;
        struct vmcb *vmcb12;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_host_map map;

        rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
        if (rc) {
                if (rc == -EINVAL)
                        kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        vmcb12 = map.hva;

        /* Exit Guest-Mode */
        leave_guest_mode(&svm->vcpu);
        svm->nested.vmcb12_gpa = 0;
        WARN_ON_ONCE(svm->nested.nested_run_pending);

        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);

        /* in case we halted in L2 */
        svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

        /* Give the current vmcb to the guest */
        vmcb12->save.es     = vmcb->save.es;
        vmcb12->save.cs     = vmcb->save.cs;
        vmcb12->save.ss     = vmcb->save.ss;
        vmcb12->save.ds     = vmcb->save.ds;
        vmcb12->save.gdtr   = vmcb->save.gdtr;
        vmcb12->save.idtr   = vmcb->save.idtr;
        vmcb12->save.efer   = svm->vcpu.arch.efer;
        vmcb12->save.cr0    = kvm_read_cr0(&svm->vcpu);
        vmcb12->save.cr3    = kvm_read_cr3(&svm->vcpu);
        vmcb12->save.cr2    = vmcb->save.cr2;
        vmcb12->save.cr4    = svm->vcpu.arch.cr4;
        vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
        vmcb12->save.rip    = kvm_rip_read(&svm->vcpu);
        vmcb12->save.rsp    = kvm_rsp_read(&svm->vcpu);
        vmcb12->save.rax    = kvm_rax_read(&svm->vcpu);
        vmcb12->save.dr7    = vmcb->save.dr7;
        vmcb12->save.dr6    = svm->vcpu.arch.dr6;
        vmcb12->save.cpl    = vmcb->save.cpl;

        vmcb12->control.int_state         = vmcb->control.int_state;
        vmcb12->control.exit_code         = vmcb->control.exit_code;
        vmcb12->control.exit_code_hi      = vmcb->control.exit_code_hi;
        vmcb12->control.exit_info_1       = vmcb->control.exit_info_1;
        vmcb12->control.exit_info_2       = vmcb->control.exit_info_2;

        if (vmcb12->control.exit_code != SVM_EXIT_ERR)
                nested_vmcb_save_pending_event(svm, vmcb12);

        if (svm->nrips_enabled)
                vmcb12->control.next_rip  = vmcb->control.next_rip;

        vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
        vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
        vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
        vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;

        vmcb12->control.pause_filter_count =
                svm->vmcb->control.pause_filter_count;
        vmcb12->control.pause_filter_thresh =
                svm->vmcb->control.pause_filter_thresh;

        /* Restore the original control entries */
        copy_vmcb_control_area(&vmcb->control, &hsave->control);

        /* On vmexit the GIF is set to false */
        svm_set_gif(svm, false);

        svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
                svm->vcpu.arch.l1_tsc_offset;

        svm->nested.ctl.nested_cr3 = 0;

        /* Restore selected save entries */
        svm->vmcb->save.es = hsave->save.es;
        svm->vmcb->save.cs = hsave->save.cs;
        svm->vmcb->save.ss = hsave->save.ss;
        svm->vmcb->save.ds = hsave->save.ds;
        svm->vmcb->save.gdtr = hsave->save.gdtr;
        svm->vmcb->save.idtr = hsave->save.idtr;
        kvm_set_rflags(&svm->vcpu, hsave->save.rflags | X86_EFLAGS_FIXED);
        svm_set_efer(&svm->vcpu, hsave->save.efer);
        svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
        svm_set_cr4(&svm->vcpu, hsave->save.cr4);
        kvm_rax_write(&svm->vcpu, hsave->save.rax);
        kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
        kvm_rip_write(&svm->vcpu, hsave->save.rip);
        svm->vmcb->save.dr7 = DR7_FIXED_1;
        svm->vmcb->save.cpl = 0;
        svm->vmcb->control.exit_int_info = 0;

        vmcb_mark_all_dirty(svm->vmcb);

        trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
                                       vmcb12->control.exit_info_1,
                                       vmcb12->control.exit_info_2,
                                       vmcb12->control.exit_int_info,
                                       vmcb12->control.exit_int_info_err,
                                       KVM_ISA_SVM);

        kvm_vcpu_unmap(&svm->vcpu, &map, true);

        nested_svm_uninit_mmu_context(&svm->vcpu);

        rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
        if (rc)
                return 1;

        if (npt_enabled)
                svm->vmcb->save.cr3 = hsave->save.cr3;

        /*
         * Drop what we picked up for L2 via svm_complete_interrupts() so it
         * doesn't end up in L1.
         */
        svm->vcpu.arch.nmi_injected = false;
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        return 0;
}

int svm_allocate_nested(struct vcpu_svm *svm)
{
        struct page *hsave_page;

        if (svm->nested.initialized)
                return 0;

        hsave_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        if (!hsave_page)
                return -ENOMEM;
        svm->nested.hsave = page_address(hsave_page);

        svm->nested.msrpm = svm_vcpu_alloc_msrpm();
        if (!svm->nested.msrpm)
                goto err_free_hsave;
        svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

        svm->nested.initialized = true;
        return 0;

err_free_hsave:
        __free_page(hsave_page);
        return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
        if (!svm->nested.initialized)
                return;

        svm_vcpu_free_msrpm(svm->nested.msrpm);
        svm->nested.msrpm = NULL;

        __free_page(virt_to_page(svm->nested.hsave));
        svm->nested.hsave = NULL;

        svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
        if (is_guest_mode(&svm->vcpu)) {
                struct vmcb *hsave = svm->nested.hsave;
                struct vmcb *vmcb = svm->vmcb;

                svm->nested.nested_run_pending = 0;
                leave_guest_mode(&svm->vcpu);
                copy_vmcb_control_area(&vmcb->control, &hsave->control);
                nested_svm_uninit_mmu_context(&svm->vcpu);
                vmcb_mark_all_dirty(svm->vmcb);
        }

        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
}

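/*
 * On an MSR intercept while running L2, consult L1's MSR permission
 * bitmap to decide whether the exit belongs to L1 (NESTED_EXIT_DONE)
 * or should be handled by L0 (NESTED_EXIT_HOST).
 */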
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
        u32 offset, msr, value;
        int write, mask;

        if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return NESTED_EXIT_HOST;

        msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        offset = svm_msrpm_offset(msr);
        write  = svm->vmcb->control.exit_info_1 & 1;
        mask   = 1 << ((2 * (msr & 0xf)) + write);

        if (offset == MSR_INVALID)
                return NESTED_EXIT_DONE;

        /* Offset is in 32 bit units but we need it in 8 bit units */
        offset *= 4;

        if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
                return NESTED_EXIT_DONE;

        return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

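/*
 * Same for IN/OUT intercepts: look up the port in L1's IO permission
 * bitmap.  An access may straddle a byte boundary, hence up to two
 * bytes of the bitmap are read.
 */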
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
        unsigned port, size, iopm_len;
        u16 val, mask;
        u8 start_bit;
        u64 gpa;

        if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
                return NESTED_EXIT_HOST;

        port = svm->vmcb->control.exit_info_1 >> 16;
        size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
                SVM_IOIO_SIZE_SHIFT;
        gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
        start_bit = port % 8;
        iopm_len = (start_bit + size > 8) ? 2 : 1;
        mask = (0xf >> (4 - size)) << start_bit;
        val = 0;

        if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
                return NESTED_EXIT_DONE;

        return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;

        switch (exit_code) {
        case SVM_EXIT_MSR:
                vmexit = nested_svm_exit_handled_msr(svm);
                break;
        case SVM_EXIT_IOIO:
                vmexit = nested_svm_intercept_ioio(svm);
                break;
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
                if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
                if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                /*
                 * Host-intercepted exceptions have been checked already in
                 * nested_svm_exit_special.  There is nothing to do here,
                 * the vmexit is injected by svm_check_nested_events.
                 */
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_ERR: {
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        default: {
                if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
        }
        }

        return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
        int vmexit;

        vmexit = nested_svm_intercept(svm);

        if (vmexit == NESTED_EXIT_DONE)
                nested_svm_vmexit(svm);

        return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
        if (!(svm->vcpu.arch.efer & EFER_SVME) ||
            !is_paging(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        if (svm->vmcb->save.cpl) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
        unsigned int nr = svm->vcpu.arch.exception.nr;

        return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
        unsigned int nr = svm->vcpu.arch.exception.nr;

        svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
        svm->vmcb->control.exit_code_hi = 0;

        if (svm->vcpu.arch.exception.has_error_code)
                svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

        /*
         * EXITINFO2 is undefined for all exception intercepts other
         * than #PF.
         */
        if (nr == PF_VECTOR) {
                if (svm->vcpu.arch.exception.nested_apf)
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
                else if (svm->vcpu.arch.exception.has_payload)
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
                else
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
        } else if (nr == DB_VECTOR) {
                /* See inject_pending_event.  */
                kvm_deliver_exception_payload(&svm->vcpu);
                if (svm->vcpu.arch.dr7 & DR7_GD) {
                        svm->vcpu.arch.dr7 &= ~DR7_GD;
                        kvm_update_dr7(&svm->vcpu);
                }
        } else {
                WARN_ON(svm->vcpu.arch.exception.has_payload);
        }

        nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
        svm->vmcb->control.exit_code = SVM_EXIT_SMI;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
        svm->vmcb->control.exit_code = SVM_EXIT_NMI;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
        trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

        svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
        return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static void nested_svm_init(struct vcpu_svm *svm)
{
        svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

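/*
 * Decide whether a pending event should cause a vmexit to L1, checking
 * in priority order: INIT, then exceptions, then SMI, NMI and external
 * interrupts.  Returns -EBUSY while event injection must be blocked,
 * e.g. when a nested VMRUN is still pending.
 */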
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        bool block_nested_events =
                kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (lapic_in_kernel(vcpu) &&
            test_bit(KVM_APIC_INIT, &apic->pending_events)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_init(svm))
                        return 0;
                nested_svm_init(svm);
                return 0;
        }

        if (vcpu->arch.exception.pending) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_exception(svm))
                        return 0;
                nested_svm_inject_exception_vmexit(svm);
                return 0;
        }

        if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_smi(svm))
                        return 0;
                nested_svm_smi(svm);
                return 0;
        }

        if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_nmi(svm))
                        return 0;
                nested_svm_nmi(svm);
                return 0;
        }

        if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_intr(svm))
                        return 0;
                nested_svm_intr(svm);
                return 0;
        }

        return 0;
}

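/*
 * Classify exits that L0 may need to handle itself regardless of L1's
 * intercepts: INTR, NMI and NPF always go to the host, as do host-
 * intercepted exceptions and async page faults.
 */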
int nested_svm_exit_special(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;

        switch (exit_code) {
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
        case SVM_EXIT_NPF:
                return NESTED_EXIT_HOST;
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

                if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] &
                    excp_bits)
                        return NESTED_EXIT_HOST;
                else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
                         svm->vcpu.arch.apf.host_apf_flags)
                        /* Trap async PF even if not shadowing */
                        return NESTED_EXIT_HOST;
                break;
        }
        default:
                break;
        }

        return NESTED_EXIT_CONTINUE;
}

static int svm_get_nested_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                u32 user_data_size)
{
        struct vcpu_svm *svm;
        struct kvm_nested_state kvm_state = {
                .flags = 0,
                .format = KVM_STATE_NESTED_FORMAT_SVM,
                .size = sizeof(kvm_state),
        };
        struct vmcb __user *user_vmcb = (struct vmcb __user *)
                &user_kvm_nested_state->data.svm[0];

        if (!vcpu)
                return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

        svm = to_svm(vcpu);

        if (user_data_size < kvm_state.size)
                goto out;

        /* First fill in the header and copy it out.  */
        if (is_guest_mode(vcpu)) {
                kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
                kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
                kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

                if (svm->nested.nested_run_pending)
                        kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
        }

        if (gif_set(svm))
                kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

        if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
                return -EFAULT;

        if (!is_guest_mode(vcpu))
                goto out;

        /*
         * Copy over the full size of the VMCB rather than just the size
         * of the structs.
         */
        if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
                return -EFAULT;
        if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
                         sizeof(user_vmcb->control)))
                return -EFAULT;
        if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
                         sizeof(user_vmcb->save)))
                return -EFAULT;

out:
        return kvm_state.size;
}

static int svm_set_nested_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                struct kvm_nested_state *kvm_state)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb __user *user_vmcb = (struct vmcb __user *)
                &user_kvm_nested_state->data.svm[0];
        struct vmcb_control_area *ctl;
        struct vmcb_save_area *save;
        int ret;
        u32 cr0;

        BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
                     KVM_STATE_NESTED_SVM_VMCB_SIZE);

        if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
                return -EINVAL;

        if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
                                 KVM_STATE_NESTED_RUN_PENDING |
                                 KVM_STATE_NESTED_GIF_SET))
                return -EINVAL;

        /*
         * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
         * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
         */
        if (!(vcpu->arch.efer & EFER_SVME)) {
                /* GIF=1 and no guest mode are required if SVME=0.  */
                if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
                        return -EINVAL;
        }

        /* SMM temporarily disables SVM, so we cannot be in guest mode.  */
        if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
                return -EINVAL;

        if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
                svm_leave_nested(svm);
                svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
                return 0;
        }

        if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
                return -EINVAL;
        if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
                return -EINVAL;

        ret  = -ENOMEM;
        ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL);
        save = kzalloc(sizeof(*save), GFP_KERNEL);
        if (!ctl || !save)
                goto out_free;

        ret = -EFAULT;
        if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
                goto out_free;
        if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
                goto out_free;

        ret = -EINVAL;
        if (!nested_vmcb_check_controls(ctl))
                goto out_free;

        /*
         * Processor state contains L2 state.  Check that it is
         * valid for guest mode (see nested_vmcb_check_save).
         */
        cr0 = kvm_read_cr0(vcpu);
        if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
                goto out_free;

        /*
         * Validate host state saved from before VMRUN (see
         * nested_svm_check_permissions).
         * TODO: validate reserved bits for all saved state.
         */
        if (!(save->cr0 & X86_CR0_PG))
                goto out_free;
        if (!(save->efer & EFER_SVME))
                goto out_free;

        /*
         * All checks done, we can enter guest mode.  L1 control fields
         * come from the nested save state.  Guest state is already
         * in the registers, the save area of the nested state instead
         * contains saved L1 state.
         */
        svm->nested.nested_run_pending =
                !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

        copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
        hsave->save = *save;

        svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
        load_nested_vmcb_control(svm, ctl);
        nested_prepare_vmcb_control(svm);

        kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
        ret = 0;
out_free:
        kfree(save);
        kfree(ctl);

        return ret;
}

struct kvm_x86_nested_ops svm_nested_ops = {
        .check_events = svm_check_nested_events,
        .get_nested_state_pages = svm_get_nested_state_pages,
        .get_state = svm_get_nested_state,
        .set_state = svm_set_nested_state,
};