KVM: SVM: do not allow VMRUN inside SMM
arch/x86/kvm/svm/nested.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "svm.h"

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
                                       struct x86_exception *fault)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
                /*
                 * TODO: track the cause of the nested page fault, and
                 * correctly fill in the high bits of exit_info_1.
                 */
                svm->vmcb->control.exit_code = SVM_EXIT_NPF;
                svm->vmcb->control.exit_code_hi = 0;
                svm->vmcb->control.exit_info_1 = (1ULL << 32);
                svm->vmcb->control.exit_info_2 = fault->address;
        }

        svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
        svm->vmcb->control.exit_info_1 |= fault->error_code;

        /*
         * The present bit is always zero for page structure faults on real
         * hardware.
         */
        if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
                svm->vmcb->control.exit_info_1 &= ~1;

        nested_svm_vmexit(svm);
}

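/*
 * Read a PDPTE pointed to by L1's nested CR3; used by the shadow-NPT
 * MMU when the nested page tables use PAE paging.
 */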
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr3 = svm->nested.nested_cr3;
        u64 pdpte;
        int ret;

        ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
                                       offset_in_page(cr3) + index * 8, 8);
        if (ret)
                return 0;
        return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->nested.nested_cr3;
}

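/*
 * Point vcpu->arch.mmu at a shadow MMU rooted in L1's nested CR3, so
 * that L0 shadows L1's nested page tables, while walk_mmu keeps
 * handling L2's own gva->gpa translations.
 */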
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
        WARN_ON(mmu_is_nested(vcpu));

        vcpu->arch.mmu = &vcpu->arch.guest_mmu;
        kvm_init_shadow_mmu(vcpu);
        vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
        vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
        vcpu->arch.mmu->shadow_root_level = kvm_x86_ops.get_tdp_level(vcpu);
        reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
        vcpu->arch.walk_mmu               = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

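/*
 * Recompute the intercepts of the active VMCB while L2 runs: start from
 * KVM's own intercepts (kept in hsave) and OR in everything L1 wants
 * intercepted, minus a few exceptions handled below.
 */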
void recalc_intercepts(struct vcpu_svm *svm)
{
        struct vmcb_control_area *c, *h;
        struct nested_state *g;

        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

        if (!is_guest_mode(&svm->vcpu))
                return;

        c = &svm->vmcb->control;
        h = &svm->nested.hsave->control;
        g = &svm->nested;

        c->intercept_cr = h->intercept_cr;
        c->intercept_dr = h->intercept_dr;
        c->intercept_exceptions = h->intercept_exceptions;
        c->intercept = h->intercept;

        if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
                /* We only want the cr8 intercept bits of L1 */
                c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
                c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

                /*
                 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
                 * affect any interrupt we may want to inject; therefore,
                 * interrupt window vmexits are irrelevant to L0.
                 */
                c->intercept &= ~(1ULL << INTERCEPT_VINTR);
        }

        /* We don't want to see VMMCALLs from a nested guest */
        c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

        c->intercept_cr |= g->intercept_cr;
        c->intercept_dr |= g->intercept_dr;
        c->intercept_exceptions |= g->intercept_exceptions;
        c->intercept |= g->intercept;
}

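/* Copy all control fields KVM virtualizes from one VMCB to another. */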
static void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
        struct vmcb_control_area *dst  = &dst_vmcb->control;
        struct vmcb_control_area *from = &from_vmcb->control;

        dst->intercept_cr         = from->intercept_cr;
        dst->intercept_dr         = from->intercept_dr;
        dst->intercept_exceptions = from->intercept_exceptions;
        dst->intercept            = from->intercept;
        dst->iopm_base_pa         = from->iopm_base_pa;
        dst->msrpm_base_pa        = from->msrpm_base_pa;
        dst->tsc_offset           = from->tsc_offset;
        dst->asid                 = from->asid;
        dst->tlb_ctl              = from->tlb_ctl;
        dst->int_ctl              = from->int_ctl;
        dst->int_vector           = from->int_vector;
        dst->int_state            = from->int_state;
        dst->exit_code            = from->exit_code;
        dst->exit_code_hi         = from->exit_code_hi;
        dst->exit_info_1          = from->exit_info_1;
        dst->exit_info_2          = from->exit_info_2;
        dst->exit_int_info        = from->exit_int_info;
        dst->exit_int_info_err    = from->exit_int_info_err;
        dst->nested_ctl           = from->nested_ctl;
        dst->event_inj            = from->event_inj;
        dst->event_inj_err        = from->event_inj_err;
        dst->nested_cr3           = from->nested_cr3;
        dst->virt_ext             = from->virt_ext;
        dst->pause_filter_count   = from->pause_filter_count;
        dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
        /*
         * This function merges the msr permission bitmaps of kvm and the
         * nested vmcb. It is optimized in that it only merges the parts where
         * the kvm msr permission bitmap may contain zero bits
         */
        int i;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
                return true;

        for (i = 0; i < MSRPM_OFFSETS; i++) {
                u32 value, p;
                u64 offset;

                if (msrpm_offsets[i] == 0xffffffff)
                        break;

                p      = msrpm_offsets[i];
                offset = svm->nested.vmcb_msrpm + (p * 4);

                if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
                        return false;

                svm->nested.msrpm[p] = svm->msrpm[p] | value;
        }

        svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

        return true;
}

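/*
 * A subset of the consistency checks the CPU performs on VMRUN; on
 * failure the caller reports SVM_EXIT_ERR back to L1.
 */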
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
        if ((vmcb->save.efer & EFER_SVME) == 0)
                return false;

        if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
            (vmcb->save.cr0 & X86_CR0_NW))
                return false;

        if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
                return false;

        if (vmcb->control.asid == 0)
                return false;

        if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
            !npt_enabled)
                return false;

        return true;
}

void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
                          struct vmcb *nested_vmcb, struct kvm_host_map *map)
{
        bool evaluate_pending_interrupts =
                is_intercept(svm, INTERCEPT_VINTR) ||
                is_intercept(svm, INTERCEPT_IRET);

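        /*
         * Cache L1's EFLAGS.IF (the "host IF") in HF_HIF_MASK; it stands
         * in for the interrupt flag of L1 while L2 runs with
         * V_INTR_MASKING set.
         */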
        if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
                svm->vcpu.arch.hflags |= HF_HIF_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

        if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
                svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
                nested_svm_init_mmu_context(&svm->vcpu);
        }

        /* Load the nested guest state */
        svm->vmcb->save.es = nested_vmcb->save.es;
        svm->vmcb->save.cs = nested_vmcb->save.cs;
        svm->vmcb->save.ss = nested_vmcb->save.ss;
        svm->vmcb->save.ds = nested_vmcb->save.ds;
        svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
        svm->vmcb->save.idtr = nested_vmcb->save.idtr;
        kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
        svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
        svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
        svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
        if (npt_enabled) {
                svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
                svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
        } else
                (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

        /* Guest paging mode is active - reset mmu */
        kvm_mmu_reset_context(&svm->vcpu);

        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
        kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
        kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
        kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

        /* In case we don't even reach vcpu_run, the fields are not updated */
        svm->vmcb->save.rax = nested_vmcb->save.rax;
        svm->vmcb->save.rsp = nested_vmcb->save.rsp;
        svm->vmcb->save.rip = nested_vmcb->save.rip;
        svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
        svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
        svm->vmcb->save.cpl = nested_vmcb->save.cpl;

        svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
        svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;

        /* cache intercepts */
        svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
        svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
        svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
        svm->nested.intercept            = nested_vmcb->control.intercept;

        svm_flush_tlb(&svm->vcpu);
        svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
        if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
                svm->vcpu.arch.hflags |= HF_VINTR_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

        svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
        svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;

        svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
        svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
        svm->vmcb->control.int_state = nested_vmcb->control.int_state;
        svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
        svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

        svm->vmcb->control.pause_filter_count =
                nested_vmcb->control.pause_filter_count;
        svm->vmcb->control.pause_filter_thresh =
                nested_vmcb->control.pause_filter_thresh;

        kvm_vcpu_unmap(&svm->vcpu, map, true);

        /* Enter Guest-Mode */
        enter_guest_mode(&svm->vcpu);

        /*
         * Merge guest and host intercepts - must be called with vcpu in
         * guest-mode to take effect here
         */
        recalc_intercepts(svm);

        svm->nested.vmcb = vmcb_gpa;

        /*
         * If L1 had a pending IRQ/NMI before executing VMRUN,
         * which wasn't delivered because it was disallowed (e.g.
         * interrupts disabled), L0 needs to evaluate if this pending
         * event should cause an exit from L2 to L1 or be delivered
         * directly to L2.
         *
         * Usually this would be handled by the processor noticing an
         * IRQ/NMI window request.  However, VMRUN can unblock interrupts
         * by implicitly setting GIF, so force L0 to perform pending event
         * evaluation by requesting a KVM_REQ_EVENT.
         */
        enable_gif(svm);
        if (unlikely(evaluate_pending_interrupts))
                kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

        mark_all_dirty(svm->vmcb);
}

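/*
 * Emulate the VMRUN intercept: validate the VMCB whose physical address
 * is in RAX, save L1's state into hsave, and switch to L2's context.
 */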
int nested_svm_vmrun(struct vcpu_svm *svm)
{
        int ret;
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_host_map map;
        u64 vmcb_gpa;

        if (is_smm(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        vmcb_gpa = svm->vmcb->save.rax;
        ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
        if (ret == -EINVAL) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        } else if (ret) {
                return kvm_skip_emulated_instruction(&svm->vcpu);
        }

        ret = kvm_skip_emulated_instruction(&svm->vcpu);

        nested_vmcb = map.hva;

        if (!nested_vmcb_checks(nested_vmcb)) {
                nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
                nested_vmcb->control.exit_code_hi = 0;
                nested_vmcb->control.exit_info_1  = 0;
                nested_vmcb->control.exit_info_2  = 0;

                kvm_vcpu_unmap(&svm->vcpu, &map, true);

                return ret;
        }

        trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
                               nested_vmcb->save.rip,
                               nested_vmcb->control.int_ctl,
                               nested_vmcb->control.event_inj,
                               nested_vmcb->control.nested_ctl);

        trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
                                    nested_vmcb->control.intercept_cr >> 16,
                                    nested_vmcb->control.intercept_exceptions,
                                    nested_vmcb->control.intercept);

        /* Clear internal status */
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        /*
         * Save the old vmcb, so we don't need to pick what we save, but can
         * restore everything when a VMEXIT occurs
         */
        hsave->save.es     = vmcb->save.es;
        hsave->save.cs     = vmcb->save.cs;
        hsave->save.ss     = vmcb->save.ss;
        hsave->save.ds     = vmcb->save.ds;
        hsave->save.gdtr   = vmcb->save.gdtr;
        hsave->save.idtr   = vmcb->save.idtr;
        hsave->save.efer   = svm->vcpu.arch.efer;
        hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
        hsave->save.cr4    = svm->vcpu.arch.cr4;
        hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
        hsave->save.rip    = kvm_rip_read(&svm->vcpu);
        hsave->save.rsp    = vmcb->save.rsp;
        hsave->save.rax    = vmcb->save.rax;
        if (npt_enabled)
                hsave->save.cr3    = vmcb->save.cr3;
        else
                hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

        copy_vmcb_control_area(hsave, vmcb);

        enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);

        if (!nested_svm_vmrun_msrpm(svm)) {
                svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
                svm->vmcb->control.exit_code_hi = 0;
                svm->vmcb->control.exit_info_1  = 0;
                svm->vmcb->control.exit_info_2  = 0;

                nested_svm_vmexit(svm);
        }

        return ret;
}

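/*
 * VMLOAD/VMSAVE cover the state that VMRUN and #VMEXIT leave untouched:
 * FS/GS/TR/LDTR, KERNEL_GS_BASE and the SYSCALL/SYSENTER MSRs.
 */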
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
        to_vmcb->save.tr = from_vmcb->save.tr;
        to_vmcb->save.ldtr = from_vmcb->save.ldtr;
        to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
        to_vmcb->save.star = from_vmcb->save.star;
        to_vmcb->save.lstar = from_vmcb->save.lstar;
        to_vmcb->save.cstar = from_vmcb->save.cstar;
        to_vmcb->save.sfmask = from_vmcb->save.sfmask;
        to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
        to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
        to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

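/*
 * Emulate #VMEXIT from L2 to L1: write L2's state back into the nested
 * VMCB, then restore L1's state and control area from hsave.
 */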
int nested_svm_vmexit(struct vcpu_svm *svm)
{
        int rc;
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_host_map map;

        trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
                                       vmcb->control.exit_info_1,
                                       vmcb->control.exit_info_2,
                                       vmcb->control.exit_int_info,
                                       vmcb->control.exit_int_info_err,
                                       KVM_ISA_SVM);

        rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
        if (rc) {
                if (rc == -EINVAL)
                        kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        nested_vmcb = map.hva;

        /* Exit Guest-Mode */
        leave_guest_mode(&svm->vcpu);
        svm->nested.vmcb = 0;

        /* Give the current vmcb to the guest */
        disable_gif(svm);

        nested_vmcb->save.es     = vmcb->save.es;
        nested_vmcb->save.cs     = vmcb->save.cs;
        nested_vmcb->save.ss     = vmcb->save.ss;
        nested_vmcb->save.ds     = vmcb->save.ds;
        nested_vmcb->save.gdtr   = vmcb->save.gdtr;
        nested_vmcb->save.idtr   = vmcb->save.idtr;
        nested_vmcb->save.efer   = svm->vcpu.arch.efer;
        nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
        nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
        nested_vmcb->save.cr2    = vmcb->save.cr2;
        nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
        nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
        nested_vmcb->save.rip    = vmcb->save.rip;
        nested_vmcb->save.rsp    = vmcb->save.rsp;
        nested_vmcb->save.rax    = vmcb->save.rax;
        nested_vmcb->save.dr7    = vmcb->save.dr7;
        nested_vmcb->save.dr6    = vmcb->save.dr6;
        nested_vmcb->save.cpl    = vmcb->save.cpl;

        nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
        nested_vmcb->control.int_vector        = vmcb->control.int_vector;
        nested_vmcb->control.int_state         = vmcb->control.int_state;
        nested_vmcb->control.exit_code         = vmcb->control.exit_code;
        nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
        nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
        nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
        nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
        nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

        if (svm->nrips_enabled)
                nested_vmcb->control.next_rip  = vmcb->control.next_rip;

        /*
         * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
         * to make sure that we do not lose injected events. So check event_inj
         * here and copy it to exit_int_info if it is valid.
         * Exit_int_info and event_inj can't be both valid because the case
         * below only happens on a VMRUN instruction intercept which has
         * no valid exit_int_info set.
         */
        if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
                struct vmcb_control_area *nc = &nested_vmcb->control;

                nc->exit_int_info     = vmcb->control.event_inj;
                nc->exit_int_info_err = vmcb->control.event_inj_err;
        }

        nested_vmcb->control.tlb_ctl           = 0;
        nested_vmcb->control.event_inj         = 0;
        nested_vmcb->control.event_inj_err     = 0;

        nested_vmcb->control.pause_filter_count =
                svm->vmcb->control.pause_filter_count;
        nested_vmcb->control.pause_filter_thresh =
                svm->vmcb->control.pause_filter_thresh;

        /* We always set V_INTR_MASKING and remember the old value in hflags */
        if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
                nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

        /* Restore the original control entries */
        copy_vmcb_control_area(vmcb, hsave);

        svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        svm->nested.nested_cr3 = 0;

        /* Restore selected save entries */
        svm->vmcb->save.es = hsave->save.es;
        svm->vmcb->save.cs = hsave->save.cs;
        svm->vmcb->save.ss = hsave->save.ss;
        svm->vmcb->save.ds = hsave->save.ds;
        svm->vmcb->save.gdtr = hsave->save.gdtr;
        svm->vmcb->save.idtr = hsave->save.idtr;
        kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
        svm_set_efer(&svm->vcpu, hsave->save.efer);
        svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
        svm_set_cr4(&svm->vcpu, hsave->save.cr4);
        if (npt_enabled) {
                svm->vmcb->save.cr3 = hsave->save.cr3;
                svm->vcpu.arch.cr3 = hsave->save.cr3;
        } else {
                (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
        }
        kvm_rax_write(&svm->vcpu, hsave->save.rax);
        kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
        kvm_rip_write(&svm->vcpu, hsave->save.rip);
        svm->vmcb->save.dr7 = 0;
        svm->vmcb->save.cpl = 0;
        svm->vmcb->control.exit_int_info = 0;

        mark_all_dirty(svm->vmcb);

        kvm_vcpu_unmap(&svm->vcpu, &map, true);

        nested_svm_uninit_mmu_context(&svm->vcpu);
        kvm_mmu_reset_context(&svm->vcpu);
        kvm_mmu_load(&svm->vcpu);

        /*
         * Drop what we picked up for L2 via svm_complete_interrupts() so it
         * doesn't end up in L1.
         */
        svm->vcpu.arch.nmi_injected = false;
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        return 0;
}

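/*
 * Consult L1's MSR permission map for the MSR in RCX.  Each MSR owns
 * two consecutive bits (read, then write); a set bit means the access
 * must be reflected to L1.
 */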
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
        u32 offset, msr, value;
        int write, mask;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
                return NESTED_EXIT_HOST;

        msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        offset = svm_msrpm_offset(msr);
        write  = svm->vmcb->control.exit_info_1 & 1;
        mask   = 1 << ((2 * (msr & 0xf)) + write);

        if (offset == MSR_INVALID)
                return NESTED_EXIT_DONE;

        /* Offset is in 32 bit units but we need it in 8 bit (byte) units */
        offset *= 4;

        if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
                return NESTED_EXIT_DONE;

        return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

/* DB exceptions for our internal use must not cause vmexit */
static int nested_svm_intercept_db(struct vcpu_svm *svm)
{
        unsigned long dr6;

        /* if we're not singlestepping, it's not ours */
        if (!svm->nmi_singlestep)
                return NESTED_EXIT_DONE;

        /* if it's not a singlestep exception, it's not ours */
        if (kvm_get_dr(&svm->vcpu, 6, &dr6))
                return NESTED_EXIT_DONE;
        if (!(dr6 & DR6_BS))
                return NESTED_EXIT_DONE;

        /* if the guest is singlestepping, it should get the vmexit */
        if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
                disable_nmi_singlestep(svm);
                return NESTED_EXIT_DONE;
        }

        /* it's ours, the nested hypervisor must not see this one */
        return NESTED_EXIT_HOST;
}

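/*
 * Consult L1's I/O permission map, which holds one bit per port; an
 * access that straddles a byte boundary needs a two-byte read.
 */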
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
        unsigned port, size, iopm_len;
        u16 val, mask;
        u8 start_bit;
        u64 gpa;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
                return NESTED_EXIT_HOST;

        port = svm->vmcb->control.exit_info_1 >> 16;
        size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
                SVM_IOIO_SIZE_SHIFT;
        gpa  = svm->nested.vmcb_iopm + (port / 8);
        start_bit = port % 8;
        iopm_len = (start_bit + size > 8) ? 2 : 1;
        mask = (0xf >> (4 - size)) << start_bit;
        val = 0;

        if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
                return NESTED_EXIT_DONE;

        return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

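/*
 * Decide who owns the current exit: NESTED_EXIT_HOST means L0 handles
 * it itself, NESTED_EXIT_DONE means it is reflected to L1 as a nested
 * #VMEXIT.
 */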
static int nested_svm_intercept(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;

        switch (exit_code) {
        case SVM_EXIT_MSR:
                vmexit = nested_svm_exit_handled_msr(svm);
                break;
        case SVM_EXIT_IOIO:
                vmexit = nested_svm_intercept_ioio(svm);
                break;
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
                u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
                if (svm->nested.intercept_cr & bit)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
                u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
                if (svm->nested.intercept_dr & bit)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
                if (svm->nested.intercept_exceptions & excp_bits) {
                        if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
                                vmexit = nested_svm_intercept_db(svm);
                        else
                                vmexit = NESTED_EXIT_DONE;
                }
                /* async page faults always cause a vmexit */
                else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
                         svm->vcpu.arch.exception.nested_apf != 0)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_ERR: {
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        default: {
                u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
                if (svm->nested.intercept & exit_bits)
                        vmexit = NESTED_EXIT_DONE;
        }
        }

        return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
        int vmexit;

        vmexit = nested_svm_intercept(svm);

        if (vmexit == NESTED_EXIT_DONE)
                nested_svm_vmexit(svm);

        return vmexit;
}

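/*
 * Common permission checks for the SVM instructions: #UD unless
 * EFER.SVME is set and paging is enabled, #GP unless CPL is 0.
 */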
int nested_svm_check_permissions(struct vcpu_svm *svm)
{
        if (!(svm->vcpu.arch.efer & EFER_SVME) ||
            !is_paging(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        if (svm->vmcb->save.cpl) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        return 0;
}

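/*
 * An exception is about to be injected into L2; if L1 intercepts the
 * vector, turn it into the corresponding SVM_EXIT_EXCP_BASE vmexit
 * instead.
 */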
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                               bool has_error_code, u32 error_code)
{
        int vmexit;

        if (!is_guest_mode(&svm->vcpu))
                return 0;

        vmexit = nested_svm_intercept(svm);
        if (vmexit != NESTED_EXIT_DONE)
                return 0;

        svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1 = error_code;

        /*
         * EXITINFO2 is undefined for all exception intercepts other
         * than #PF.
         */
        if (svm->vcpu.arch.exception.nested_apf)
                svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
        else if (svm->vcpu.arch.exception.has_payload)
                svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
        else
                svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

        svm->nested.exit_required = true;
        return vmexit;
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
        svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        /* nested_svm_vmexit() gets called afterwards from handle_exit() */
        svm->nested.exit_required = true;
        trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
}

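/* INTERCEPT_INTR is bit 0 of the intercept vector, hence the bare 1ULL. */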
static bool nested_exit_on_intr(struct vcpu_svm *svm)
{
        return (svm->nested.intercept & 1ULL);
}

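/*
 * Called before event injection: return -EBUSY if a pending interrupt
 * should exit to L1 but an event reinjection or a required #VMEXIT must
 * complete first.
 */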
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        bool block_nested_events =
                kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required;

        if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
                if (block_nested_events)
                        return -EBUSY;
                nested_svm_intr(svm);
                return 0;
        }

        return 0;
}

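/*
 * Exits that L0 must look at first regardless of L1's intercepts;
 * NESTED_EXIT_CONTINUE means nested_svm_exit_handled() decides next.
 */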
int nested_svm_exit_special(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;

        switch (exit_code) {
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
        case SVM_EXIT_EXCP_BASE + MC_VECTOR:
                return NESTED_EXIT_HOST;
        case SVM_EXIT_NPF:
                /* For now we are always handling NPFs when using them */
                if (npt_enabled)
                        return NESTED_EXIT_HOST;
                break;
        case SVM_EXIT_EXCP_BASE + PF_VECTOR:
                /* When we're shadowing, trap PFs, but not async PF */
                if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
                        return NESTED_EXIT_HOST;
                break;
        default:
                break;
        }

        return NESTED_EXIT_CONTINUE;
}

struct kvm_x86_nested_ops svm_nested_ops = {
        .check_events = svm_check_nested_events,
};