// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "svm.h"

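/*
 * Used as the ->inject_page_fault callback of the nested MMU: report a
 * fault taken while walking L1's nested page tables to L1 as an
 * SVM_EXIT_NPF vmexit, with the error code in the low bits of exit_info_1.
 */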
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
                                       struct x86_exception *fault)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
                /*
                 * TODO: track the cause of the nested page fault, and
                 * correctly fill in the high bits of exit_info_1.
                 */
                svm->vmcb->control.exit_code = SVM_EXIT_NPF;
                svm->vmcb->control.exit_code_hi = 0;
                svm->vmcb->control.exit_info_1 = (1ULL << 32);
                svm->vmcb->control.exit_info_2 = fault->address;
        }

        svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
        svm->vmcb->control.exit_info_1 |= fault->error_code;

        /*
         * The present bit is always zero for page structure faults on real
         * hardware.
         */
        if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
                svm->vmcb->control.exit_info_1 &= ~1;

        nested_svm_vmexit(svm);
}

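/*
 * Read one PDPTE of L1's nested page table directly from guest memory;
 * installed as the ->get_pdptr callback of the nested MMU.
 */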
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr3 = svm->nested.nested_cr3;
        u64 pdpte;
        int ret;

        ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
                                       offset_in_page(cr3) + index * 8, 8);
        if (ret)
                return 0;
        return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->nested.nested_cr3;
}

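/*
 * Point the vcpu at guest_mmu, which shadows L1's nested page tables
 * (rooted at nested_cr3), while walk_mmu (nested_mmu) translates L2's
 * own page tables.
 */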
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
        WARN_ON(mmu_is_nested(vcpu));

        vcpu->arch.mmu = &vcpu->arch.guest_mmu;
        kvm_init_shadow_mmu(vcpu);
        vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
        vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
        vcpu->arch.mmu->shadow_root_level = kvm_x86_ops.get_tdp_level(vcpu);
        reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
        vcpu->arch.walk_mmu               = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

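/*
 * Recompute the intercepts that are active while L2 runs: start from L1's
 * host intercepts saved in hsave, drop the ones L0 does not need (CR8 and
 * VINTR under V_INTR_MASKING, VMMCALL), then OR in everything the nested
 * vmcb asked for.
 */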
void recalc_intercepts(struct vcpu_svm *svm)
{
        struct vmcb_control_area *c, *h;
        struct nested_state *g;

        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

        if (!is_guest_mode(&svm->vcpu))
                return;

        c = &svm->vmcb->control;
        h = &svm->nested.hsave->control;
        g = &svm->nested;

        c->intercept_cr = h->intercept_cr;
        c->intercept_dr = h->intercept_dr;
        c->intercept_exceptions = h->intercept_exceptions;
        c->intercept = h->intercept;

        if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
                /* We only want the cr8 intercept bits of L1 */
                c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
                c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

                /*
                 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
                 * affect any interrupt we may want to inject; therefore,
                 * interrupt window vmexits are irrelevant to L0.
                 */
                c->intercept &= ~(1ULL << INTERCEPT_VINTR);
        }

        /* We don't want to see VMMCALLs from a nested guest */
        c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

        c->intercept_cr |= g->intercept_cr;
        c->intercept_dr |= g->intercept_dr;
        c->intercept_exceptions |= g->intercept_exceptions;
        c->intercept |= g->intercept;
}

static void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
        struct vmcb_control_area *dst  = &dst_vmcb->control;
        struct vmcb_control_area *from = &from_vmcb->control;

        dst->intercept_cr         = from->intercept_cr;
        dst->intercept_dr         = from->intercept_dr;
        dst->intercept_exceptions = from->intercept_exceptions;
        dst->intercept            = from->intercept;
        dst->iopm_base_pa         = from->iopm_base_pa;
        dst->msrpm_base_pa        = from->msrpm_base_pa;
        dst->tsc_offset           = from->tsc_offset;
        dst->asid                 = from->asid;
        dst->tlb_ctl              = from->tlb_ctl;
        dst->int_ctl              = from->int_ctl;
        dst->int_vector           = from->int_vector;
        dst->int_state            = from->int_state;
        dst->exit_code            = from->exit_code;
        dst->exit_code_hi         = from->exit_code_hi;
        dst->exit_info_1          = from->exit_info_1;
        dst->exit_info_2          = from->exit_info_2;
        dst->exit_int_info        = from->exit_int_info;
        dst->exit_int_info_err    = from->exit_int_info_err;
        dst->nested_ctl           = from->nested_ctl;
        dst->event_inj            = from->event_inj;
        dst->event_inj_err        = from->event_inj_err;
        dst->nested_cr3           = from->nested_cr3;
        dst->virt_ext             = from->virt_ext;
        dst->pause_filter_count   = from->pause_filter_count;
        dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
        /*
         * This function merges the MSR permission bitmaps of kvm and the
         * nested vmcb. It is optimized in that it only merges the parts
         * where the kvm MSR permission bitmap may contain zero bits.
         */
        int i;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
                return true;

        for (i = 0; i < MSRPM_OFFSETS; i++) {
                u32 value, p;
                u64 offset;

                if (msrpm_offsets[i] == 0xffffffff)
                        break;

                p      = msrpm_offsets[i];
                offset = svm->nested.vmcb_msrpm + (p * 4);

                if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
                        return false;

                svm->nested.msrpm[p] = svm->msrpm[p] | value;
        }

        svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

        return true;
}

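/*
 * Sanity checks on the vmcb that L1 passed to VMRUN; a failure here makes
 * the VMRUN fail with SVM_EXIT_ERR instead of entering L2.
 */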
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
        if ((vmcb->save.efer & EFER_SVME) == 0)
                return false;

        if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
            (vmcb->save.cr0 & X86_CR0_NW))
                return false;

        if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
                return false;

        if (vmcb->control.asid == 0)
                return false;

        if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
            !npt_enabled)
                return false;

        return true;
}

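/*
 * Load L2's state from the nested vmcb into the hardware vmcb, set up
 * nested paging and the merged intercepts, and switch the vcpu into
 * guest mode.  L1's state must already have been saved into hsave.
 */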
void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
                          struct vmcb *nested_vmcb, struct kvm_host_map *map)
{
        bool evaluate_pending_interrupts =
                is_intercept(svm, INTERCEPT_VINTR) ||
                is_intercept(svm, INTERCEPT_IRET);

        if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
                svm->vcpu.arch.hflags |= HF_HIF_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

        if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
                svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
                nested_svm_init_mmu_context(&svm->vcpu);
        }

        /* Load the nested guest state */
        svm->vmcb->save.es = nested_vmcb->save.es;
        svm->vmcb->save.cs = nested_vmcb->save.cs;
        svm->vmcb->save.ss = nested_vmcb->save.ss;
        svm->vmcb->save.ds = nested_vmcb->save.ds;
        svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
        svm->vmcb->save.idtr = nested_vmcb->save.idtr;
        kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
        svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
        svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
        svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
        if (npt_enabled) {
                svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
                svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
        } else
                (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

        /* Guest paging mode is active - reset mmu */
        kvm_mmu_reset_context(&svm->vcpu);

        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
        kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
        kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
        kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

        /* In case we don't even reach vcpu_run, the fields are not updated */
        svm->vmcb->save.rax = nested_vmcb->save.rax;
        svm->vmcb->save.rsp = nested_vmcb->save.rsp;
        svm->vmcb->save.rip = nested_vmcb->save.rip;
        svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
        svm->vcpu.arch.dr6  = nested_vmcb->save.dr6;
        svm->vmcb->save.cpl = nested_vmcb->save.cpl;

        svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
        svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;

        /* cache intercepts */
        svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
        svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
        svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
        svm->nested.intercept            = nested_vmcb->control.intercept;

        svm_flush_tlb(&svm->vcpu);
        svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
        if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
                svm->vcpu.arch.hflags |= HF_VINTR_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

        svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
        svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;

        svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
        svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
        svm->vmcb->control.int_state = nested_vmcb->control.int_state;
        svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
        svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

        svm->vmcb->control.pause_filter_count =
                nested_vmcb->control.pause_filter_count;
        svm->vmcb->control.pause_filter_thresh =
                nested_vmcb->control.pause_filter_thresh;

        kvm_vcpu_unmap(&svm->vcpu, map, true);

        /* Enter Guest-Mode */
        enter_guest_mode(&svm->vcpu);

        /*
         * Merge guest and host intercepts - must be called with vcpu in
         * guest-mode to take effect here
         */
        recalc_intercepts(svm);

        svm->nested.vmcb = vmcb_gpa;

        /*
         * If L1 had a pending IRQ/NMI before executing VMRUN,
         * which wasn't delivered because it was disallowed (e.g.
         * interrupts disabled), L0 needs to evaluate if this pending
         * event should cause an exit from L2 to L1 or be delivered
         * directly to L2.
         *
         * Usually this would be handled by the processor noticing an
         * IRQ/NMI window request.  However, VMRUN can unblock interrupts
         * by implicitly setting GIF, so force L0 to perform pending event
         * evaluation by requesting a KVM_REQ_EVENT.
         */
        enable_gif(svm);
        if (unlikely(evaluate_pending_interrupts))
                kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

        mark_all_dirty(svm->vmcb);
}

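/*
 * Handle a VMRUN intercept from L1: map the vmcb at rax, run the
 * consistency checks, save L1's state into hsave and enter L2.
 */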
int nested_svm_vmrun(struct vcpu_svm *svm)
{
        int ret;
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_host_map map;
        u64 vmcb_gpa;

        if (is_smm(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        vmcb_gpa = svm->vmcb->save.rax;
        ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
        if (ret == -EINVAL) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        } else if (ret) {
                return kvm_skip_emulated_instruction(&svm->vcpu);
        }

        ret = kvm_skip_emulated_instruction(&svm->vcpu);

        nested_vmcb = map.hva;

        if (!nested_vmcb_checks(nested_vmcb)) {
                nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
                nested_vmcb->control.exit_code_hi = 0;
                nested_vmcb->control.exit_info_1  = 0;
                nested_vmcb->control.exit_info_2  = 0;

                kvm_vcpu_unmap(&svm->vcpu, &map, true);

                return ret;
        }

        trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
                               nested_vmcb->save.rip,
                               nested_vmcb->control.int_ctl,
                               nested_vmcb->control.event_inj,
                               nested_vmcb->control.nested_ctl);

        trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
                                    nested_vmcb->control.intercept_cr >> 16,
                                    nested_vmcb->control.intercept_exceptions,
                                    nested_vmcb->control.intercept);

        /* Clear internal status */
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        /*
         * Save the old vmcb, so we don't need to pick what we save, but can
         * restore everything when a VMEXIT occurs
         */
        hsave->save.es     = vmcb->save.es;
        hsave->save.cs     = vmcb->save.cs;
        hsave->save.ss     = vmcb->save.ss;
        hsave->save.ds     = vmcb->save.ds;
        hsave->save.gdtr   = vmcb->save.gdtr;
        hsave->save.idtr   = vmcb->save.idtr;
        hsave->save.efer   = svm->vcpu.arch.efer;
        hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
        hsave->save.cr4    = svm->vcpu.arch.cr4;
        hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
        hsave->save.rip    = kvm_rip_read(&svm->vcpu);
        hsave->save.rsp    = vmcb->save.rsp;
        hsave->save.rax    = vmcb->save.rax;
        if (npt_enabled)
                hsave->save.cr3    = vmcb->save.cr3;
        else
                hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

        copy_vmcb_control_area(hsave, vmcb);

        enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);

        if (!nested_svm_vmrun_msrpm(svm)) {
                svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
                svm->vmcb->control.exit_code_hi = 0;
                svm->vmcb->control.exit_info_1  = 0;
                svm->vmcb->control.exit_info_2  = 0;

                nested_svm_vmexit(svm);
        }

        return ret;
}

void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
        to_vmcb->save.tr = from_vmcb->save.tr;
        to_vmcb->save.ldtr = from_vmcb->save.ldtr;
        to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
        to_vmcb->save.star = from_vmcb->save.star;
        to_vmcb->save.lstar = from_vmcb->save.lstar;
        to_vmcb->save.cstar = from_vmcb->save.cstar;
        to_vmcb->save.sfmask = from_vmcb->save.sfmask;
        to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
        to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
        to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

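/*
 * Emulate a #VMEXIT to L1: copy L2's register state and the exit
 * information into the nested vmcb, restore L1's saved state from hsave
 * and leave guest mode.
 */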
int nested_svm_vmexit(struct vcpu_svm *svm)
{
        int rc;
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_host_map map;

        trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
                                       vmcb->control.exit_info_1,
                                       vmcb->control.exit_info_2,
                                       vmcb->control.exit_int_info,
                                       vmcb->control.exit_int_info_err,
                                       KVM_ISA_SVM);

        rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
        if (rc) {
                if (rc == -EINVAL)
                        kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        nested_vmcb = map.hva;

        /* Exit Guest-Mode */
        leave_guest_mode(&svm->vcpu);
        svm->nested.vmcb = 0;

        /* Give the current vmcb to the guest */
        disable_gif(svm);

        nested_vmcb->save.es     = vmcb->save.es;
        nested_vmcb->save.cs     = vmcb->save.cs;
        nested_vmcb->save.ss     = vmcb->save.ss;
        nested_vmcb->save.ds     = vmcb->save.ds;
        nested_vmcb->save.gdtr   = vmcb->save.gdtr;
        nested_vmcb->save.idtr   = vmcb->save.idtr;
        nested_vmcb->save.efer   = svm->vcpu.arch.efer;
        nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
        nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
        nested_vmcb->save.cr2    = vmcb->save.cr2;
        nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
        nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
        nested_vmcb->save.rip    = vmcb->save.rip;
        nested_vmcb->save.rsp    = vmcb->save.rsp;
        nested_vmcb->save.rax    = vmcb->save.rax;
        nested_vmcb->save.dr7    = vmcb->save.dr7;
        nested_vmcb->save.dr6    = svm->vcpu.arch.dr6;
        nested_vmcb->save.cpl    = vmcb->save.cpl;

        nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
        nested_vmcb->control.int_vector        = vmcb->control.int_vector;
        nested_vmcb->control.int_state         = vmcb->control.int_state;
        nested_vmcb->control.exit_code         = vmcb->control.exit_code;
        nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
        nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
        nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
        nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
        nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

        if (svm->nrips_enabled)
                nested_vmcb->control.next_rip  = vmcb->control.next_rip;

        /*
         * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
         * to make sure that we do not lose injected events. So check event_inj
         * here and copy it to exit_int_info if it is valid.
         * exit_int_info and event_inj can't both be valid because the case
         * below only happens on a VMRUN instruction intercept which has
         * no valid exit_int_info set.
         */
        if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
                struct vmcb_control_area *nc = &nested_vmcb->control;

                nc->exit_int_info     = vmcb->control.event_inj;
                nc->exit_int_info_err = vmcb->control.event_inj_err;
        }

        nested_vmcb->control.tlb_ctl           = 0;
        nested_vmcb->control.event_inj         = 0;
        nested_vmcb->control.event_inj_err     = 0;

        nested_vmcb->control.pause_filter_count =
                svm->vmcb->control.pause_filter_count;
        nested_vmcb->control.pause_filter_thresh =
                svm->vmcb->control.pause_filter_thresh;

        /* We always set V_INTR_MASKING and remember the old value in hflags */
        if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
                nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

        /* Restore the original control entries */
        copy_vmcb_control_area(vmcb, hsave);

        svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        svm->nested.nested_cr3 = 0;

        /* Restore selected save entries */
        svm->vmcb->save.es = hsave->save.es;
        svm->vmcb->save.cs = hsave->save.cs;
        svm->vmcb->save.ss = hsave->save.ss;
        svm->vmcb->save.ds = hsave->save.ds;
        svm->vmcb->save.gdtr = hsave->save.gdtr;
        svm->vmcb->save.idtr = hsave->save.idtr;
        kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
        svm_set_efer(&svm->vcpu, hsave->save.efer);
        svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
        svm_set_cr4(&svm->vcpu, hsave->save.cr4);
        if (npt_enabled) {
                svm->vmcb->save.cr3 = hsave->save.cr3;
                svm->vcpu.arch.cr3 = hsave->save.cr3;
        } else {
                (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
        }
        kvm_rax_write(&svm->vcpu, hsave->save.rax);
        kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
        kvm_rip_write(&svm->vcpu, hsave->save.rip);
        svm->vmcb->save.dr7 = 0;
        svm->vmcb->save.cpl = 0;
        svm->vmcb->control.exit_int_info = 0;

        mark_all_dirty(svm->vmcb);

        kvm_vcpu_unmap(&svm->vcpu, &map, true);

        nested_svm_uninit_mmu_context(&svm->vcpu);
        kvm_mmu_reset_context(&svm->vcpu);
        kvm_mmu_load(&svm->vcpu);

        /*
         * Drop what we picked up for L2 via svm_complete_interrupts() so it
         * doesn't end up in L1.
         */
        svm->vcpu.arch.nmi_injected = false;
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        return 0;
}

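/*
 * Check L1's MSR permission bitmap to decide whether an MSR intercept
 * that occurred while running L2 should be reflected to L1.
 */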
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
        u32 offset, msr, value;
        int write, mask;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
                return NESTED_EXIT_HOST;

        msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        offset = svm_msrpm_offset(msr);
        write  = svm->vmcb->control.exit_info_1 & 1;
        mask   = 1 << ((2 * (msr & 0xf)) + write);

        if (offset == MSR_INVALID)
                return NESTED_EXIT_DONE;

        /* Offset is in 32 bit units but we need it in 8 bit units */
        offset *= 4;

        if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
                return NESTED_EXIT_DONE;

        return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

/* #DB exceptions for our internal use must not cause a vmexit */
static int nested_svm_intercept_db(struct vcpu_svm *svm)
{
        unsigned long dr6 = svm->vmcb->save.dr6;

        /* Always catch it and pass it to userspace if debugging.  */
        if (svm->vcpu.guest_debug &
            (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
                return NESTED_EXIT_HOST;

        /* if we're not singlestepping, it's not ours */
        if (!svm->nmi_singlestep)
                goto reflected_db;

        /* if it's not a singlestep exception, it's not ours */
        if (!(dr6 & DR6_BS))
                goto reflected_db;

        /* if the guest is singlestepping, it should get the vmexit */
        if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
                disable_nmi_singlestep(svm);
                goto reflected_db;
        }

        /* it's ours, the nested hypervisor must not see this one */
        return NESTED_EXIT_HOST;

reflected_db:
        /*
         * Synchronize guest DR6 here just like in kvm_deliver_exception_payload;
         * it will be moved into the nested VMCB by nested_svm_vmexit.  Once
         * exceptions are moved to svm_check_nested_events, all this stuff
         * will just go away and we could just return NESTED_EXIT_HOST
         * unconditionally.  db_interception will queue the exception, which
         * will be processed by svm_check_nested_events if a nested vmexit is
         * required, and we will just use kvm_deliver_exception_payload to copy
         * the payload to DR6 before vmexit.
         */
        WARN_ON(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT);
        svm->vcpu.arch.dr6 &= ~(DR_TRAP_BITS | DR6_RTM);
        svm->vcpu.arch.dr6 |= dr6 & ~DR6_FIXED_1;
        return NESTED_EXIT_DONE;
}

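/*
 * Check L1's I/O permission bitmap to decide whether an IOIO intercept
 * that occurred while running L2 should be reflected to L1.
 */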
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
        unsigned port, size, iopm_len;
        u16 val, mask;
        u8 start_bit;
        u64 gpa;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
                return NESTED_EXIT_HOST;

        port = svm->vmcb->control.exit_info_1 >> 16;
        size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
                SVM_IOIO_SIZE_SHIFT;
        gpa  = svm->nested.vmcb_iopm + (port / 8);
        start_bit = port % 8;
        iopm_len = (start_bit + size > 8) ? 2 : 1;
        mask = (0xf >> (4 - size)) << start_bit;
        val = 0;

        if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
                return NESTED_EXIT_DONE;

        return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

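/*
 * Returns NESTED_EXIT_DONE if the current exit must be reflected to L1 as
 * a nested vmexit, or NESTED_EXIT_HOST if L0 should handle it directly.
 */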
static int nested_svm_intercept(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;

        switch (exit_code) {
        case SVM_EXIT_MSR:
                vmexit = nested_svm_exit_handled_msr(svm);
                break;
        case SVM_EXIT_IOIO:
                vmexit = nested_svm_intercept_ioio(svm);
                break;
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
                u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
                if (svm->nested.intercept_cr & bit)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
                u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
                if (svm->nested.intercept_dr & bit)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
                if (svm->nested.intercept_exceptions & excp_bits) {
                        if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
                                vmexit = nested_svm_intercept_db(svm);
                        else if (exit_code == SVM_EXIT_EXCP_BASE + BP_VECTOR &&
                                 svm->vcpu.guest_debug & KVM_GUESTDBG_USE_SW_BP)
                                vmexit = NESTED_EXIT_HOST;
                        else
                                vmexit = NESTED_EXIT_DONE;
                }
                /* async page faults always cause a vmexit */
                else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
                         svm->vcpu.arch.exception.nested_apf != 0)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_ERR: {
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        default: {
                u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
                if (svm->nested.intercept & exit_bits)
                        vmexit = NESTED_EXIT_DONE;
        }
        }

        return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
        int vmexit;

        vmexit = nested_svm_intercept(svm);

        if (vmexit == NESTED_EXIT_DONE)
                nested_svm_vmexit(svm);

        return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
        if (!(svm->vcpu.arch.efer & EFER_SVME) ||
            !is_paging(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        if (svm->vmcb->save.cpl) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        return 0;
}

int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                               bool has_error_code, u32 error_code)
{
        int vmexit;

        if (!is_guest_mode(&svm->vcpu))
                return 0;

        vmexit = nested_svm_intercept(svm);
        if (vmexit != NESTED_EXIT_DONE)
                return 0;

        svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1 = error_code;

        /*
         * EXITINFO2 is undefined for all exception intercepts other
         * than #PF.
         */
        if (svm->vcpu.arch.exception.nested_apf)
                svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
        else if (svm->vcpu.arch.exception.has_payload)
                svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
        else
                svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

        svm->nested.exit_required = true;
        return vmexit;
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
        svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        /* nested_svm_vmexit() is called afterwards from handle_exit */
        svm->nested.exit_required = true;
        trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
}

static bool nested_exit_on_intr(struct vcpu_svm *svm)
{
        return (svm->nested.intercept & 1ULL);
}

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        bool block_nested_events =
                kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required;

        if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
                if (block_nested_events)
                        return -EBUSY;
                nested_svm_intr(svm);
                return 0;
        }

        return 0;
}

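/*
 * Exits that L0 wants to inspect first, no matter which intercepts L1
 * requested: returns NESTED_EXIT_HOST for exits L0 always handles itself,
 * NESTED_EXIT_CONTINUE to go on with the regular intercept checks.
 */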
int nested_svm_exit_special(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;

        switch (exit_code) {
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
        case SVM_EXIT_EXCP_BASE + MC_VECTOR:
                return NESTED_EXIT_HOST;
        case SVM_EXIT_NPF:
                /* For now we are always handling NPFs when using them */
                if (npt_enabled)
                        return NESTED_EXIT_HOST;
                break;
        case SVM_EXIT_EXCP_BASE + PF_VECTOR:
                /* When we're shadowing, trap PFs, but not async PF */
                if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
                        return NESTED_EXIT_HOST;
                break;
        default:
                break;
        }

        return NESTED_EXIT_CONTINUE;
}

struct kvm_x86_nested_ops svm_nested_ops = {
        .check_events = svm_check_nested_events,
};