KVM: nSVM: extract preparation of VMCB for nested run

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 1ae13fd..73be7af 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -25,6 +25,7 @@
 #include "trace.h"
 #include "mmu.h"
 #include "x86.h"
+#include "lapic.h"
 #include "svm.h"
 
 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
@@ -227,22 +228,27 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
        return true;
 }
 
-void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-                         struct vmcb *nested_vmcb, struct kvm_host_map *map)
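+/*
+ * Cache the fields of L1's VMCB control area that nested exit handling
+ * consults later: intercept bitmaps, MSRPM/IOPM bases and nested CR3.
+ */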
+static void load_nested_vmcb_control(struct vcpu_svm *svm,
+                                    struct vmcb_control_area *control)
 {
-       bool evaluate_pending_interrupts =
-               is_intercept(svm, INTERCEPT_VINTR) ||
-               is_intercept(svm, INTERCEPT_IRET);
+       svm->nested.nested_cr3 = control->nested_cr3;
 
-       if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
-               svm->vcpu.arch.hflags |= HF_HIF_MASK;
-       else
-               svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
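+       /* Mask off the low 12 bits: the permission map bases are 4K-aligned */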
+       svm->nested.vmcb_msrpm = control->msrpm_base_pa & ~0x0fffULL;
+       svm->nested.vmcb_iopm  = control->iopm_base_pa  & ~0x0fffULL;
 
-       if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
-               svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
+       /* cache intercepts */
+       svm->nested.intercept_cr         = control->intercept_cr;
+       svm->nested.intercept_dr         = control->intercept_dr;
+       svm->nested.intercept_exceptions = control->intercept_exceptions;
+       svm->nested.intercept            = control->intercept;
+
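+       /* TSC offsets nest: L2 runs with L1's offset plus its own */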
+       svm->vcpu.arch.tsc_offset += control->tsc_offset;
+}
+
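+/*
+ * Load the L2 guest state from the nested VMCB into the VMCB that the
+ * CPU actually runs with: segments, control registers, RIP/RSP/RAX.
+ */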
+static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
+{
+       if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
                nested_svm_init_mmu_context(&svm->vcpu);
-       }
 
        /* Load the nested guest state */
        svm->vmcb->save.es = nested_vmcb->save.es;
@@ -255,11 +261,7 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
        svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
        svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
        svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
-       if (npt_enabled) {
-               svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
-               svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
-       } else
-               (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
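+       /* kvm_set_cr3() handles both the NPT and the shadow paging cases */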
+       (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
 
        /* Guest paging mode is active - reset mmu */
        kvm_mmu_reset_context(&svm->vcpu);
@@ -276,26 +278,19 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
        svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
        svm->vcpu.arch.dr6  = nested_vmcb->save.dr6;
        svm->vmcb->save.cpl = nested_vmcb->save.cpl;
+}
 
-       svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
-       svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;
-
-       /* cache intercepts */
-       svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
-       svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
-       svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
-       svm->nested.intercept            = nested_vmcb->control.intercept;
-
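+/*
+ * Program the control fields of the VMCB that the CPU runs with: interrupt
+ * state, TSC offset, and the intercepts merged by recalc_intercepts().
+ */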
+static void nested_prepare_vmcb_control(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
+{
        svm_flush_tlb(&svm->vcpu);
-       svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
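+       /*
+        * L2 always runs with V_INTR_MASKING set so that physical interrupts
+        * remain under KVM's control; HF_VINTR_MASK mirrors L1's own setting.
+        */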
        if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
                svm->vcpu.arch.hflags |= HF_VINTR_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
 
-       svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
        svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
 
+       svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
        svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
        svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
        svm->vmcb->control.int_state = nested_vmcb->control.int_state;
@@ -307,8 +302,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
        svm->vmcb->control.pause_filter_thresh =
                nested_vmcb->control.pause_filter_thresh;
 
-       kvm_vcpu_unmap(&svm->vcpu, map, true);
-
        /* Enter Guest-Mode */
        enter_guest_mode(&svm->vcpu);
 
@@ -318,7 +311,25 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
         */
        recalc_intercepts(svm);
 
+       mark_all_dirty(svm->vmcb);
+}
+
+void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+                         struct vmcb *nested_vmcb)
+{
+       bool evaluate_pending_interrupts =
+               is_intercept(svm, INTERCEPT_VINTR) ||
+               is_intercept(svm, INTERCEPT_IRET);
+
        svm->nested.vmcb = vmcb_gpa;
+       if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
+               svm->vcpu.arch.hflags |= HF_HIF_MASK;
+       else
+               svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
+
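+       /* Cache L1's controls, then load L2's save and control state */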
+       load_nested_vmcb_control(svm, &nested_vmcb->control);
+       nested_prepare_vmcb_save(svm, nested_vmcb);
+       nested_prepare_vmcb_control(svm, nested_vmcb);
 
        /*
         * If L1 had a pending IRQ/NMI before executing VMRUN,
@@ -335,8 +346,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
        enable_gif(svm);
        if (unlikely(evaluate_pending_interrupts))
                kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
-
-       mark_all_dirty(svm->vmcb);
 }
 
 int nested_svm_vmrun(struct vcpu_svm *svm)
@@ -371,10 +380,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
                nested_vmcb->control.exit_code_hi = 0;
                nested_vmcb->control.exit_info_1  = 0;
                nested_vmcb->control.exit_info_2  = 0;
-
-               kvm_vcpu_unmap(&svm->vcpu, &map, true);
-
-               return ret;
+               goto out;
        }
 
        trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
@@ -417,7 +423,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
        copy_vmcb_control_area(hsave, vmcb);
 
        svm->nested.nested_run_pending = 1;
-       enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);
+       enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);
 
        if (!nested_svm_vmrun_msrpm(svm)) {
                svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
@@ -428,6 +434,9 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
                nested_svm_vmexit(svm);
        }
 
+out:
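+       /* Success and failure paths converge here to drop the VMCB mapping */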
+       kvm_vcpu_unmap(&svm->vcpu, &map, true);
+
        return ret;
 }
 
@@ -788,12 +797,37 @@ static void nested_svm_intr(struct vcpu_svm *svm)
        nested_svm_vmexit(svm);
 }
 
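+/* True if L1 asked to intercept INIT signals arriving while L2 is running */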
+static inline bool nested_exit_on_init(struct vcpu_svm *svm)
+{
+       return (svm->nested.intercept & (1ULL << INTERCEPT_INIT));
+}
+
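+/* Emulate an INIT vmexit from L2 to L1 */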
+static void nested_svm_init(struct vcpu_svm *svm)
+{
+       svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
+       svm->vmcb->control.exit_info_1 = 0;
+       svm->vmcb->control.exit_info_2 = 0;
+
+       nested_svm_vmexit(svm);
+}
+
 static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        bool block_nested_events =
-               kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required ||
-               svm->nested.nested_run_pending;
+               kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
+       struct kvm_lapic *apic = vcpu->arch.apic;
+
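+       /*
+        * A pending INIT is handled before exceptions: if L1 intercepts it,
+        * emulate an SVM_EXIT_INIT vmexit; -EBUSY asks the caller to retry
+        * after the blocking condition goes away.
+        */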
+       if (lapic_in_kernel(vcpu) &&
+           test_bit(KVM_APIC_INIT, &apic->pending_events)) {
+               if (block_nested_events)
+                       return -EBUSY;
+               if (!nested_exit_on_init(svm))
+                       return 0;
+               nested_svm_init(svm);
+               return 0;
+       }
 
        if (vcpu->arch.exception.pending) {
                if (block_nested_events)