Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b3f77d1..ccb03d6 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2207,7 +2207,8 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
        }
 }
 
-static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
+                                struct vmcs12 *vmcs12)
 {
        u32 exec_control;
        u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
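
The new vmcs01 parameter drives most of the hunks below: instead of recomputing L0's desired controls from scratch (vmx_pin_based_exec_ctrl(), vmx_exec_control(), and friends), prepare_vmcs02_early() now reads the values already cached for vmcs01. A minimal standalone model of the accessor pattern behind the __*_controls_get() helpers (the struct layout and names here are illustrative, not the kernel's):

#include <stdint.h>

/* Software-cached ("shadowed") copy of one VMCS controls field. */
struct loaded_vmcs_model {
        uint32_t pin_controls_shadow;
};

/* Return L0's current value from the cache; no VMREAD required. */
static inline uint32_t
__pin_controls_get_model(const struct loaded_vmcs_model *vmcs)
{
        return vmcs->pin_controls_shadow;
}

Reading the software cache rather than the hardware field is also what makes these calls safe at the call site further down, where vmcs02 has already been loaded and a VMREAD would hit the wrong VMCS.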
@@ -2218,23 +2219,22 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
        /*
         * PIN CONTROLS
         */
-       exec_control = vmx_pin_based_exec_ctrl(vmx);
+       exec_control = __pin_controls_get(vmcs01);
        exec_control |= (vmcs12->pin_based_vm_exec_control &
                         ~PIN_BASED_VMX_PREEMPTION_TIMER);
 
        /* Posted interrupts setting is only taken from vmcs12.  */
-       if (nested_cpu_has_posted_intr(vmcs12)) {
+       vmx->nested.pi_pending = false;
+       if (nested_cpu_has_posted_intr(vmcs12))
                vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
-               vmx->nested.pi_pending = false;
-       } else {
+       else
                exec_control &= ~PIN_BASED_POSTED_INTR;
-       }
        pin_controls_set(vmx, exec_control);
 
        /*
         * EXEC CONTROLS
         */
-       exec_control = vmx_exec_control(vmx); /* L0's desires */
+       exec_control = __exec_controls_get(vmcs01); /* L0's desires */
        exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
        exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
        exec_control &= ~CPU_BASED_TPR_SHADOW;
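
The PIN and EXEC control merges keep their shape while swapping the base to the vmcs01 cache: start from L0's value, OR in L1's requests, and always strip L1's preemption-timer bit, since KVM emulates the VMX-preemption timer for L2 in software. The posted-interrupt setting is still taken from vmcs12 alone; note the small semantic change that the restructured branch now clears pi_pending unconditionally, not just when L1 enables posted interrupts. A runnable toy version of the pin-controls merge (bit encodings per the SDM, sample values invented):

#include <stdint.h>
#include <stdio.h>

#define PIN_BASED_VMX_PREEMPTION_TIMER  (1u << 6)
#define PIN_BASED_POSTED_INTR           (1u << 7)

int main(void)
{
        uint32_t vmcs01_pin = 0x16;     /* hypothetical L0 value */
        uint32_t vmcs12_pin = PIN_BASED_VMX_PREEMPTION_TIMER; /* L1's request */
        int l1_uses_posted_intr = 0;

        /* OR in L1's desires, but never pass through its preemption timer. */
        uint32_t pin = vmcs01_pin |
                       (vmcs12_pin & ~PIN_BASED_VMX_PREEMPTION_TIMER);

        /* Posted interrupts: honored only if vmcs12 asks for them. */
        if (!l1_uses_posted_intr)
                pin &= ~PIN_BASED_POSTED_INTR;

        printf("vmcs02 pin controls: %#x\n", pin);      /* prints 0x16 */
        return 0;
}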
@@ -2271,10 +2271,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
         * SECONDARY EXEC CONTROLS
         */
        if (cpu_has_secondary_exec_ctrls()) {
-               exec_control = vmx->secondary_exec_control;
+               exec_control = __secondary_exec_controls_get(vmcs01);
 
                /* Take the following fields only from vmcs12 */
                exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+                                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
                                  SECONDARY_EXEC_ENABLE_INVPCID |
                                  SECONDARY_EXEC_ENABLE_RDTSCP |
                                  SECONDARY_EXEC_XSAVES |
@@ -2282,7 +2283,9 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
                                  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
                                  SECONDARY_EXEC_APIC_REGISTER_VIRT |
                                  SECONDARY_EXEC_ENABLE_VMFUNC |
-                                 SECONDARY_EXEC_TSC_SCALING);
+                                 SECONDARY_EXEC_TSC_SCALING |
+                                 SECONDARY_EXEC_DESC);
+
                if (nested_cpu_has(vmcs12,
                                   CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
                        exec_control |= vmcs12->secondary_vm_exec_control;
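
Moving the secondary-controls base from the cached vmx->secondary_exec_control to live vmcs01 state makes the "take only from vmcs12" mask matter more: bits L0 may have set for its own purposes, e.g. SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE for its APIC handling or SECONDARY_EXEC_DESC (which KVM uses to emulate UMIP on hardware without it), must not leak into vmcs02 unless L1 asked for them, hence the two new mask entries. A sketch of the masking with an illustrative helper (the bit encodings are the real ones):

#include <stdint.h>

#define SECONDARY_EXEC_DESC                     (1u << 2)
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE   (1u << 4)

static uint32_t merge_secondary_model(uint32_t vmcs01_sec, uint32_t vmcs12_sec,
                                      int l1_activates_secondary_ctls)
{
        /* Purge take-only-from-vmcs12 bits from the vmcs01 baseline,
         * then OR in whatever L1 requested. */
        uint32_t sec = vmcs01_sec & ~(SECONDARY_EXEC_DESC |
                                      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);

        if (l1_activates_secondary_ctls)
                sec |= vmcs12_sec;
        return sec;
}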
@@ -2322,8 +2325,9 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
         * on the related bits (if supported by the CPU) in the hope that
         * we can avoid VMWrites during vmx_set_efer().
         */
-       exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
-                       ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
+       exec_control = __vm_entry_controls_get(vmcs01);
+       exec_control |= vmcs12->vm_entry_controls;
+       exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
        if (cpu_has_load_ia32_efer()) {
                if (guest_efer & EFER_LMA)
                        exec_control |= VM_ENTRY_IA32E_MODE;
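
The ENTRY CONTROLS rewrite is equivalent to the expression it replaces, just built from the vmcs01 cache: OR the two sets together, then recompute the EFER-related bits from the guest's effective EFER as the comment above explains. A compilable model (it folds in the cpu_has_load_ia32_efer() path, i.e. assumes the load-EFER entry control exists):

#include <stdint.h>

#define EFER_LMA                (1ull << 10)
#define VM_ENTRY_IA32E_MODE     (1u << 9)
#define VM_ENTRY_LOAD_IA32_EFER (1u << 15)

static uint32_t entry_controls_model(uint32_t vmcs01_entry, uint32_t vmcs12_entry,
                                     uint64_t guest_efer, uint64_t host_efer)
{
        uint32_t ctl = vmcs01_entry | vmcs12_entry;

        /* Drop both bits, then re-derive them from the guest's EFER. */
        ctl &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
        if (guest_efer & EFER_LMA)
                ctl |= VM_ENTRY_IA32E_MODE;
        if (guest_efer != host_efer)
                ctl |= VM_ENTRY_LOAD_IA32_EFER;
        return ctl;
}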
@@ -2339,9 +2343,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
         * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
         * bits may be modified by vmx_set_efer() in prepare_vmcs02().
         */
-       exec_control = vmx_vmexit_ctrl();
+       exec_control = __vm_exit_controls_get(vmcs01);
        if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
                exec_control |= VM_EXIT_LOAD_IA32_EFER;
+       else
+               exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
        vm_exit_controls_set(vmx, exec_control);
 
        /*
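
The new else branch is not cosmetic. The old vmx_vmexit_ctrl() rebuilt the value from scratch, with the EFER bit defaulting to clear, whereas live vmcs01 state may already have VM_EXIT_LOAD_IA32_EFER set; it must now be cleared explicitly whenever guest and host EFER match. In model form (same illustrative style as above):

#include <stdint.h>

#define VM_EXIT_LOAD_IA32_EFER  (1u << 21)

static uint32_t exit_controls_model(uint32_t vmcs01_exit,
                                    uint64_t guest_efer, uint64_t host_efer)
{
        uint32_t ctl = vmcs01_exit;

        /* Set or clear, never inherit: the baseline may hold a stale bit. */
        if (guest_efer != host_efer)
                ctl |= VM_EXIT_LOAD_IA32_EFER;
        else
                ctl &= ~VM_EXIT_LOAD_IA32_EFER;
        return ctl;
}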
@@ -3384,7 +3390,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 
        vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
 
-       prepare_vmcs02_early(vmx, vmcs12);
+       prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
 
        if (from_vmentry) {
                if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
@@ -4304,7 +4310,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
                seg.l = 1;
        else
                seg.db = 1;
-       vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
+       __vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
        seg = (struct kvm_segment) {
                .base = 0,
                .limit = 0xFFFFFFFF,
@@ -4315,17 +4321,17 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
                .g = 1
        };
        seg.selector = vmcs12->host_ds_selector;
-       vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
+       __vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
        seg.selector = vmcs12->host_es_selector;
-       vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
+       __vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
        seg.selector = vmcs12->host_ss_selector;
-       vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
+       __vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
        seg.selector = vmcs12->host_fs_selector;
        seg.base = vmcs12->host_fs_base;
-       vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
+       __vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
        seg.selector = vmcs12->host_gs_selector;
        seg.base = vmcs12->host_gs_base;
-       vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
+       __vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
        seg = (struct kvm_segment) {
                .base = vmcs12->host_tr_base,
                .limit = 0x67,
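
Each vmx_set_segment() in this path becomes __vmx_set_segment(). The likely shape of that split, modeled below with stand-in types: the un-prefixed wrapper also re-evaluates whether invalid guest state forces emulation, a check that is dead weight here because host state loaded on a nested VM-exit is architecturally required to be valid.

/* Hypothetical model of the wrapper/worker relationship; the types and
 * the validity check are stand-ins, not kernel code. */
struct seg_state_model { unsigned short selector; unsigned long base; };

struct vcpu_model {
        struct seg_state_model segs[8];
        int emulation_required;
};

static int guest_state_invalid_model(const struct vcpu_model *v)
{
        (void)v;
        return 0;       /* placeholder for the real validity checks */
}

/* Worker: only writes the segment register state. */
static void __set_segment_model(struct vcpu_model *v,
                                const struct seg_state_model *s, int idx)
{
        v->segs[idx] = *s;
}

/* Wrapper: also refreshes emulation_required; skipped by the VM-exit
 * path above, where the incoming state is known-good by construction. */
static void set_segment_model(struct vcpu_model *v,
                              const struct seg_state_model *s, int idx)
{
        __set_segment_model(v, s, idx);
        v->emulation_required = guest_state_invalid_model(v);
}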
@@ -4333,14 +4339,15 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
                .type = 11,
                .present = 1
        };
-       vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
+       __vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
+
+       memset(&seg, 0, sizeof(seg));
+       seg.unusable = 1;
+       __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
 
        kvm_set_dr(vcpu, 7, 0x400);
        vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
 
-       if (cpu_has_vmx_msr_bitmap())
-               vmx_update_msr_bitmap(vcpu);
-
        if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
                                vmcs12->vm_exit_msr_load_count))
                nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
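
The added memset/unusable block fixes a real gap: a VM-exit leaves LDTR null and unusable (SDM Vol. 3, "Loading Host Segment and Descriptor-Table Registers"), which the emulated L2->L1 exit never did before. A self-contained version of the fixup, with a stand-in for struct kvm_segment:

#include <string.h>

struct seg_model {
        unsigned long base;
        unsigned int limit;
        unsigned short selector;
        unsigned char type, present, unusable;
};

/* Host LDTR after VM-exit: selector 0, everything clear, unusable set. */
static void load_host_ldtr_model(struct seg_model *ldtr)
{
        memset(ldtr, 0, sizeof(*ldtr));
        ldtr->unusable = 1;
}

The deleted cpu_has_vmx_msr_bitmap()/vmx_update_msr_bitmap() pair here (and again in nested_vmx_restore_host_state() below) suggests the MSR bitmap is treated as purely per-VMCS state, refreshed when the intercept policy actually changes rather than on every nested transition; switching back to vmcs01 picks up a bitmap that is already correct.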
@@ -4419,9 +4426,6 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
 
        kvm_mmu_reset_context(vcpu);
 
-       if (cpu_has_vmx_msr_bitmap())
-               vmx_update_msr_bitmap(vcpu);
-
        /*
         * This nasty bit of open coding is a compromise between blindly
         * loading L1's MSRs using the exit load lists (incorrect emulation