KVM: x86/mmu: Make .write_log_dirty a nested operation
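The hunks below drop vmx_write_pml_buffer() and the top-level .write_log_dirty hook from vmx_x86_ops; per the subject line, the PML write-back is reworked as a nested-only operation. As a rough, hedged illustration of that shape only, here is a small self-contained C model of moving a callback from a top-level ops table into a nested ops table. Every name in it (struct x86_ops, struct nested_ops, nested_write_pml_buffer, is_guest_mode parameter) is an illustrative assumption and is not taken from this diff or the kernel sources.

	#include <stdio.h>

	struct nested_ops {
		/* the dirty-log hook now lives only in the nested table (assumed shape) */
		int (*write_log_dirty)(void);
	};

	struct x86_ops {
		/* after the change there is no top-level .write_log_dirty member */
		const struct nested_ops *nested_ops;
	};

	static int nested_write_pml_buffer(void)
	{
		/* stand-in for the nested-guest PML write-back done by the removed code */
		puts("record dirty GPA in the L1 page-modification log");
		return 0;
	}

	static const struct nested_ops demo_nested_ops = {
		.write_log_dirty = nested_write_pml_buffer,
	};

	static const struct x86_ops demo_ops = {
		.nested_ops = &demo_nested_ops,
	};

	/* common-code caller: the hook is only reachable through the nested table,
	 * and only when a nested guest is running */
	static int write_log_dirty(const struct x86_ops *ops, int is_guest_mode)
	{
		if (is_guest_mode && ops->nested_ops && ops->nested_ops->write_log_dirty)
			return ops->nested_ops->write_log_dirty();
		return 0;
	}

	int main(void)
	{
		return write_log_dirty(&demo_ops, 1);
	}

The model only shows the ops-table layout; the actual guest-mode PML logic is the one visible in the removed vmx_write_pml_buffer() body further down.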
[linux-2.6-microblaze.git] arch/x86/kvm/vmx/vmx.c
index 08e26a9..8411118 100644
@@ -133,9 +133,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 #define KVM_VM_CR0_ALWAYS_ON                           \
        (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |      \
         X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
-#define KVM_CR4_GUEST_OWNED_BITS                                     \
-       (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
-        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
 
 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
@@ -4034,9 +4031,9 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 {
-       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
-       if (enable_ept)
-               vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS;
+       if (!enable_ept)
+               vmx->vcpu.arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
        if (is_guest_mode(&vmx->vcpu))
                vmx->vcpu.arch.cr4_guest_owned_bits &=
                        ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
@@ -4333,8 +4330,8 @@ static void init_vmcs(struct vcpu_vmx *vmx)
        /* 22.2.1, 20.8.1 */
        vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
 
-       vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
-       vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
+       vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+       vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
 
        set_cr4_guest_host_mask(vmx);
 
@@ -4709,7 +4706,7 @@ static void kvm_machine_check(void)
                .flags = X86_EFLAGS_IF,
        };
 
-       do_machine_check(&regs, 0);
+       do_machine_check(&regs);
 #endif
 }
 
@@ -6606,23 +6603,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
                                        msrs[i].host, false);
 }
 
-static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
-{
-       u32 host_umwait_control;
-
-       if (!vmx_has_waitpkg(vmx))
-               return;
-
-       host_umwait_control = get_umwait_control_msr();
-
-       if (vmx->msr_ia32_umwait_control != host_umwait_control)
-               add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
-                       vmx->msr_ia32_umwait_control,
-                       host_umwait_control, false);
-       else
-               clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
-}
-
 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6728,9 +6708,7 @@ reenter_guest:
 
        pt_guest_enter(vmx);
 
-       if (vcpu_to_pmu(vcpu)->version)
-               atomic_switch_perf_msrs(vmx);
-       atomic_switch_umwait_control_msr(vmx);
+       atomic_switch_perf_msrs(vmx);
 
        if (enable_preemption_timer)
                vmx_update_hv_timer(vcpu);
@@ -7501,42 +7479,6 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
        kvm_flush_pml_buffers(kvm);
 }
 
-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
-{
-       struct vmcs12 *vmcs12;
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-       gpa_t gpa, dst;
-
-       if (is_guest_mode(vcpu)) {
-               WARN_ON_ONCE(vmx->nested.pml_full);
-
-               /*
-                * Check if PML is enabled for the nested guest.
-                * Whether eptp bit 6 is set is already checked
-                * as part of A/D emulation.
-                */
-               vmcs12 = get_vmcs12(vcpu);
-               if (!nested_cpu_has_pml(vmcs12))
-                       return 0;
-
-               if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
-                       vmx->nested.pml_full = true;
-                       return 1;
-               }
-
-               gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
-               dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
-
-               if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
-                                        offset_in_page(dst), sizeof(gpa)))
-                       return 0;
-
-               vmcs12->guest_pml_index--;
-       }
-
-       return 0;
-}
-
 static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
                                           struct kvm_memory_slot *memslot,
                                           gfn_t offset, unsigned long mask)
@@ -7965,7 +7907,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
        .flush_log_dirty = vmx_flush_log_dirty,
        .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
-       .write_log_dirty = vmx_write_pml_buffer,
 
        .pre_block = vmx_pre_block,
        .post_block = vmx_post_block,