KVM: x86: Hoist kvm_dirty_regs check out of sync_regs()
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 17468d9..e906c05 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -66,6 +66,7 @@
 #include <asm/msr.h>
 #include <asm/desc.h>
 #include <asm/mce.h>
+#include <asm/pkru.h>
 #include <linux/kernel_stat.h>
 #include <asm/fpu/internal.h> /* Ugh! */
 #include <asm/pvclock.h>
@@ -939,7 +940,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
            (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
             (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
            vcpu->arch.pkru != vcpu->arch.host_pkru)
-               __write_pkru(vcpu->arch.pkru);
+               write_pkru(vcpu->arch.pkru);
 }
 EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
 
@@ -953,7 +954,7 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
             (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
                vcpu->arch.pkru = rdpkru();
                if (vcpu->arch.pkru != vcpu->arch.host_pkru)
-                       __write_pkru(vcpu->arch.host_pkru);
+                       write_pkru(vcpu->arch.host_pkru);
        }
 
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
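These two hunks move KVM from the low-level __write_pkru() to the plain
write_pkru() helper, which now lives in the newly included <asm/pkru.h>.
For context, a sketch of that helper as it reads upstream (paraphrased
from memory; the exact body may differ by kernel version):

	static inline void write_pkru(u32 pkru)
	{
		if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
			return;
		/* WRPKRU is expensive relative to RDPKRU; skip no-op writes. */
		if (pkru != rdpkru())
			wrpkru(pkru);
	}

The call sites above already compare against host_pkru, so the helper's
internal rdpkru() check is redundant here, but harmless.
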
@@ -3406,7 +3407,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                break;
        case MSR_KVM_ASYNC_PF_ACK:
-               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
                        return 1;
                if (data & 0x1) {
                        vcpu->arch.apf.pageready_pending = false;
@@ -3745,7 +3746,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = vcpu->arch.apf.msr_int_val;
                break;
        case MSR_KVM_ASYNC_PF_ACK:
-               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
                        return 1;
 
                msr_info->data = 0;
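Both MSR_KVM_ASYNC_PF_ACK paths were gated on the wrong feature bit: the
ACK MSR is part of the interrupt-based async page fault protocol, so
KVM_FEATURE_ASYNC_PF_INT is the correct guard. For reference, the guest
acknowledges a 'page ready' notification by writing 1 to the MSR from
its interrupt handler, roughly (a sketch of the documented protocol; the
real guest code lives in arch/x86/kernel/kvm.c):

	/* Ack the 'page ready' event so the host can deliver
	 * the next pending token. */
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT))
		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
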
@@ -4357,8 +4358,17 @@ static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
 
 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
 {
-       return kvm_arch_interrupt_allowed(vcpu) &&
-               kvm_cpu_accept_dm_intr(vcpu);
+       /*
+        * Do not cause an interrupt window exit if an exception
+        * is pending or an event needs reinjection; userspace
+        * might want to inject the interrupt manually using KVM_SET_REGS
+        * or KVM_SET_SREGS.  For that to work, we must be at an
+        * instruction boundary and with no events half-injected.
+        */
+       return (kvm_arch_interrupt_allowed(vcpu) &&
+               kvm_cpu_accept_dm_intr(vcpu) &&
+               !kvm_event_needs_reinjection(vcpu) &&
+               !vcpu->arch.exception.pending);
 }
 
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
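The result of kvm_vcpu_ready_for_interrupt_injection() is reported to
userspace as kvm_run->ready_for_interrupt_injection, which split-irqchip
VMMs consult before injecting. A rough sketch of the consuming side
(hypothetical vcpu_fd and vector; not taken from any particular VMM):

	run->request_interrupt_window = 1;
	ioctl(vcpu_fd, KVM_RUN, 0);
	if (run->ready_for_interrupt_injection) {
		struct kvm_interrupt irq = { .irq = vector };
		ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
	}

With the two extra conditions, KVM no longer reports readiness while an
exception is pending or an event awaits reinjection, so the injected
interrupt cannot clobber a half-delivered event.
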
@@ -4704,20 +4714,21 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
         */
        valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
        while (valid) {
+               u32 size, offset, ecx, edx;
                u64 xfeature_mask = valid & -valid;
                int xfeature_nr = fls64(xfeature_mask) - 1;
-               void *src = get_xsave_addr(xsave, xfeature_nr);
-
-               if (src) {
-                       u32 size, offset, ecx, edx;
-                       cpuid_count(XSTATE_CPUID, xfeature_nr,
-                                   &size, &offset, &ecx, &edx);
-                       if (xfeature_nr == XFEATURE_PKRU)
-                               memcpy(dest + offset, &vcpu->arch.pkru,
-                                      sizeof(vcpu->arch.pkru));
-                       else
-                               memcpy(dest + offset, src, size);
+               void *src;
+
+               cpuid_count(XSTATE_CPUID, xfeature_nr,
+                           &size, &offset, &ecx, &edx);
+
+               if (xfeature_nr == XFEATURE_PKRU) {
+                       memcpy(dest + offset, &vcpu->arch.pkru,
+                              sizeof(vcpu->arch.pkru));
+               } else {
+                       src = get_xsave_addr(xsave, xfeature_nr);
+                       if (src)
+                               memcpy(dest + offset, src, size);
                }
 
                valid -= xfeature_mask;
@@ -4747,18 +4758,20 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
         */
        valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
        while (valid) {
+               u32 size, offset, ecx, edx;
                u64 xfeature_mask = valid & -valid;
                int xfeature_nr = fls64(xfeature_mask) - 1;
-               void *dest = get_xsave_addr(xsave, xfeature_nr);
-
-               if (dest) {
-                       u32 size, offset, ecx, edx;
-                       cpuid_count(XSTATE_CPUID, xfeature_nr,
-                                   &size, &offset, &ecx, &edx);
-                       if (xfeature_nr == XFEATURE_PKRU)
-                               memcpy(&vcpu->arch.pkru, src + offset,
-                                      sizeof(vcpu->arch.pkru));
-                       else
+
+               cpuid_count(XSTATE_CPUID, xfeature_nr,
+                           &size, &offset, &ecx, &edx);
+
+               if (xfeature_nr == XFEATURE_PKRU) {
+                       memcpy(&vcpu->arch.pkru, src + offset,
+                              sizeof(vcpu->arch.pkru));
+               } else {
+                       void *dest = get_xsave_addr(xsave, xfeature_nr);
+
+                       if (dest)
                                memcpy(dest, src + offset, size);
                }
 
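Both loops walk the feature bitmap lowest bit first via valid & -valid.
The restructuring matters because PKRU is tracked in vcpu->arch.pkru
rather than in the guest XSAVE buffer, so it must be copied even when
get_xsave_addr() returns NULL for the feature. The bit-walk idiom in
isolation, as a minimal standalone example (plain C, illustrative values;
__builtin_ctzll is the gcc/clang counterpart of fls64(mask) - 1 for a
single-bit mask):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t valid = 0x2c4;			/* example xstate_bv */

		while (valid) {
			uint64_t mask = valid & -valid;	/* lowest set bit */
			int nr = __builtin_ctzll(mask);

			printf("xfeature %d\n", nr);
			valid -= mask;
		}
		return 0;
	}
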
@@ -9382,6 +9395,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        }
 
        if (kvm_request_pending(vcpu)) {
+               if (kvm_check_request(KVM_REQ_VM_BUGGED, vcpu)) {
+                       r = -EIO;
+                       goto out;
+               }
                if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
                        if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
                                r = 0;
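KVM_REQ_VM_BUGGED is raised by the KVM_BUG()/KVM_BUG_ON() machinery when
KVM detects a broken internal invariant; every vCPU then bails out to
userspace with -EIO instead of limping along. A sketch of a call site
that would trip the path above (KVM_BUG_ON() paraphrased from
include/linux/kvm_host.h; vcpu_state_is_sane() is a made-up predicate
for illustration):

	/* Mark the VM bugged and kick all vCPUs out to userspace. */
	if (KVM_BUG_ON(!vcpu_state_is_sane(vcpu), vcpu->kvm))
		return -EIO;
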
@@ -9597,6 +9614,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                set_debugreg(vcpu->arch.eff_db[3], 3);
                set_debugreg(vcpu->arch.dr6, 6);
                vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
+       } else if (unlikely(hw_breakpoint_active())) {
+               set_debugreg(0, 7);
        }
 
        for (;;) {
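The new else-branch ensures host hardware breakpoints cannot fire while
the guest runs: when KVM has no guest debug registers to load but the
host has breakpoints armed, DR7 is zeroed before entry (clearing the
L0-L3/G0-G3 enable bits in DR7 disables all four breakpoints). The
helper is a cheap test of the per-CPU DR7 shadow, roughly (paraphrased
from <asm/debugreg.h>):

	static inline bool hw_breakpoint_active(void)
	{
		/* Any local/global enable bit set in the cached DR7? */
		return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
	}
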
@@ -9885,7 +9904,7 @@ static void kvm_save_current_fpu(struct fpu *fpu)
                memcpy(&fpu->state, &current->thread.fpu.state,
                       fpu_kernel_xstate_size);
        else
-               copy_fpregs_to_fpstate(fpu);
+               save_fpregs_to_fpstate(fpu);
 }
 
 /* Swap (qemu) user FPU context for the guest FPU context. */
@@ -9901,7 +9920,7 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
         */
        if (vcpu->arch.guest_fpu)
                /* PKRU is separately restored in kvm_x86_ops.run. */
-               __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
+               __restore_fpregs_from_fpstate(&vcpu->arch.guest_fpu->state,
                                        ~XFEATURE_MASK_PKRU);
 
        fpregs_mark_activate();
@@ -9922,7 +9941,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        if (vcpu->arch.guest_fpu)
                kvm_save_current_fpu(vcpu->arch.guest_fpu);
 
-       copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
+       restore_fpregs_from_fpstate(&vcpu->arch.user_fpu->state);
 
        fpregs_mark_activate();
        fpregs_unlock();
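The three FPU hunks above are mechanical fallout from the x86 FPU API
rename; the call sites behave as before. The mapping, for reference:

	/* copy_fpregs_to_fpstate()   -> save_fpregs_to_fpstate()
	 * copy_kernel_to_fpregs()    -> restore_fpregs_from_fpstate()
	 * __copy_kernel_to_fpregs()  -> __restore_fpregs_from_fpstate()
	 */
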
@@ -9961,7 +9980,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                goto out;
        }
 
-       if (kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
+       if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
+           (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
                r = -EINVAL;
                goto out;
        }
@@ -10566,9 +10586,6 @@ static void store_regs(struct kvm_vcpu *vcpu)
 
 static int sync_regs(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)
-               return -EINVAL;
-
        if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
                __set_regs(vcpu, &vcpu->run->s.regs.regs);
                vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
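This pair of hunks is what the subject line refers to: the kvm_dirty_regs
sanity check moves out of sync_regs() and joins the existing
kvm_valid_regs check at the top of kvm_arch_vcpu_ioctl_run(), so bogus
bits now fail with -EINVAL before any vCPU state is touched. The
observable effect from userspace, as a minimal fragment (hypothetical
vcpu_fd; assumes KVM_CAP_SYNC_REGS is enabled and 'run' is the vCPU's
mmap'ed struct kvm_run):

	run->kvm_dirty_regs = ~0ULL;	/* bits outside KVM_SYNC_X86_VALID_FIELDS */
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno == EINVAL)
		/* rejected up front; previously sync_regs() failed later */;
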
@@ -10981,9 +10998,6 @@ int kvm_arch_hardware_setup(void *opaque)
        int r;
 
        rdmsrl_safe(MSR_EFER, &host_efer);
-       if (WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_NX) &&
-                        !(host_efer & EFER_NX)))
-               return -EIO;
 
        if (boot_cpu_has(X86_FEATURE_XSAVES))
                rdmsrl(MSR_IA32_XSS, host_xss);