KVM: x86: Allow the guest to run with dirty debug registers
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0c76f7c..d906391 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -257,10 +257,26 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
 
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
-{
-       /* TODO: reserve bits check */
-       kvm_lapic_set_base(vcpu, data);
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+       u64 old_state = vcpu->arch.apic_base &
+               (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+       u64 new_state = msr_info->data &
+               (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+       u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
+               0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+
+       if (!msr_info->host_initiated &&
+           ((msr_info->data & reserved_bits) != 0 ||
+            new_state == X2APIC_ENABLE ||
+            (new_state == MSR_IA32_APICBASE_ENABLE &&
+             old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
+            (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
+             old_state == 0)))
+               return 1;
+
+       kvm_lapic_set_base(vcpu, msr_info->data);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
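The new check enforces the xAPIC/x2APIC state machine from the SDM: software may not jump straight from APIC-disabled to x2APIC mode, and may not drop from x2APIC back to plain xAPIC without passing through the disabled state first. A standalone sketch of just that transition rule (the reserved-bit and CPUID parts of the check are omitted; the constants are local stand-ins for the architectural MSR_IA32_APICBASE bits, enable = bit 11, x2APIC = bit 10):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the APIC base MSR mode bits. */
#define APICBASE_ENABLE  (1ULL << 11)   /* xAPIC global enable */
#define APICBASE_EXTD    (1ULL << 10)   /* x2APIC enable */

static bool apic_base_transition_valid(uint64_t old_base, uint64_t new_base)
{
	uint64_t old_state = old_base & (APICBASE_ENABLE | APICBASE_EXTD);
	uint64_t new_state = new_base & (APICBASE_ENABLE | APICBASE_EXTD);

	if (new_state == APICBASE_EXTD)
		return false;	/* x2APIC bit without the xAPIC enable bit */
	if (new_state == APICBASE_ENABLE &&
	    old_state == (APICBASE_ENABLE | APICBASE_EXTD))
		return false;	/* x2APIC -> xAPIC without disabling first */
	if (new_state == (APICBASE_ENABLE | APICBASE_EXTD) && old_state == 0)
		return false;	/* disabled -> x2APIC without enabling xAPIC first */
	return true;
}

int main(void)
{
	printf("%d\n", apic_base_transition_valid(0, APICBASE_ENABLE));                  /* 1 */
	printf("%d\n", apic_base_transition_valid(0, APICBASE_ENABLE | APICBASE_EXTD));  /* 0 */
	return 0;
}

With the check in place, an illegal write from the guest makes kvm_set_apic_base() return 1, which kvm_set_msr_common() now propagates as a #GP instead of silently accepting the value.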
@@ -579,13 +595,13 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 
 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
-       u64 xcr0;
+       u64 xcr0 = xcr;
+       u64 old_xcr0 = vcpu->arch.xcr0;
        u64 valid_bits;
 
        /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
        if (index != XCR_XFEATURE_ENABLED_MASK)
                return 1;
-       xcr0 = xcr;
        if (!(xcr0 & XSTATE_FP))
                return 1;
        if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -600,8 +616,14 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
        if (xcr0 & ~valid_bits)
                return 1;
 
+       if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR)))
+               return 1;
+
        kvm_put_guest_xcr0(vcpu);
        vcpu->arch.xcr0 = xcr0;
+
+       if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK)
+               kvm_update_cpuid(vcpu);
        return 0;
 }
 
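The added test keeps the two MPX state components, BNDREGS and BNDCSR, enabled or disabled as a pair, which XSETBV requires; and because CPUID leaf 0xD reports XSAVE area sizes that depend on the current XCR0 value, flipping any bit in XSTATE_EXTEND_MASK now forces kvm_update_cpuid() so the guest-visible sizes stay consistent. A standalone sketch of the structural checks only (the XSTATE_* values below are local copies of the architectural XCR0 bit positions; the valid_bits filtering against guest CPUID is left out):

#include <stdbool.h>
#include <stdint.h>

/* XCR0 feature bits as defined architecturally (local copies for illustration). */
#define XSTATE_FP       (1ULL << 0)
#define XSTATE_SSE      (1ULL << 1)
#define XSTATE_YMM      (1ULL << 2)
#define XSTATE_BNDREGS  (1ULL << 3)
#define XSTATE_BNDCSR   (1ULL << 4)

static bool xcr0_structurally_valid(uint64_t xcr0)
{
	if (!(xcr0 & XSTATE_FP))
		return false;	/* x87 state can never be disabled */
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return false;	/* AVX state requires SSE state */
	if (!(xcr0 & XSTATE_BNDREGS) != !(xcr0 & XSTATE_BNDCSR))
		return false;	/* MPX components must be set as a pair */
	return true;
}

int main(void)
{
	/* AVX without SSE and a lone MPX bit are both rejected. */
	return xcr0_structurally_valid(XSTATE_FP | XSTATE_SSE | XSTATE_YMM) &&
	      !xcr0_structurally_valid(XSTATE_FP | XSTATE_YMM) &&
	      !xcr0_structurally_valid(XSTATE_FP | XSTATE_BNDREGS) ? 0 : 1;
}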
@@ -737,7 +759,9 @@ static void kvm_update_dr7(struct kvm_vcpu *vcpu)
        else
                dr7 = vcpu->arch.dr7;
        kvm_x86_ops->set_dr7(vcpu, dr7);
-       vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
+       vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
+       if (dr7 & DR7_BP_EN_MASK)
+               vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
 }
 
 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
@@ -863,7 +887,7 @@ static u32 msrs_to_save[] = {
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
 #endif
        MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
-       MSR_IA32_FEATURE_CONTROL
+       MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
 };
 
 static unsigned num_msrs_to_save;
@@ -1565,7 +1589,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        /* With all the info we got, fill in the values */
        vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
        vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
-       vcpu->last_kernel_ns = kernel_ns;
        vcpu->last_guest_tsc = tsc_timestamp;
 
        /*
@@ -1607,14 +1630,21 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
  * the others.
  *
  * So in those cases, request a kvmclock update for all vcpus.
- * The worst case for a remote vcpu to update its kvmclock
- * is then bounded by maximum nohz sleep latency.
+ * We need to rate-limit these requests though, as they can
+ * considerably slow guests that have a large number of vcpus.
+ * The time for a remote vcpu to update its kvmclock is bound
+ * by the delay we use to rate-limit the updates.
  */
 
-static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
+#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
+
+static void kvmclock_update_fn(struct work_struct *work)
 {
        int i;
-       struct kvm *kvm = v->kvm;
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
+                                          kvmclock_update_work);
+       struct kvm *kvm = container_of(ka, struct kvm, arch);
        struct kvm_vcpu *vcpu;
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -1623,6 +1653,29 @@ static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
        }
 }
 
+static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
+{
+       struct kvm *kvm = v->kvm;
+
+       set_bit(KVM_REQ_CLOCK_UPDATE, &v->requests);
+       schedule_delayed_work(&kvm->arch.kvmclock_update_work,
+                                       KVMCLOCK_UPDATE_DELAY);
+}
+
+#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
+
+static void kvmclock_sync_fn(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
+                                          kvmclock_sync_work);
+       struct kvm *kvm = container_of(ka, struct kvm, arch);
+
+       schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
+       schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
+                                       KVMCLOCK_SYNC_PERIOD);
+}
+
 static bool msr_mtrr_valid(unsigned msr)
 {
        switch (msr) {
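The rate limiting leans on the behaviour of schedule_delayed_work(): when the work item is already pending, the call is a no-op, so a burst of kvm_gen_kvmclock_update() requests from many vCPUs collapses into a single update per 100 ms window, while the separate 300-second sync work re-arms an immediate update and then itself. A userspace analogue of the coalescing, assuming millisecond timestamps and illustrative names (not kernel API):

#include <stdbool.h>
#include <stdio.h>

#define UPDATE_DELAY_MS 100	/* mirrors KVMCLOCK_UPDATE_DELAY */

struct clock_updater {
	bool pending;
	long fire_at_ms;
};

/* Returns true only if this call actually armed the single pending update. */
static bool request_clock_update(struct clock_updater *u, long now_ms)
{
	if (u->pending)
		return false;	/* already queued; later requests are coalesced */
	u->pending = true;
	u->fire_at_ms = now_ms + UPDATE_DELAY_MS;
	return true;
}

static void run_pending(struct clock_updater *u, long now_ms)
{
	if (u->pending && now_ms >= u->fire_at_ms) {
		u->pending = false;
		printf("kvmclock update for all vcpus at %ld ms\n", now_ms);
	}
}

int main(void)
{
	struct clock_updater u = { 0 };

	/* A burst of requests from several vCPUs arms only one update. */
	for (long t = 0; t < 4; t++)
		request_clock_update(&u, t);
	run_pending(&u, 150);	/* fires once, not four times */
	return 0;
}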
@@ -1840,6 +1893,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                kvm->arch.hv_hypercall = data;
+               mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_REFERENCE_TSC: {
@@ -1868,19 +1922,21 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
        switch (msr) {
        case HV_X64_MSR_APIC_ASSIST_PAGE: {
+               u64 gfn;
                unsigned long addr;
 
                if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
                        vcpu->arch.hv_vapic = data;
                        break;
                }
-               addr = gfn_to_hva(vcpu->kvm, data >>
-                                 HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
+               gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
+               addr = gfn_to_hva(vcpu->kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                vcpu->arch.hv_vapic = data;
+               mark_page_dirty(vcpu->kvm, gfn);
                break;
        }
        case HV_X64_MSR_EOI:
@@ -2006,8 +2062,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case 0x200 ... 0x2ff:
                return set_msr_mtrr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
-               kvm_set_apic_base(vcpu, data);
-               break;
+               return kvm_set_apic_base(vcpu, msr_info);
        case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
                return kvm_x2apic_msr_write(vcpu, msr, data);
        case MSR_IA32_TSCDEADLINE:
@@ -2305,9 +2360,12 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case HV_X64_MSR_VP_INDEX: {
                int r;
                struct kvm_vcpu *v;
-               kvm_for_each_vcpu(r, v, vcpu->kvm)
-                       if (v == vcpu)
+               kvm_for_each_vcpu(r, v, vcpu->kvm) {
+                       if (v == vcpu) {
                                data = r;
+                               break;
+                       }
+               }
                break;
        }
        case HV_X64_MSR_EOI:
@@ -2598,10 +2656,10 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_GET_TSC_KHZ:
        case KVM_CAP_KVMCLOCK_CTRL:
        case KVM_CAP_READONLY_MEM:
+       case KVM_CAP_HYPERV_TIME:
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
        case KVM_CAP_ASSIGN_DEV_IRQ:
        case KVM_CAP_PCI_2_3:
-       case KVM_CAP_HYPERV_TIME:
 #endif
                r = 1;
                break;
@@ -4376,6 +4434,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
        if (!exchanged)
                return X86EMUL_CMPXCHG_FAILED;
 
+       mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
        kvm_mmu_pte_write(vcpu, gpa, new, bytes);
 
        return X86EMUL_CONTINUE;
@@ -5764,8 +5823,10 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
        kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
-static void inject_pending_event(struct kvm_vcpu *vcpu)
+static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 {
+       int r;
+
        /* try to reinject previous events if any */
        if (vcpu->arch.exception.pending) {
                trace_kvm_inj_exception(vcpu->arch.exception.nr,
@@ -5775,17 +5836,23 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
                                          vcpu->arch.exception.has_error_code,
                                          vcpu->arch.exception.error_code,
                                          vcpu->arch.exception.reinject);
-               return;
+               return 0;
        }
 
        if (vcpu->arch.nmi_injected) {
                kvm_x86_ops->set_nmi(vcpu);
-               return;
+               return 0;
        }
 
        if (vcpu->arch.interrupt.pending) {
                kvm_x86_ops->set_irq(vcpu);
-               return;
+               return 0;
+       }
+
+       if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+               r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+               if (r != 0)
+                       return r;
        }
 
        /* try to inject new event if pending */
@@ -5802,6 +5869,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
                        kvm_x86_ops->set_irq(vcpu);
                }
        }
+       return 0;
 }
 
 static void process_nmi(struct kvm_vcpu *vcpu)
@@ -5906,15 +5974,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        goto out;
                }
 
-               inject_pending_event(vcpu);
-
+               if (inject_pending_event(vcpu, req_int_win) != 0)
+                       req_immediate_exit = true;
                /* enable NMI/IRQ window open exits if needed */
-               if (vcpu->arch.nmi_pending)
-                       req_immediate_exit =
-                               kvm_x86_ops->enable_nmi_window(vcpu) != 0;
+               else if (vcpu->arch.nmi_pending)
+                       kvm_x86_ops->enable_nmi_window(vcpu);
                else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-                       req_immediate_exit =
-                               kvm_x86_ops->enable_irq_window(vcpu) != 0;
+                       kvm_x86_ops->enable_irq_window(vcpu);
 
                if (kvm_lapic_enabled(vcpu)) {
                        /*
@@ -5974,11 +6040,27 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                set_debugreg(vcpu->arch.eff_db[1], 1);
                set_debugreg(vcpu->arch.eff_db[2], 2);
                set_debugreg(vcpu->arch.eff_db[3], 3);
+               set_debugreg(vcpu->arch.dr6, 6);
        }
 
        trace_kvm_entry(vcpu->vcpu_id);
        kvm_x86_ops->run(vcpu);
 
+       /*
+        * Do this here before restoring debug registers on the host.  And
+        * since we do this before handling the vmexit, a DR access vmexit
+        * can (a) read the correct value of the debug registers, (b) set
+        * KVM_DEBUGREG_WONT_EXIT again.
+        */
+       if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
+               int i;
+
+               WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
+               kvm_x86_ops->sync_dirty_debug_regs(vcpu);
+               for (i = 0; i < KVM_NR_DB_REGS; i++)
+                       vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+       }
+
        /*
         * If the guest has used debug registers, at least dr7
         * will be disabled while returning to the host.
@@ -6168,7 +6250,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
                frag->len -= len;
        }
 
-       if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
+       if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
                vcpu->mmio_needed = 0;
 
                /* FIXME: return into emulator if single-stepping.  */
@@ -6409,6 +6491,7 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
+       struct msr_data apic_base_msr;
        int mmu_reset_needed = 0;
        int pending_vec, max_bits, idx;
        struct desc_ptr dt;
@@ -6432,7 +6515,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
-       kvm_set_apic_base(vcpu, sregs->apic_base);
+       apic_base_msr.data = sregs->apic_base;
+       apic_base_msr.host_initiated = true;
+       kvm_set_apic_base(vcpu, &apic_base_msr);
 
        mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
        kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
@@ -6690,6 +6775,7 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
        int r;
        struct msr_data msr;
+       struct kvm *kvm = vcpu->kvm;
 
        r = vcpu_load(vcpu);
        if (r)
@@ -6700,6 +6786,9 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
        kvm_write_tsc(vcpu, &msr);
        vcpu_put(vcpu);
 
+       schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
+                                       KVMCLOCK_SYNC_PERIOD);
+
        return r;
 }
 
@@ -6992,6 +7081,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        pvclock_update_vm_gtod_copy(kvm);
 
+       INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
+       INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
+
        return 0;
 }
 
@@ -7029,6 +7121,8 @@ static void kvm_free_vcpus(struct kvm *kvm)
 
 void kvm_arch_sync_events(struct kvm *kvm)
 {
+       cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
+       cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
        kvm_free_all_assigned_devices(kvm);
        kvm_free_pit(kvm);
 }
@@ -7227,6 +7321,9 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
+       if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
+               kvm_x86_ops->check_nested_events(vcpu, false);
+
        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
                !vcpu->arch.apf.halted)
                || !list_empty_careful(&vcpu->async_pf.done)