KVM: SVM: avoid refreshing avic if its state didn't change

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3cedc7c..bf8cb10 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -486,7 +486,14 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-asmlinkage __visible noinstr void kvm_spurious_fault(void)
+/*
+ * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
+ *
+ * Hardware virtualization extension instructions may fault if a reboot turns
+ * off virtualization while processes are running.  Usually after catching the
+ * fault we just panic; during reboot instead the instruction is ignored.
+ */
+noinstr void kvm_spurious_fault(void)
 {
        /* Fault while not rebooting.  We want the trace. */
        BUG_ON(!kvm_rebooting);
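
For context, the fault handled here arrives through an exception-table fixup
attached to the virtualization instruction itself.  A minimal sketch, modeled
on the vmx_asm*() wrappers in arch/x86/kvm/vmx/vmx_ops.h (simplified and
abridged; not the actual macro):

	/*
	 * Illustrative only: a VMCLEAR wrapper whose #UD/#GP is redirected
	 * via an exception-table entry to kvm_spurious_fault() above.
	 */
	static void vmcs_clear_sketch(u64 vmcs_pa)
	{
		asm_volatile_goto("1: vmclear %0\n\t"
				  _ASM_EXTABLE(1b, %l[fault])
				  : : "m" (vmcs_pa) : "cc" : fault);
		return;
	fault:
		kvm_spurious_fault();	/* BUG()s unless kvm_rebooting is set */
	}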
@@ -1181,7 +1188,6 @@ static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
        if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
                for (i = 0; i < KVM_NR_DB_REGS; i++)
                        vcpu->arch.eff_db[i] = vcpu->arch.db[i];
-               vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
        }
 }
 
@@ -4311,12 +4317,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
        static_call(kvm_x86_vcpu_put)(vcpu);
        vcpu->arch.last_host_tsc = rdtsc();
-       /*
-        * If userspace has set any breakpoints or watchpoints, dr6 is restored
-        * on every vmexit, but if not, we might have a stale dr6 from the
-        * guest. do_debug expects dr6 to be cleared after it runs, do the same.
-        */
-       set_debugreg(0, 6);
 }
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
@@ -8579,6 +8579,8 @@ EXPORT_SYMBOL_GPL(kvm_apicv_activated);
 
 static void kvm_apicv_init(struct kvm *kvm)
 {
+       mutex_init(&kvm->arch.apicv_update_lock);
+
        if (enable_apicv)
                clear_bit(APICV_INHIBIT_REASON_DISABLE,
                          &kvm->arch.apicv_inhibit_reasons);
@@ -9237,10 +9239,18 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
 
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
 {
+       bool activate;
+
        if (!lapic_in_kernel(vcpu))
                return;
 
-       vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm);
+       mutex_lock(&vcpu->kvm->arch.apicv_update_lock);
+
+       activate = kvm_apicv_activated(vcpu->kvm);
+       if (vcpu->arch.apicv_active == activate)
+               goto out;
+
+       vcpu->arch.apicv_active = activate;
        kvm_apic_update_apicv(vcpu);
        static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
 
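The hunk above (and its continuation below) moves kvm_vcpu_update_apicv()
under kvm->arch.apicv_update_lock and bails out early when the computed state
already matches vcpu->arch.apicv_active, so a redundant AVIC refresh becomes
a no-op.  A generic userspace analogue of the idiom (illustrative sketch, not
kernel code):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long inhibit_reasons;	/* written under update_lock */
	static bool active;			/* cached per-vCPU state */

	static void update_active(void (*refresh)(bool))
	{
		pthread_mutex_lock(&update_lock);
		bool activate = (inhibit_reasons == 0);
		if (active != activate) {	/* skip refresh if unchanged */
			active = activate;
			refresh(activate);
		}
		pthread_mutex_unlock(&update_lock);
	}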
@@ -9252,44 +9262,45 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
         */
        if (!vcpu->arch.apicv_active)
                kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+out:
+       mutex_unlock(&vcpu->kvm->arch.apicv_update_lock);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
 
-/*
- * NOTE: Do not hold any lock prior to calling this.
- *
- * In particular, kvm_request_apicv_update() expects kvm->srcu not to be
- * locked, because it calls __x86_set_memory_region() which does
- * synchronize_srcu(&kvm->srcu).
- */
-void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
+void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
 {
-       unsigned long old, new, expected;
+       unsigned long old, new;
 
        if (!kvm_x86_ops.check_apicv_inhibit_reasons ||
            !static_call(kvm_x86_check_apicv_inhibit_reasons)(bit))
                return;
 
-       old = READ_ONCE(kvm->arch.apicv_inhibit_reasons);
-       do {
-               expected = new = old;
-               if (activate)
-                       __clear_bit(bit, &new);
-               else
-                       __set_bit(bit, &new);
-               if (new == old)
-                       break;
-               old = cmpxchg(&kvm->arch.apicv_inhibit_reasons, expected, new);
-       } while (old != expected);
-
-       if (!!old == !!new)
-               return;
+       old = new = kvm->arch.apicv_inhibit_reasons;
 
-       trace_kvm_apicv_update_request(activate, bit);
-       if (kvm_x86_ops.pre_update_apicv_exec_ctrl)
-               static_call(kvm_x86_pre_update_apicv_exec_ctrl)(kvm, activate);
+       if (activate)
+               __clear_bit(bit, &new);
+       else
+               __set_bit(bit, &new);
+
+       if (!!old != !!new) {
+               trace_kvm_apicv_update_request(activate, bit);
+               kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
+               kvm->arch.apicv_inhibit_reasons = new;
+               if (new) {
+                       unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
+                       kvm_zap_gfn_range(kvm, gfn, gfn+1);
+               }
+       } else
+               kvm->arch.apicv_inhibit_reasons = new;
+}
+EXPORT_SYMBOL_GPL(__kvm_request_apicv_update);
 
-       kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
+void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
+{
+       mutex_lock(&kvm->arch.apicv_update_lock);
+       __kvm_request_apicv_update(kvm, activate, bit);
+       mutex_unlock(&kvm->arch.apicv_update_lock);
 }
 EXPORT_SYMBOL_GPL(kvm_request_apicv_update);
 
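Callers continue to use the locking wrapper; the new
__kvm_request_apicv_update() variant is for paths that already hold
apicv_update_lock.  A typical call site (hypothetical caller;
APICV_INHIBIT_REASON_PIT_REINJ is an existing inhibit reason):

	/* inhibit APICv while PIT re-injection is enabled ... */
	kvm_request_apicv_update(kvm, false, APICV_INHIBIT_REASON_PIT_REINJ);
	/* ... and lift the inhibit once re-injection is turned off */
	kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_PIT_REINJ);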
@@ -9603,8 +9614,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                set_debugreg(vcpu->arch.eff_db[1], 1);
                set_debugreg(vcpu->arch.eff_db[2], 2);
                set_debugreg(vcpu->arch.eff_db[3], 3);
-               set_debugreg(vcpu->arch.dr6, 6);
-               vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
        } else if (unlikely(hw_breakpoint_active())) {
                set_debugreg(0, 7);
        }
@@ -9634,7 +9643,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                static_call(kvm_x86_sync_dirty_debug_regs)(vcpu);
                kvm_update_dr0123(vcpu);
                kvm_update_dr7(vcpu);
-               vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
        }
 
        /*
@@ -11352,8 +11360,7 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
 
        for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
                int level = i + 1;
-               int lpages = gfn_to_index(slot->base_gfn + npages - 1,
-                                         slot->base_gfn, level) + 1;
+               int lpages = __kvm_mmu_slot_lpages(slot, npages, level);
 
                WARN_ON(slot->arch.rmap[i]);
 
@@ -11436,8 +11443,7 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
                int lpages;
                int level = i + 1;
 
-               lpages = gfn_to_index(slot->base_gfn + npages - 1,
-                                     slot->base_gfn, level) + 1;
+               lpages = __kvm_mmu_slot_lpages(slot, npages, level);
 
                linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
                if (!linfo)
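
Both metadata paths above now share one helper instead of open-coding the
index arithmetic.  Judging from the expression being replaced, the helper
amounts to the following (the actual definition lives in KVM's MMU headers):

	static inline unsigned long
	__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot,
			      unsigned long npages, int level)
	{
		/* entries needed at this large-page level to cover the slot */
		return gfn_to_index(slot->base_gfn + npages - 1,
				    slot->base_gfn, level) + 1;
	}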