KVM: x86/mmu: Revert "Revert "KVM: MMU: zap pages in batch""
author Sean Christopherson <sean.j.christopherson@intel.com>
Fri, 13 Sep 2019 02:46:07 +0000 (19:46 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 24 Sep 2019 12:35:35 +0000 (14:35 +0200)
Now that the fast invalidate mechanism has been reintroduced, restore
the performance tweaks for fast invalidation that existed prior to its
removal.

Paraphrasing the original changelog:

  Zap at least 10 shadow pages before releasing mmu_lock to reduce the
  overhead associated with re-acquiring the lock.

  Note: "10" is an arbitrary number, speculated to be high enough so
  that a vCPU isn't stuck zapping obsolete pages for an extended period,
  but small enough so that other vCPUs aren't starved waiting for
  mmu_lock.
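
For illustration only (this sketch is not part of the commit): the batching
pattern amounts to zapping at least BATCH_ZAP_PAGES pages between lock
breaks, and only then checking whether mmu_lock should be yielded to a
waiter. A minimal userspace C sketch of that control flow, where the
hypothetical lock_contended() stands in for need_resched()/spin_needbreak()
and no real lock or shadow pages are involved:

  /* batch_zap_sketch.c - illustrative only, not kernel code. */
  #include <stdbool.h>
  #include <stdio.h>

  #define BATCH_ZAP_PAGES 10

  /* Hypothetical stand-in for need_resched()/spin_needbreak(): pretend a
   * waiter shows up on every third iteration. */
  static bool lock_contended(int iteration)
  {
          return (iteration % 3) == 0;
  }

  /* "Zap" total pages, but only consider yielding the (imaginary) lock
   * once at least BATCH_ZAP_PAGES pages have been zapped since the last
   * yield, mirroring the batch check added by this patch. */
  static void zap_in_batches(int total)
  {
          int batch = 0, yields = 0;

          for (int i = 0; i < total; i++) {
                  if (batch >= BATCH_ZAP_PAGES && lock_contended(i)) {
                          batch = 0;
                          yields++;  /* cond_resched_lock() would go here */
                  }
                  batch++;           /* one page zapped */
          }
          printf("zapped %d pages, yielded the lock %d times\n", total, yields);
  }

  int main(void)
  {
          zap_in_batches(100);
          return 0;
  }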

This reverts commit 43d2b14b105fb00b8864c7b0ee7043cc1cc4a969.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.c

index 2ab0ac8..3acc4b0 100644
@@ -5671,12 +5671,12 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
        return ret;
 }
 
-
+#define BATCH_ZAP_PAGES        10
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
        LIST_HEAD(invalid_list);
-       int ign;
+       int nr_zapped, batch = 0;
 
 restart:
        list_for_each_entry_safe_reverse(sp, node,
@@ -5689,28 +5689,6 @@ restart:
                        break;
 
                /*
-                * Do not repeatedly zap a root page to avoid unnecessary
-                * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
-                * progress:
-                *    vcpu 0                        vcpu 1
-                *                         call vcpu_enter_guest():
-                *                            1): handle KVM_REQ_MMU_RELOAD
-                *                                and require mmu-lock to
-                *                                load mmu
-                * repeat:
-                *    1): zap root page and
-                *        send KVM_REQ_MMU_RELOAD
-                *
-                *    2): if (cond_resched_lock(mmu-lock))
-                *
-                *                            2): hold mmu-lock and load mmu
-                *
-                *                            3): see KVM_REQ_MMU_RELOAD bit
-                *                                on vcpu->requests is set
-                *                                then return 1 to call
-                *                                vcpu_enter_guest() again.
-                *            goto repeat;
-                *
                 * Since we are reversely walking the list and the invalid
                 * list will be moved to the head, skip the invalid page
                 * can help us to avoid the infinity list walking.
@@ -5718,14 +5696,19 @@ restart:
                if (sp->role.invalid)
                        continue;
 
-               if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+               if (batch >= BATCH_ZAP_PAGES &&
+                   (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
+                       batch = 0;
                        kvm_mmu_commit_zap_page(kvm, &invalid_list);
                        cond_resched_lock(&kvm->mmu_lock);
                        goto restart;
                }
 
-               if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
+               if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
+                                              &nr_zapped)) {
+                       batch += nr_zapped;
                        goto restart;
+               }
        }
 
        kvm_mmu_commit_zap_page(kvm, &invalid_list);