KVM: x86/mmu: Revert "KVM: x86/mmu: Remove is_obsolete() call"
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 652f2ce..e552fd5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2752,7 +2752,12 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
        } else {
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
 
-               if (!sp->role.invalid)
+               /*
+                * Obsolete pages cannot be used on any vCPUs, see the comment
+                * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
+                * treats invalid shadow pages as being obsolete.
+                */
+               if (!is_obsolete_sp(kvm, sp))
                        kvm_reload_remote_mmus(kvm);
        }
 
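[Editor's note, not part of the diff: for context, the helper being reinstated folds the invalid check into a generation check. A sketch of its shape in this file at the time of the revert, assuming the contemporary struct kvm_mmu_page fields:

    static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
    {
            /*
             * An invalid page is obsolete by definition; otherwise the page
             * is obsolete if its generation no longer matches the VM-wide
             * valid generation bumped by kvm_mmu_zap_all_fast().
             */
            return sp->role.invalid ||
                   unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
    }

This is why the one-line change above is strictly wider than the old !sp->role.invalid test: it additionally skips the remote-MMU reload for pages that are merely obsolete.]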
@@ -5675,7 +5680,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
-       LIST_HEAD(invalid_list);
        int nr_zapped, batch = 0;
 
 restart:
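[Editor's note, not part of the diff: dropping the on-stack list only works if the per-VM list replacing it is initialized at VM creation. That companion change lives outside this file; a minimal sketch, assuming it is restored in kvm_arch_init_vm() in arch/x86/kvm/x86.c as it existed before the removal:

    /* In kvm_arch_init_vm(): prepare the deferred-zap list for this VM. */
    INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
]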
@@ -5708,8 +5712,8 @@ restart:
                        goto restart;
                }
 
-               if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
-                                              &nr_zapped)) {
+               if (__kvm_mmu_prepare_zap_page(kvm, sp,
+                               &kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
                        batch += nr_zapped;
                        goto restart;
                }
@@ -5720,7 +5724,7 @@ restart:
         * KVM is not in the middle of a lockless shadow page table walk, which
         * may reference the pages.
         */
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+       kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
 }
 
 /*
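[Editor's note, not part of the diff: the reason a per-VM list is needed here rather than an on-stack one is that kvm_zap_obsolete_pages() batches its work and may drop mmu_lock mid-zap, and the deferred pages must also remain reachable by the memory shrinker (see the last hunk). A sketch of the batching that forces this, assuming the contemporary loop in this function, where BATCH_ZAP_PAGES was 10:

    /*
     * Periodically yield mmu_lock; zapped-but-uncommitted pages stay
     * queued on kvm->arch.zapped_obsolete_pages across the lock drop.
     */
    if (batch >= BATCH_ZAP_PAGES &&
        cond_resched_lock(&kvm->mmu_lock)) {
            batch = 0;
            goto restart;
    }
]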
@@ -5752,6 +5756,11 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
        spin_unlock(&kvm->mmu_lock);
 }
 
+static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
+{
+       return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
+}
+
 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
                        struct kvm_memory_slot *slot,
                        struct kvm_page_track_notifier_node *node)
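[Editor's note, not part of the diff: list_empty_careful() is used because the shrinker peeks at the list without holding mmu_lock; the result is only a hint and is re-checked under the lock before committing (next hunk). The list itself is per-VM state; a sketch of the declaration, assuming the contemporary struct kvm_arch in arch/x86/include/asm/kvm_host.h:

    struct kvm_arch {
            /* ... other fields ... */
            struct list_head active_mmu_pages;
            struct list_head zapped_obsolete_pages;
            /* ... */
    };
]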
@@ -6022,16 +6031,24 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                 * want to shrink a VM that only started to populate its MMU
                 * anyway.
                 */
-               if (!kvm->arch.n_used_mmu_pages)
+               if (!kvm->arch.n_used_mmu_pages &&
+                   !kvm_has_zapped_obsolete_pages(kvm))
                        continue;
 
                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
 
+               if (kvm_has_zapped_obsolete_pages(kvm)) {
+                       kvm_mmu_commit_zap_page(kvm,
+                             &kvm->arch.zapped_obsolete_pages);
+                       goto unlock;
+               }
+
                if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
                        freed++;
                kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
+unlock:
                spin_unlock(&kvm->mmu_lock);
                srcu_read_unlock(&kvm->srcu, idx);
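[Editor's note, not part of the diff: for completeness, the deferred commit the shrinker now performs first is the same kvm_mmu_commit_zap_page() used elsewhere in this file. A simplified sketch of its shape at the time; the real function also documents the ordering that makes it safe against lockless shadow-page-table walks:

    static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                        struct list_head *invalid_list)
    {
            struct kvm_mmu_page *sp, *nsp;

            if (list_empty(invalid_list))
                    return;

            /* Guarantee no vCPU still references the pages before freeing. */
            kvm_flush_remote_tlbs(kvm);

            list_for_each_entry_safe(sp, nsp, invalid_list, link) {
                    WARN_ON(!sp->role.invalid || sp->root_count);
                    kvm_mmu_free_page(sp);
            }
    }

Letting the shrinker drain kvm->arch.zapped_obsolete_pages before zapping fresh pages means a VM in the middle of a fast zap gives back already-invalidated memory first, which is exactly the behavior the original (pre-removal) code had.]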