KVM: x86/mmu: Make TDP MMU root refcount atomic
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7c337db..f55b415 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -41,70 +41,82 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
        rcu_barrier();
 }
 
-static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+                         gfn_t start, gfn_t end, bool can_yield, bool flush);
+
+static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
 {
-       if (kvm_mmu_put_root(kvm, root))
-               kvm_tdp_mmu_free_root(kvm, root);
+       free_page((unsigned long)sp->spt);
+       kmem_cache_free(mmu_page_header_cache, sp);
 }
 
-static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
-                                          struct kvm_mmu_page *root)
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
+       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+
        lockdep_assert_held_write(&kvm->mmu_lock);
 
-       if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
-               return false;
+       if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
+               return;
 
-       kvm_mmu_get_root(kvm, root);
-       return true;
+       WARN_ON(!root->tdp_mmu_page);
 
-}
+       list_del(&root->link);
 
-static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
-                                                    struct kvm_mmu_page *root)
-{
-       struct kvm_mmu_page *next_root;
+       zap_gfn_range(kvm, root, 0, max_gfn, false, false);
 
-       next_root = list_next_entry(root, link);
-       tdp_mmu_put_root(kvm, root);
-       return next_root;
+       tdp_mmu_free_sp(root);
 }
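
For reference, the acquire side of this new atomic refcount, kvm_tdp_mmu_get_root(), is not visible in this hunk (it lives in tdp_mmu.h). A minimal sketch of what that helper is assumed to look like, paired with the lifecycle implied by the code above:

        /*
         * Sketch only, assuming the helper simply wraps refcount_inc(); the
         * real definition lives in arch/x86/kvm/mmu/tdp_mmu.h and may differ.
         */
        static inline void kvm_tdp_mmu_get_root(struct kvm *kvm,
                                                struct kvm_mmu_page *root)
        {
                refcount_inc(&root->tdp_mmu_root_count);
        }

A new root starts life with its count set to 1 (see the refcount_set() in kvm_tdp_mmu_get_vcpu_root_hpa() below); each additional user takes a reference via kvm_tdp_mmu_get_root(), and only when the final kvm_tdp_mmu_put_root() drops the count to zero is the root unlinked, zapped and freed.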
 
 /*
- * Note: this iterator gets and puts references to the roots it iterates over.
- * This makes it safe to release the MMU lock and yield within the loop, but
- * if exiting the loop early, the caller must drop the reference to the most
- * recent root. (Unless keeping a live reference is desirable.)
+ * Finds the next valid root after root (or the first valid root if root
+ * is NULL), takes a reference on it, and returns that next root. If root
+ * is not NULL, this thread should have already taken a reference on it, and
+ * that reference will be dropped. If no valid root is found, this
+ * function will return NULL.
  */
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)                          \
-       for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,        \
-                                     typeof(*_root), link);            \
-            tdp_mmu_next_root_valid(_kvm, _root);                      \
-            _root = tdp_mmu_next_root(_kvm, _root))
-
-#define for_each_tdp_mmu_root(_kvm, _root)                             \
-       list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
-
-static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-                         gfn_t start, gfn_t end, bool can_yield, bool flush);
-
-void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
+static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
+                                             struct kvm_mmu_page *prev_root)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+       struct kvm_mmu_page *next_root;
 
        lockdep_assert_held_write(&kvm->mmu_lock);
 
-       WARN_ON(root->root_count);
-       WARN_ON(!root->tdp_mmu_page);
+       if (prev_root)
+               next_root = list_next_entry(prev_root, link);
+       else
+               next_root = list_first_entry(&kvm->arch.tdp_mmu_roots,
+                                            typeof(*next_root), link);
 
-       list_del(&root->link);
+       if (list_entry_is_head(next_root, &kvm->arch.tdp_mmu_roots, link))
+               next_root = NULL;
+       else
+               kvm_tdp_mmu_get_root(kvm, next_root);
 
-       zap_gfn_range(kvm, root, 0, max_gfn, false, false);
+       if (prev_root)
+               kvm_tdp_mmu_put_root(kvm, prev_root);
 
-       free_page((unsigned long)root->spt);
-       kmem_cache_free(mmu_page_header_cache, root);
+       return next_root;
 }
 
+/*
+ * Note: this iterator gets and puts references to the roots it iterates over.
+ * This makes it safe to release the MMU lock and yield within the loop, but
+ * if exiting the loop early, the caller must drop the reference to the most
+ * recent root. (Unless keeping a live reference is desirable.)
+ */
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)          \
+       for (_root = tdp_mmu_next_root(_kvm, NULL);             \
+            _root;                                             \
+            _root = tdp_mmu_next_root(_kvm, _root))            \
+               if (kvm_mmu_page_as_id(_root) != _as_id) {      \
+               } else
+
+#define for_each_tdp_mmu_root(_kvm, _root, _as_id)                     \
+       list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)     \
+               if (kvm_mmu_page_as_id(_root) != _as_id) {              \
+               } else
+
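
A note on the iterator shape: the trailing "if (kvm_mmu_page_as_id(_root) != _as_id) { } else" filters out roots belonging to the other address space while leaving the caller's loop body attached to the else branch, so break and continue in the body still apply to the outer loop and an unbraced body cannot bind to the wrong if. As an illustration (not the preprocessor's literal output), the use in __kvm_tdp_mmu_zap_gfn_range() below expands roughly to:

        for (root = tdp_mmu_next_root(kvm, NULL); root;
             root = tdp_mmu_next_root(kvm, root))
                if (kvm_mmu_page_as_id(root) != as_id) {
                        /* root is in the other address space, skip it */
                } else
                        flush = zap_gfn_range(kvm, root, start, end,
                                              can_yield, flush);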
 static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
                                                   int level)
 {
@@ -148,15 +160,15 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
        role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
 
        /* Check for an existing root before allocating a new one. */
-       for_each_tdp_mmu_root(kvm, root) {
+       for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
                if (root->role.word == role.word) {
-                       kvm_mmu_get_root(kvm, root);
+                       kvm_tdp_mmu_get_root(kvm, root);
                        goto out;
                }
        }
 
        root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
-       root->root_count = 1;
+       refcount_set(&root->tdp_mmu_root_count, 1);
 
        list_add(&root->link, &kvm->arch.tdp_mmu_roots);
 
@@ -164,12 +176,6 @@ out:
        return __pa(root->spt);
 }
 
-static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
-{
-       free_page((unsigned long)sp->spt);
-       kmem_cache_free(mmu_page_header_cache, sp);
-}
-
 /*
  * This is called through call_rcu in order to free TDP page table memory
  * safely with respect to other kernel threads that may be operating on
@@ -704,11 +710,8 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 {
        struct kvm_mmu_page *root;
 
-       for_each_tdp_mmu_root_yield_safe(kvm, root) {
-               if (kvm_mmu_page_as_id(root) != as_id)
-                       continue;
+       for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
                flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
-       }
 
        return flush;
 }
@@ -872,207 +875,139 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        return ret;
 }
 
-typedef int (*tdp_handler_t)(struct kvm *kvm, struct kvm_memory_slot *slot,
-                            struct kvm_mmu_page *root, gfn_t start, gfn_t end,
-                            unsigned long data);
-
-static __always_inline int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm,
-                                                       unsigned long start,
-                                                       unsigned long end,
-                                                       unsigned long data,
-                                                       tdp_handler_t handler)
+bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
+                                bool flush)
 {
-       struct kvm_memslots *slots;
-       struct kvm_memory_slot *memslot;
        struct kvm_mmu_page *root;
-       int ret = 0;
-       int as_id;
-
-       for_each_tdp_mmu_root_yield_safe(kvm, root) {
-               as_id = kvm_mmu_page_as_id(root);
-               slots = __kvm_memslots(kvm, as_id);
-               kvm_for_each_memslot(memslot, slots) {
-                       unsigned long hva_start, hva_end;
-                       gfn_t gfn_start, gfn_end;
-
-                       hva_start = max(start, memslot->userspace_addr);
-                       hva_end = min(end, memslot->userspace_addr +
-                                     (memslot->npages << PAGE_SHIFT));
-                       if (hva_start >= hva_end)
-                               continue;
-                       /*
-                        * {gfn(page) | page intersects with [hva_start, hva_end)} =
-                        * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-                        */
-                       gfn_start = hva_to_gfn_memslot(hva_start, memslot);
-                       gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
 
-                       ret |= handler(kvm, memslot, root, gfn_start,
-                                      gfn_end, data);
-               }
-       }
+       for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
+               flush |= zap_gfn_range(kvm, root, range->start, range->end,
+                                      range->may_block, flush);
 
-       return ret;
+       return flush;
 }
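
The MMU-notifier callbacks now receive their parameters through struct kvm_gfn_range instead of raw HVAs: common code resolves the memslot and converts the HVA range to GFNs before calling into the TDP MMU. As an assumption for illustration, that structure (defined in common KVM code, not in this file) looks roughly like:

        struct kvm_gfn_range {
                struct kvm_memory_slot *slot;   /* memslot covering the range */
                gfn_t start;                    /* first GFN in the range */
                gfn_t end;                      /* exclusive end of the range */
                pte_t pte;                      /* new host PTE, for change_pte */
                bool may_block;                 /* whether the handler may yield */
        };

Note how range->slot->as_id lets each callback filter roots by address space, replacing the per-memslot walk the old HVA-based helper performed by hand.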
 
-static __always_inline int kvm_tdp_mmu_handle_hva(struct kvm *kvm,
-                                                 unsigned long addr,
-                                                 unsigned long data,
-                                                 tdp_handler_t handler)
-{
-       return kvm_tdp_mmu_handle_hva_range(kvm, addr, addr + 1, data, handler);
-}
+typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
+                             struct kvm_gfn_range *range);
 
-static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
-                                    struct kvm_memory_slot *slot,
-                                    struct kvm_mmu_page *root, gfn_t start,
-                                    gfn_t end, unsigned long unused)
+static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
+                                                  struct kvm_gfn_range *range,
+                                                  tdp_handler_t handler)
 {
-       return zap_gfn_range(kvm, root, start, end, false, false);
-}
+       struct kvm_mmu_page *root;
+       struct tdp_iter iter;
+       bool ret = false;
 
-int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
-                             unsigned long end)
-{
-       return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
-                                           zap_gfn_range_hva_wrapper);
+       rcu_read_lock();
+
+       /*
+        * Don't support rescheduling, none of the MMU notifiers that funnel
+        * into this helper allow blocking; it'd be dead, wasteful code.
+        */
+       for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
+               tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
+                       ret |= handler(kvm, &iter, range);
+       }
+
+       rcu_read_unlock();
+
+       return ret;
 }
 
 /*
  * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return non-zero
  * if any of the GFNs in the range have been accessed.
  */
-static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
-                        struct kvm_mmu_page *root, gfn_t start, gfn_t end,
-                        unsigned long unused)
+static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
+                         struct kvm_gfn_range *range)
 {
-       struct tdp_iter iter;
-       int young = 0;
        u64 new_spte = 0;
 
-       rcu_read_lock();
+       /* If we have a non-accessed entry we don't need to change the pte. */
+       if (!is_accessed_spte(iter->old_spte))
+               return false;
 
-       tdp_root_for_each_leaf_pte(iter, root, start, end) {
+       new_spte = iter->old_spte;
+
+       if (spte_ad_enabled(new_spte)) {
+               new_spte &= ~shadow_accessed_mask;
+       } else {
                /*
-                * If we have a non-accessed entry we don't need to change the
-                * pte.
+                * Capture the dirty status of the page, so that it doesn't get
+                * lost when the SPTE is marked for access tracking.
                 */
-               if (!is_accessed_spte(iter.old_spte))
-                       continue;
-
-               new_spte = iter.old_spte;
+               if (is_writable_pte(new_spte))
+                       kvm_set_pfn_dirty(spte_to_pfn(new_spte));
 
-               if (spte_ad_enabled(new_spte)) {
-                       clear_bit((ffs(shadow_accessed_mask) - 1),
-                                 (unsigned long *)&new_spte);
-               } else {
-                       /*
-                        * Capture the dirty status of the page, so that it doesn't get
-                        * lost when the SPTE is marked for access tracking.
-                        */
-                       if (is_writable_pte(new_spte))
-                               kvm_set_pfn_dirty(spte_to_pfn(new_spte));
-
-                       new_spte = mark_spte_for_access_track(new_spte);
-               }
-               new_spte &= ~shadow_dirty_mask;
-
-               tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
-               young = 1;
-
-               trace_kvm_age_page(iter.gfn, iter.level, slot, young);
+               new_spte = mark_spte_for_access_track(new_spte);
        }
 
-       rcu_read_unlock();
+       tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
 
-       return young;
+       return true;
 }
 
-int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
-                             unsigned long end)
+bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-       return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
-                                           age_gfn_range);
+       return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
 }
 
-static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-                       struct kvm_mmu_page *root, gfn_t gfn, gfn_t end,
-                       unsigned long unused)
+static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
+                        struct kvm_gfn_range *range)
 {
-       struct tdp_iter iter;
-
-       tdp_root_for_each_leaf_pte(iter, root, gfn, end)
-               if (is_accessed_spte(iter.old_spte))
-                       return 1;
-
-       return 0;
+       return is_accessed_spte(iter->old_spte);
 }
 
-int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva)
+bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-       return kvm_tdp_mmu_handle_hva(kvm, hva, 0, test_age_gfn);
+       return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
 }
 
-/*
- * Handle the changed_pte MMU notifier for the TDP MMU.
- * data is a pointer to the new pte_t mapping the HVA specified by the MMU
- * notifier.
- * Returns non-zero if a flush is needed before releasing the MMU lock.
- */
-static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
-                       struct kvm_mmu_page *root, gfn_t gfn, gfn_t end,
-                       unsigned long data)
+static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
+                        struct kvm_gfn_range *range)
 {
-       struct tdp_iter iter;
-       pte_t *ptep = (pte_t *)data;
-       kvm_pfn_t new_pfn;
        u64 new_spte;
-       int need_flush = 0;
-
-       rcu_read_lock();
-
-       WARN_ON(pte_huge(*ptep) || (gfn + 1) != end);
 
-       new_pfn = pte_pfn(*ptep);
+       /* Huge pages aren't expected to be modified without first being zapped. */
+       WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
 
-       tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
-               if (iter.level != PG_LEVEL_4K)
-                       continue;
-
-               if (!is_shadow_present_pte(iter.old_spte))
-                       break;
-
-               /*
-                * Note, when changing a read-only SPTE, it's not strictly
-                * necessary to zero the SPTE before setting the new PFN, but
-                * doing so preserves the invariant that the PFN of a present
-                * leaf SPTE can never change.  See __handle_changed_spte().
-                */
-               tdp_mmu_set_spte(kvm, &iter, 0);
+       if (iter->level != PG_LEVEL_4K ||
+           !is_shadow_present_pte(iter->old_spte))
+               return false;
 
-               if (!pte_write(*ptep)) {
-                       new_spte = kvm_mmu_changed_pte_notifier_make_spte(
-                                       iter.old_spte, new_pfn);
+       /*
+        * Note, when changing a read-only SPTE, it's not strictly necessary to
+        * zero the SPTE before setting the new PFN, but doing so preserves the
+        * invariant that the PFN of a present leaf SPTE can never change.
+        * See __handle_changed_spte().
+        */
+       tdp_mmu_set_spte(kvm, iter, 0);
 
-                       tdp_mmu_set_spte(kvm, &iter, new_spte);
-               }
+       if (!pte_write(range->pte)) {
+               new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
+                                                                 pte_pfn(range->pte));
 
-               need_flush = 1;
+               tdp_mmu_set_spte(kvm, iter, new_spte);
        }
 
-       if (need_flush)
-               kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
-
-       rcu_read_unlock();
-
-       return 0;
+       return true;
 }
 
-int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
-                            pte_t *host_ptep)
+/*
+ * Handle the changed_pte MMU notifier for the TDP MMU.
+ * range->pte holds the new host PTE mapping the single GFN covered by the
+ * range.
+ * Returns true if a TLB flush is needed before releasing the MMU lock; the
+ * flush is currently issued here directly, so this always returns false.
+ */
+bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-       return kvm_tdp_mmu_handle_hva(kvm, address, (unsigned long)host_ptep,
-                                     set_tdp_spte);
+       bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
+
+       /* FIXME: return 'flush' instead of flushing here. */
+       if (flush)
+               kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
+
+       return false;
 }
 
 /*
@@ -1120,17 +1055,11 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
                             int min_level)
 {
        struct kvm_mmu_page *root;
-       int root_as_id;
        bool spte_set = false;
 
-       for_each_tdp_mmu_root_yield_safe(kvm, root) {
-               root_as_id = kvm_mmu_page_as_id(root);
-               if (root_as_id != slot->as_id)
-                       continue;
-
+       for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
                spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
                             slot->base_gfn + slot->npages, min_level);
-       }
 
        return spte_set;
 }
@@ -1185,17 +1114,11 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
        struct kvm_mmu_page *root;
-       int root_as_id;
        bool spte_set = false;
 
-       for_each_tdp_mmu_root_yield_safe(kvm, root) {
-               root_as_id = kvm_mmu_page_as_id(root);
-               if (root_as_id != slot->as_id)
-                       continue;
-
+       for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
                spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
                                slot->base_gfn + slot->npages);
-       }
 
        return spte_set;
 }
@@ -1257,16 +1180,10 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                       bool wrprot)
 {
        struct kvm_mmu_page *root;
-       int root_as_id;
 
        lockdep_assert_held_write(&kvm->mmu_lock);
-       for_each_tdp_mmu_root(kvm, root) {
-               root_as_id = kvm_mmu_page_as_id(root);
-               if (root_as_id != slot->as_id)
-                       continue;
-
+       for_each_tdp_mmu_root(kvm, root, slot->as_id)
                clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
-       }
 }
 
 /*
@@ -1275,7 +1192,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
  */
 static bool zap_collapsible_spte_range(struct kvm *kvm,
                                       struct kvm_mmu_page *root,
-                                      struct kvm_memory_slot *slot,
+                                      const struct kvm_memory_slot *slot,
                                       bool flush)
 {
        gfn_t start = slot->base_gfn;
@@ -1316,18 +1233,13 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
  * be replaced by large mappings, for GFNs within the slot.
  */
 bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-                                      struct kvm_memory_slot *slot, bool flush)
+                                      const struct kvm_memory_slot *slot,
+                                      bool flush)
 {
        struct kvm_mmu_page *root;
-       int root_as_id;
-
-       for_each_tdp_mmu_root_yield_safe(kvm, root) {
-               root_as_id = kvm_mmu_page_as_id(root);
-               if (root_as_id != slot->as_id)
-                       continue;
 
+       for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
                flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-       }
 
        return flush;
 }
@@ -1371,17 +1283,12 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn)
 {
        struct kvm_mmu_page *root;
-       int root_as_id;
        bool spte_set = false;
 
        lockdep_assert_held_write(&kvm->mmu_lock);
-       for_each_tdp_mmu_root(kvm, root) {
-               root_as_id = kvm_mmu_page_as_id(root);
-               if (root_as_id != slot->as_id)
-                       continue;
-
+       for_each_tdp_mmu_root(kvm, root, slot->as_id)
                spte_set |= write_protect_gfn(kvm, root, gfn);
-       }
+
        return spte_set;
 }