// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}

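/*
 * Example (illustrative sketch, not part of the original source): with the
 * parameter exposed above as mode 0644, the TDP MMU can be disabled at
 * module load time and inspected at runtime, e.g.:
 *
 *	modprobe kvm tdp_mmu=N
 *	cat /sys/module/kvm/parameters/tdp_mmu
 *
 * kvm_mmu_init_tdp_mmu() snapshots the value with READ_ONCE() once per VM,
 * so flipping the parameter later only affects subsequently created VMs.
 */
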
static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.
	 */
	rcu_barrier();
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared);

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!root->tdp_mmu_page);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

	zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);

	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/*
 * Finds the next valid root after root (or the first valid root if root
 * is NULL), takes a reference on it, and returns that next root. If root
 * is not NULL, this thread should have already taken a reference on it, and
 * that reference will be dropped. If no valid root is found, this
 * function will return NULL.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared);		\
	     _root;							\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared))		\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

#define for_each_tdp_mmu_root(_kvm, _root, _as_id)				\
	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,		\
				lockdep_is_held_type(&kvm->mmu_lock, 0) ||	\
				lockdep_is_held(&kvm->arch.tdp_mmu_pages_lock))	\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

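/*
 * Example usage (illustrative sketch based on callers later in this file):
 * walk every root in a memslot's address space while safely yielding the
 * MMU lock between roots:
 *
 *	struct kvm_mmu_page *root;
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
 *		flush = zap_gfn_range(kvm, root, start, end, true, flush,
 *				      shared);
 *
 * The empty "if { } else" tail in both macros filters out roots belonging
 * to a different address space while still letting the macro prefix a
 * single statement or block.
 */
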
static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.gpte_is_8_bytes = true;
	role.access = ACC_ALL;

	return role;
}

static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	/* Check for an existing root before allocating a new one. */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(kvm, root))
			goto out;
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}

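/*
 * Example (illustrative sketch, not from this file): the expected caller is
 * the MMU load path in mmu.c, which installs the returned root for the
 * vCPU; vCPUs whose page role matches share a single refcounted root:
 *
 *	root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
 *	mmu->root_hpa = root;
 */
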
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 * @account_nx: This page replaces a NX large page and should be marked for
 *		eventual reclaim.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool shared, bool account_nx)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
	if (account_nx)
		account_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
					bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	u64 old_child_spte;
	u64 *sptep;
	gfn_t gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_page(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		sptep = rcu_dereference(pt) + i;
		gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * set the SPTE until it is set from some other
			 * value to the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level - 1,
				    shared);
	}

	kvm_flush_remote_tlbs_with_address(kvm, gfn,
					   KVM_PAGES_PER_HPAGE(level));

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present))
		handle_removed_tdp_mmu_page(kvm,
				spte_to_child_pt(old_spte, level), shared);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic_no_dirty_log - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping, but do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Returns: true if the SPTE was set, false if it was not. If false is returned,
 *	    this function will have no side-effects.
 */
static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
							struct tdp_iter *iter,
							u64 new_spte)
{
	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Do not change removed SPTEs. Only the thread that froze the SPTE
	 * can access the removed SPTE.
	 */
	if (is_removed_spte(iter->old_spte))
		return false;

	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
		      new_spte) != iter->old_spte)
		return false;

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return true;
}

static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter,
					   u64 new_spte)
{
	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
		return false;

	handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
				      iter->old_spte, new_spte, iter->level);
	return true;
}

static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter)
{
	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
		return false;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);

	return true;
}

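/*
 * Example usage (illustrative sketch based on callers later in this file):
 * under the read lock, a loser of the cmpxchg race must re-read the SPTE
 * through the iterator and retry:
 *
 *	if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
 *		iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
 *		goto retry;
 *	}
 */
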
/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(iter->old_spte));

	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, false);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

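/*
 * Illustrative summary (not from the original source): the wrappers above
 * exist because some callers must suppress one half of the bookkeeping.
 * age_gfn_range() below uses tdp_mmu_set_spte_no_acc_track() so that an MMU
 * notifier clearing the accessed bit is not itself counted as an access,
 * and clear_dirty_pt_masked() uses tdp_mmu_set_spte_no_dirty_log() so that
 * clearing dirty state is not recorded as a new write.
 */
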
#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)	\
		if (!is_shadow_present_pte(_iter.old_spte) ||	\
		    !is_last_spte(_iter.old_spte, _iter.level))	\
			continue;				\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, it will also reset the tdp_iter's walk over the
 * paging structure and the calling function should skip to the next
 * iteration to allow the iterator to continue its traversal from the
 * paging structure root.
 *
 * Return true if this function yielded and the iterator's traversal was reset.
 * Return false if a yield was not needed.
 */
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
					     struct tdp_iter *iter, bool flush,
					     bool shared)
{
	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		rcu_read_unlock();

		if (flush)
			kvm_flush_remote_tlbs(kvm);

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		tdp_iter_restart(iter);

		return true;
	}

	return false;
}

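/*
 * Example usage (illustrative sketch based on callers later in this file):
 * because a yield restarts the walk from the paging structure root, callers
 * must "continue" rather than operate on the current entry after a yield:
 *
 *	tdp_root_for_each_pte(iter, root, start, end) {
 *		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 *			continue;
 *		...
 *	}
 */
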
/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 *
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 *
 * If shared is true, this thread holds the MMU lock in read mode and must
 * account for the possibility that other threads are modifying the paging
 * structures concurrently. If shared is false, this thread should hold the
 * MMU lock in write mode.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared)
{
	struct tdp_iter iter;

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
retry:
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level.
		 */
		if ((iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (!shared) {
			tdp_mmu_set_spte(kvm, &iter, 0);
			flush = true;
		} else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
	}

	rcu_read_unlock();
	return flush;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 *
 * If shared is true, this thread holds the MMU lock in read mode and must
 * account for the possibility that other threads are modifying the paging
 * structures concurrently. If shared is false, this thread should hold the
 * MMU lock in write mode.
 */
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush,
				 bool shared)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
				      shared);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool flush = false;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
						  flush, false);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
						  struct kvm_mmu_page *prev_root)
{
	struct kvm_mmu_page *next_root;

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !(next_root->role.invalid &&
			      refcount_read(&next_root->tdp_mmu_root_count)))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &next_root->link,
						  typeof(*next_root), link);

	return next_root;
}

/*
 * Since kvm_tdp_mmu_zap_all_fast has acquired a reference to each
 * invalidated root, they will not be freed until this function drops the
 * reference. Before dropping that reference, tear down the paging
 * structure so that whichever thread does drop the last reference
 * only has to do a trivial amount of work. Since the roots are invalid,
 * no new SPTEs should be created under them.
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	struct kvm_mmu_page *next_root;
	struct kvm_mmu_page *root;
	bool flush = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	rcu_read_lock();

	root = next_invalidated_root(kvm, NULL);

	while (root) {
		next_root = next_invalidated_root(kvm, root);

		rcu_read_unlock();

		flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
				      true);

		/*
		 * Put the reference acquired in
		 * kvm_tdp_mmu_invalidate_roots
		 */
		kvm_tdp_mmu_put_root(kvm, root, true);

		root = next_root;

		rcu_read_lock();
	}

	rcu_read_unlock();

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Mark each TDP MMU root as invalid so that other threads
 * will drop their references and allow the root count to
 * go to 0.
 *
 * Also take a reference on all roots so that this thread
 * can do the bulk of the work required to free the roots
 * once they are invalidated. Without this reference, a
 * vCPU thread might drop the last reference to a root and
 * get stuck with tearing down the entire paging structure.
 *
 * Roots which have a zero refcount should be skipped as
 * they're already being torn down.
 * Already invalid roots should be referenced again so that
 * they aren't freed before kvm_tdp_mmu_zap_all_fast is
 * done with them.
 *
 * This has essentially the same effect for the TDP MMU
 * as updating mmu_valid_gen does for the shadow MMU.
 */
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
		if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
			root->role.invalid = true;
}

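/*
 * Illustrative caller pattern (sketch; the actual caller is the fast-zap
 * path in mmu.c): invalidation happens under the write lock, then the bulk
 * teardown proceeds under the read lock:
 *
 *	write_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_invalidate_all_roots(kvm);
 *	write_unlock(&kvm->mmu_lock);
 *
 *	read_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_zap_invalidated_roots(kvm);
 *	read_unlock(&kvm->mmu_lock);
 */
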
/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
					   int map_writable,
					   struct tdp_iter *iter,
					   kvm_pfn_t pfn, bool prefault)
{
	u64 new_spte;
	int ret = RET_PF_FIXED;
	int make_spte_ret = 0;

	if (unlikely(is_noslot_pfn(pfn)))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
					  pfn, iter->old_spte, prefault, true,
					  map_writable, !shadow_accessed_mask,
					  &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write)
			ret = RET_PF_EMULATE;
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte))) {
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));

	if (!prefault)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int level;
	int req_level;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;
	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;

	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(gpa, level, pfn);

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(iter.old_spte, gfn,
						   iter.level, &pfn, &level);

		if (iter.level == level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
				break;

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
		}

		if (!is_shadow_present_pte(iter.old_spte)) {
			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
			child_pt = sp->spt;

			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
						    new_spte)) {
				tdp_mmu_link_page(vcpu->kvm, sp, true,
						  huge_page_disallowed &&
						  req_level >= iter.level);

				trace_kvm_mmu_get_page(sp, true);
			} else {
				tdp_mmu_free_sp(sp);
				break;
			}
		}
	}

	if (iter.level != level) {
		rcu_read_unlock();
		return RET_PF_RETRY;
	}

	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
					      pfn, prefault);
	rcu_read_unlock();

	return ret;
}

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
		flush |= zap_gfn_range(kvm, root, range->start, range->end,
				       range->may_block, flush, false);

	return flush;
}

typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
			      struct kvm_gfn_range *range);

static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
						   struct kvm_gfn_range *range,
						   tdp_handler_t handler)
{
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	rcu_read_lock();

	/*
	 * Don't support rescheduling, none of the MMU notifiers that funnel
	 * into this helper allow blocking; it'd be dead, wasteful code.
	 */
	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
			ret |= handler(kvm, &iter, range);
	}

	rcu_read_unlock();

	return ret;
}

/*
 * Mark the SPTEs mapping GFNs in the range [start, end) unaccessed and
 * return true if any of the GFNs in the range have been accessed.
 */
static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
			  struct kvm_gfn_range *range)
{
	u64 new_spte = 0;

	/* If we have a non-accessed entry we don't need to change the pte. */
	if (!is_accessed_spte(iter->old_spte))
		return false;

	new_spte = iter->old_spte;

	if (spte_ad_enabled(new_spte)) {
		new_spte &= ~shadow_accessed_mask;
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(new_spte))
			kvm_set_pfn_dirty(spte_to_pfn(new_spte));

		new_spte = mark_spte_for_access_track(new_spte);
	}

	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);

	return true;
}

bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
}

static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	return is_accessed_spte(iter->old_spte);
}

bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
}

static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	u64 new_spte;

	/* Huge pages aren't expected to be modified without first being zapped. */
	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);

	if (iter->level != PG_LEVEL_4K ||
	    !is_shadow_present_pte(iter->old_spte))
		return false;

	/*
	 * Note, when changing a read-only SPTE, it's not strictly necessary to
	 * zero the SPTE before setting the new PFN, but doing so preserves the
	 * invariant that the PFN of a present leaf SPTE can never change.
	 * See __handle_changed_spte().
	 */
	tdp_mmu_set_spte(kvm, iter, 0);

	if (!pte_write(range->pte)) {
		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
								  pte_pfn(range->pte));

		tdp_mmu_set_spte(kvm, iter, new_spte);
	}

	return true;
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * range->pte holds the new pte_t mapping the HVA covered by the MMU
 * notifier.
 * Returns non-zero if a flush is needed before releasing the MMU lock.
 */
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);

	/* FIXME: return 'flush' instead of flushing here. */
	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);

	return false;
}

/*
 * Remove write access from all the leaf SPTEs that map GFNs [start, end)
 * at or above min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			     gfn_t start, gfn_t end, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level) ||
		    !(iter.old_spte & PT_WRITABLE_MASK))
			continue;

		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

		if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
							  new_spte)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
			     slot->base_gfn + slot->npages, min_level);

	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
							  new_spte)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
				slot->base_gfn + slot->npages);

	return spte_set;
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t gfn, unsigned long mask, bool wrprot)
{
	struct tdp_iter iter;
	u64 new_spte;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
				    gfn + BITS_PER_LONG) {
		if (!mask)
			break;

		if (iter.level > PG_LEVEL_4K ||
		    !(mask & (1UL << (iter.gfn - gfn))))
			continue;

		mask &= ~(1UL << (iter.gfn - gfn));

		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
	}

	rcu_read_unlock();
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}

/*
 * Clear leaf entries which could be replaced by large mappings, for
 * GFNs within the slot.
 */
static bool zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       const struct kvm_memory_slot *slot,
				       bool flush)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;
	struct tdp_iter iter;
	kvm_pfn_t pfn;

	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		pfn = spte_to_pfn(iter.old_spte);
		if (kvm_is_reserved_pfn(pfn) ||
		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
							    pfn, PG_LEVEL_NUM))
			continue;

		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
		flush = true;
	}

	rcu_read_unlock();

	return flush;
}

/*
 * Clear non-leaf entries (and free associated page tables) which could
 * be replaced by large mappings, for GFNs within the slot.
 */
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		flush = zap_collapsible_spte_range(kvm, root, slot, flush);

	return flush;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
		if (!is_writable_pte(iter.old_spte))
			break;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		spte_set |= write_protect_gfn(kvm, root, gfn);

	return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->shadow_root_level;

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	rcu_read_unlock();

	return leaf;
}