1 // SPDX-License-Identifier: GPL-2.0
4 #include "mmu_internal.h"
10 #include <asm/cmpxchg.h>
11 #include <trace/events/kvm.h>
13 static bool __read_mostly tdp_mmu_enabled = false;
14 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
16 /* Initializes the TDP MMU for the VM, if enabled. */
17 void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
19 if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
22 /* This should not be changed for the lifetime of the VM. */
23 kvm->arch.tdp_mmu_enabled = true;
25 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
26 spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
27 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
30 static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
34 lockdep_assert_held_read(&kvm->mmu_lock);
36 lockdep_assert_held_write(&kvm->mmu_lock);
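/*
 * Tears down the VM's TDP MMU state on destruction. All roots should have
 * been freed by this point; the remaining work is making sure any
 * outstanding RCU callbacks have completed.
 */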
39 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
41 if (!kvm->arch.tdp_mmu_enabled)
44 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
47 * Ensure that all the outstanding RCU callbacks to free shadow pages
48 * can run before the VM is torn down.
53 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
54 gfn_t start, gfn_t end, bool can_yield, bool flush,
57 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
59 free_page((unsigned long)sp->spt);
60 kmem_cache_free(mmu_page_header_cache, sp);
64 * This is called through call_rcu in order to free TDP page table memory
65 * safely with respect to other kernel threads that may be operating on the same memory.
67 * By only accessing TDP MMU page table memory in an RCU read critical
68 * section, and freeing it after a grace period, lockless access to that
69 * memory won't use it after it is freed.
71 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
73 struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
79 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
82 gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
84 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
86 if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
89 WARN_ON(!root->tdp_mmu_page);
91 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
92 list_del_rcu(&root->link);
93 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
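/*
 * Tear down the paging structure under this root. The page table pages
 * themselves are freed only after an RCU grace period (via
 * tdp_mmu_free_sp_rcu_callback) so that lockless walkers are not broken.
 */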
95 zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);
97 call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
101 * Finds the next valid root after prev_root (or the first valid root if prev_root
102 * is NULL), takes a reference on it, and returns that next root. If prev_root
103 * is not NULL, this thread should have already taken a reference on it, and
104 * that reference will be dropped. If no valid root is found, this
105 * function will return NULL.
107 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
108 struct kvm_mmu_page *prev_root,
111 struct kvm_mmu_page *next_root;
116 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
118 typeof(*prev_root), link);
120 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
121 typeof(*next_root), link);
123 while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
124 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
125 &next_root->link, typeof(*next_root), link);
130 kvm_tdp_mmu_put_root(kvm, prev_root, shared);
136 * Note: this iterator gets and puts references to the roots it iterates over.
137 * This makes it safe to release the MMU lock and yield within the loop, but
138 * if exiting the loop early, the caller must drop the reference to the most
139 * recent root. (Unless keeping a live reference is desirable.)
141 * If shared is set, this function is operating under the MMU lock in read
142 * mode. In the unlikely event that this thread must free a root, the lock
143 * will be temporarily dropped and reacquired in write mode.
145 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
146 for (_root = tdp_mmu_next_root(_kvm, NULL, _shared); \
148 _root = tdp_mmu_next_root(_kvm, _root, _shared)) \
149 if (kvm_mmu_page_as_id(_root) != _as_id) { \
152 #define for_each_tdp_mmu_root(_kvm, _root, _as_id) \
153 list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link, \
154 lockdep_is_held_type(&_kvm->mmu_lock, 0) || \
155 lockdep_is_held(&_kvm->arch.tdp_mmu_pages_lock)) \
156 if (kvm_mmu_page_as_id(_root) != _as_id) { \
159 static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
162 union kvm_mmu_page_role role;
164 role = vcpu->arch.mmu->mmu_role.base;
167 role.gpte_is_8_bytes = true;
168 role.access = ACC_ALL;
173 static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
176 struct kvm_mmu_page *sp;
178 sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
179 sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
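/*
 * Stash a back-pointer to the kvm_mmu_page in the backing struct page so
 * that sptep_to_sp() can recover the shadow page from an SPTE pointer.
 */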
180 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
182 sp->role.word = page_role_for_level(vcpu, level).word;
184 sp->tdp_mmu_page = true;
186 trace_kvm_mmu_get_page(sp, true);
191 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
193 union kvm_mmu_page_role role;
194 struct kvm *kvm = vcpu->kvm;
195 struct kvm_mmu_page *root;
197 lockdep_assert_held_write(&kvm->mmu_lock);
199 role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
201 /* Check for an existing root before allocating a new one. */
202 for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
203 if (root->role.word == role.word &&
204 kvm_tdp_mmu_get_root(kvm, root))
208 root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
209 refcount_set(&root->tdp_mmu_root_count, 1);
211 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
212 list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
213 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
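/*
 * The caller stores the returned HPA in mmu->root_hpa;
 * tdp_mmu_for_each_pte() later converts it back with __va() to walk the
 * paging structure from this root.
 */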
216 return __pa(root->spt);
219 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
220 u64 old_spte, u64 new_spte, int level,
223 static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
225 if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
228 if (is_accessed_spte(old_spte) &&
229 (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
230 spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
231 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
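/*
 * If a 4K SPTE that was not writable (or that mapped a different PFN)
 * becomes writable, mark the page dirty in the memslot's dirty bitmap.
 */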
234 static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
235 u64 old_spte, u64 new_spte, int level)
238 struct kvm_memory_slot *slot;
240 if (level > PG_LEVEL_4K)
243 pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
245 if ((!is_writable_pte(old_spte) || pfn_changed) &&
246 is_writable_pte(new_spte)) {
247 slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
248 mark_page_dirty_in_slot(kvm, slot, gfn);
253 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
257 * @shared: This operation may not be running under the exclusive use of
258 * the MMU lock and the operation must synchronize with other
259 * threads that might be adding or removing pages.
260 * @account_nx: This page replaces a NX large page and should be marked for eventual reclaim.
263 static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
264 bool shared, bool account_nx)
267 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
269 lockdep_assert_held_write(&kvm->mmu_lock);
271 list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
273 account_huge_nx_page(kvm, sp);
276 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
280 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
283 * @sp: the page to be removed
284 * @shared: This operation may not be running under the exclusive use of
285 * the MMU lock and the operation must synchronize with other
286 * threads that might be adding or removing pages.
288 static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
292 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
294 lockdep_assert_held_write(&kvm->mmu_lock);
297 if (sp->lpage_disallowed)
298 unaccount_huge_nx_page(kvm, sp);
301 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
305 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
308 * @pt: the page removed from the paging structure
309 * @shared: This operation may not be running under the exclusive use
310 * of the MMU lock and the operation must synchronize with other
311 * threads that might be modifying SPTEs.
313 * Given a page table that has been removed from the TDP paging structure,
314 * iterates through the page table to clear SPTEs and free child page tables.
316 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
317 * protection. Since this thread removed it from the paging structure,
318 * this thread will be responsible for ensuring the page is freed. Hence the
319 * early rcu_dereferences in the function.
321 static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
324 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
325 int level = sp->role.level;
326 gfn_t base_gfn = sp->gfn;
332 trace_kvm_mmu_prepare_zap_page(sp);
334 tdp_mmu_unlink_page(kvm, sp, shared);
336 for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
337 sptep = rcu_dereference(pt) + i;
338 gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
342 * Set the SPTE to a nonpresent value that other
343 * threads will not overwrite. If the SPTE was
344 * already marked as removed then another thread
345 * handling a page fault could overwrite it, so
346 * retry the exchange until the SPTE is changed from some other
347 * value to the removed SPTE value.
350 old_child_spte = xchg(sptep, REMOVED_SPTE);
351 if (!is_removed_spte(old_child_spte))
357 * If the SPTE is not MMU-present, there is no backing
358 * page associated with the SPTE and so no side effects
359 * that need to be recorded, and exclusive ownership of
360 * mmu_lock ensures the SPTE can't be made present.
361 * Note, zapping MMIO SPTEs is also unnecessary as they
362 * are guarded by the memslots generation, not by being unreachable.
365 old_child_spte = READ_ONCE(*sptep);
366 if (!is_shadow_present_pte(old_child_spte))
370 * Marking the SPTE as a removed SPTE is not
371 * strictly necessary here as the MMU lock will
372 * stop other threads from concurrently modifying
373 * this SPTE. Using the removed SPTE value keeps
374 * the two branches consistent and simplifies the function.
377 WRITE_ONCE(*sptep, REMOVED_SPTE);
379 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
380 old_child_spte, REMOVED_SPTE, level - 1,
384 kvm_flush_remote_tlbs_with_address(kvm, gfn,
385 KVM_PAGES_PER_HPAGE(level));
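/*
 * The page table page is freed only after an RCU grace period so that
 * concurrent lockless walkers can finish with it first.
 */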
387 call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
391 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
393 * @as_id: the address space of the paging structure the SPTE was a part of
394 * @gfn: the base GFN that was mapped by the SPTE
395 * @old_spte: The value of the SPTE before the change
396 * @new_spte: The value of the SPTE after the change
397 * @level: the level of the PT the SPTE is part of in the paging structure
398 * @shared: This operation may not be running under the exclusive use of
399 * the MMU lock and the operation must synchronize with other
400 * threads that might be modifying SPTEs.
402 * Handle bookkeeping that might result from the modification of a SPTE.
403 * This function must be called for all TDP SPTE modifications.
405 static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
406 u64 old_spte, u64 new_spte, int level,
409 bool was_present = is_shadow_present_pte(old_spte);
410 bool is_present = is_shadow_present_pte(new_spte);
411 bool was_leaf = was_present && is_last_spte(old_spte, level);
412 bool is_leaf = is_present && is_last_spte(new_spte, level);
413 bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
415 WARN_ON(level > PT64_ROOT_MAX_LEVEL);
416 WARN_ON(level < PG_LEVEL_4K);
417 WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
420 * If this warning were to trigger it would indicate that there was a
421 * missing MMU notifier or a race with some notifier handler.
422 * A present, leaf SPTE should never be directly replaced with another
423 * present leaf SPTE pointing to a different PFN. A notifier handler
424 * should be zapping the SPTE before the main MM's page table is
425 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
426 * thread before replacement.
428 if (was_leaf && is_leaf && pfn_changed) {
429 pr_err("Invalid SPTE change: cannot replace a present leaf\n"
430 "SPTE with another present leaf SPTE mapping a\n"
432 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
433 as_id, gfn, old_spte, new_spte, level);
436 * Crash the host to prevent error propagation and guest data corruption.
442 if (old_spte == new_spte)
445 trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
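/*
 * Keep the large page count in sync when a huge mapping is created or
 * destroyed.
 */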
447 if (is_large_pte(old_spte) != is_large_pte(new_spte)) {
448 if (is_large_pte(old_spte))
449 atomic64_sub(1, (atomic64_t*)&kvm->stat.lpages);
451 atomic64_add(1, (atomic64_t*)&kvm->stat.lpages);
455 * The only time an SPTE should be changed from one non-present state to
456 * another is when an MMIO entry is installed/modified/
457 * removed. In that case, there is nothing to do here.
459 if (!was_present && !is_present) {
461 * If this change does not involve a MMIO SPTE or removed SPTE,
462 * it is unexpected. Log the change, though it should not
463 * impact the guest since both the former and current SPTEs are nonpresent.
466 if (WARN_ON(!is_mmio_spte(old_spte) &&
467 !is_mmio_spte(new_spte) &&
468 !is_removed_spte(new_spte)))
469 pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
470 "should not be replaced with another,\n"
471 "different nonpresent SPTE, unless one or both\n"
472 "are MMIO SPTEs, or the new SPTE is\n"
473 "a temporary removed SPTE.\n"
474 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
475 as_id, gfn, old_spte, new_spte, level);
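/*
 * If a dirty leaf SPTE is being removed, made clean, or pointed at a
 * different PFN, propagate the dirty status to the old backing page before
 * the mapping is lost.
 */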
480 if (was_leaf && is_dirty_spte(old_spte) &&
481 (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
482 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
485 * Recursively handle child PTs if the change removed a subtree from
486 * the paging structure.
488 if (was_present && !was_leaf && (pfn_changed || !is_present))
489 handle_removed_tdp_mmu_page(kvm,
490 spte_to_child_pt(old_spte, level), shared);
493 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
494 u64 old_spte, u64 new_spte, int level,
497 __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
499 handle_changed_spte_acc_track(old_spte, new_spte, level);
500 handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
505 * tdp_mmu_set_spte_atomic_no_dirty_log - Set a TDP MMU SPTE atomically
506 * and handle the associated bookkeeping, but do not mark the page dirty
507 * in KVM's dirty bitmaps.
510 * @iter: a tdp_iter instance currently on the SPTE that should be set
511 * @new_spte: The value the SPTE should be set to
512 * Returns: true if the SPTE was set, false if it was not. If false is returned,
513 * this function will have no side-effects.
515 static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
516 struct tdp_iter *iter,
519 lockdep_assert_held_read(&kvm->mmu_lock);
522 * Do not change removed SPTEs. Only the thread that froze the SPTE may modify it.
525 if (is_removed_spte(iter->old_spte))
528 if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
529 new_spte) != iter->old_spte)
532 __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
533 new_spte, iter->level, true);
534 handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
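/*
 * Like tdp_mmu_set_spte_atomic_no_dirty_log(), but also records the page as
 * dirty in the dirty bitmap when the change warrants it.
 */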
539 static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
540 struct tdp_iter *iter,
543 if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
546 handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
547 iter->old_spte, new_spte, iter->level);
551 static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
552 struct tdp_iter *iter)
555 * Freeze the SPTE by setting it to a special,
556 * non-present value. This will stop other threads from
557 * immediately installing a present entry in its place
558 * before the TLBs are flushed.
560 if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
563 kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
564 KVM_PAGES_PER_HPAGE(iter->level));
567 * No other thread can overwrite the removed SPTE as they
568 * must either wait on the MMU lock or use
569 * tdp_mmu_set_spte_atomic which will not overwrite the
570 * special removed SPTE value. No bookkeeping is needed
571 * here since the SPTE is going from non-present to non-present.
574 WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
581 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
583 * @iter: a tdp_iter instance currently on the SPTE that should be set
584 * @new_spte: The value the SPTE should be set to
585 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
586 * of the page. Should be set unless handling an MMU
587 * notifier for access tracking. Leaving record_acc_track
588 * unset in that case prevents page accesses from being double counted.
590 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
591 * appropriate for the change being made. Should be set
592 * unless performing certain dirty logging operations.
593 * Leaving record_dirty_log unset in that case prevents page
594 * writes from being double counted.
596 static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
597 u64 new_spte, bool record_acc_track,
598 bool record_dirty_log)
600 lockdep_assert_held_write(&kvm->mmu_lock);
603 * No thread should be using this function to set SPTEs to the
604 * temporary removed SPTE value.
605 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
606 * should be used. If operating under the MMU lock in write mode, the
607 * use of the removed SPTE should not be necessary.
609 WARN_ON(is_removed_spte(iter->old_spte));
611 WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
613 __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
614 new_spte, iter->level, false);
615 if (record_acc_track)
616 handle_changed_spte_acc_track(iter->old_spte, new_spte,
618 if (record_dirty_log)
619 handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
620 iter->old_spte, new_spte,
624 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
627 __tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
630 static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
631 struct tdp_iter *iter,
634 __tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
637 static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
638 struct tdp_iter *iter,
641 __tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
644 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
645 for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
647 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \
648 tdp_root_for_each_pte(_iter, _root, _start, _end) \
649 if (!is_shadow_present_pte(_iter.old_spte) || \
650 !is_last_spte(_iter.old_spte, _iter.level)) \
654 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end) \
655 for_each_tdp_pte(_iter, __va(_mmu->root_hpa), \
656 _mmu->shadow_root_level, _start, _end)
659 * Yield if the MMU lock is contended or this thread needs to return control to the scheduler.
662 * If this function should yield and flush is set, it will perform a remote
663 * TLB flush before yielding.
665 * If this function yields, it will also reset the tdp_iter's walk over the
666 * paging structure and the calling function should skip to the next
667 * iteration to allow the iterator to continue its traversal from the
668 * paging structure root.
670 * Return true if this function yielded and the iterator's traversal was reset.
671 * Return false if a yield was not needed.
673 static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
674 struct tdp_iter *iter, bool flush,
677 /* Ensure forward progress has been made before yielding. */
678 if (iter->next_last_level_gfn == iter->yielded_gfn)
681 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
685 kvm_flush_remote_tlbs(kvm);
688 cond_resched_rwlock_read(&kvm->mmu_lock);
690 cond_resched_rwlock_write(&kvm->mmu_lock);
694 WARN_ON(iter->gfn > iter->next_last_level_gfn);
696 tdp_iter_restart(iter);
705 * Tears down the mappings for the range of gfns, [start, end), and frees the
706 * non-root pages mapping GFNs strictly within that range. Returns true if
707 * SPTEs have been cleared and a TLB flush is needed before releasing the MMU lock.
710 * If can_yield is true, will release the MMU lock and reschedule if the
711 * scheduler needs the CPU or there is contention on the MMU lock. If this
712 * function cannot yield, it will not release the MMU lock or reschedule and
713 * the caller must ensure it does not supply too large a GFN range, or the
714 * operation can cause a soft lockup.
716 * If shared is true, this thread holds the MMU lock in read mode and must
717 * account for the possibility that other threads are modifying the paging
718 * structures concurrently. If shared is false, this thread should hold the
719 * MMU lock in write mode.
721 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
722 gfn_t start, gfn_t end, bool can_yield, bool flush,
725 struct tdp_iter iter;
727 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
731 tdp_root_for_each_pte(iter, root, start, end) {
734 tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
739 if (!is_shadow_present_pte(iter.old_spte))
743 * If this is a non-last-level SPTE that covers a larger range
744 * than should be zapped, continue, and zap the mappings at a
747 if ((iter.gfn < start ||
748 iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
749 !is_last_spte(iter.old_spte, iter.level))
753 tdp_mmu_set_spte(kvm, &iter, 0);
755 } else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
757 * The iter must explicitly re-read the SPTE because
758 * the atomic cmpxchg failed.
760 iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
770 * Tears down the mappings for the range of gfns, [start, end), and frees the
771 * non-root pages mapping GFNs strictly within that range. Returns true if
772 * SPTEs have been cleared and a TLB flush is needed before releasing the MMU lock.
775 * If shared is true, this thread holds the MMU lock in read mode and must
776 * account for the possibility that other threads are modifying the paging
777 * structures concurrently. If shared is false, this thread should hold the MMU lock in write mode.
780 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
781 gfn_t end, bool can_yield, bool flush,
784 struct kvm_mmu_page *root;
786 for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
787 flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
793 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
795 gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
799 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
800 flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
804 kvm_flush_remote_tlbs(kvm);
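/*
 * Returns the next root after prev_root (or the first root if prev_root is
 * NULL) that has been marked invalid but still has a non-zero refcount,
 * i.e. a root that kvm_tdp_mmu_invalidate_all_roots took a reference on and
 * that still needs to be torn down.
 */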
807 static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
808 struct kvm_mmu_page *prev_root)
810 struct kvm_mmu_page *next_root;
813 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
815 typeof(*prev_root), link);
817 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
818 typeof(*next_root), link);
820 while (next_root && !(next_root->role.invalid &&
821 refcount_read(&next_root->tdp_mmu_root_count)))
822 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
824 typeof(*next_root), link);
830 * Since kvm_tdp_mmu_zap_all_fast has acquired a reference to each
831 * invalidated root, they will not be freed until this function drops the
832 * reference. Before dropping that reference, tear down the paging
833 * structure so that whichever thread does drop the last reference
834 * only has to do a trivial amount of work. Since the roots are invalid,
835 * no new SPTEs should be created under them.
837 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
839 gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
840 struct kvm_mmu_page *next_root;
841 struct kvm_mmu_page *root;
844 lockdep_assert_held_read(&kvm->mmu_lock);
848 root = next_invalidated_root(kvm, NULL);
851 next_root = next_invalidated_root(kvm, root);
855 flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
859 * Put the reference acquired in
860 * kvm_tdp_mmu_invalidate_all_roots
862 kvm_tdp_mmu_put_root(kvm, root, true);
872 kvm_flush_remote_tlbs(kvm);
876 * Mark each TDP MMU root as invalid so that other threads
877 * will drop their references and allow the root count to
880 * Also take a reference on all roots so that this thread
881 * can do the bulk of the work required to free the roots
882 * once they are invalidated. Without this reference, a
883 * vCPU thread might drop the last reference to a root and
884 * get stuck with tearing down the entire paging structure.
886 * Roots which have a zero refcount should be skipped as
887 * they're already being torn down.
888 * Already invalid roots should be referenced again so that
889 * they aren't freed before kvm_tdp_mmu_zap_all_fast is done with them.
892 * This has essentially the same effect for the TDP MMU
893 * as updating mmu_valid_gen does for the shadow MMU.
895 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
897 struct kvm_mmu_page *root;
899 lockdep_assert_held_write(&kvm->mmu_lock);
900 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
901 if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
902 root->role.invalid = true;
906 * Installs a last-level SPTE to handle a TDP page fault.
907 * (NPT/EPT violation/misconfiguration)
909 static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
911 struct tdp_iter *iter,
912 kvm_pfn_t pfn, bool prefault)
916 int make_spte_ret = 0;
918 if (unlikely(is_noslot_pfn(pfn)))
919 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
921 make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
922 pfn, iter->old_spte, prefault, true,
923 map_writable, !shadow_accessed_mask,
926 if (new_spte == iter->old_spte)
927 ret = RET_PF_SPURIOUS;
928 else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
932 * If the page fault was caused by a write but the page is write
933 * protected, emulation is needed. If the emulation was skipped,
934 * the vCPU would have the same fault again.
936 if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
938 ret = RET_PF_EMULATE;
939 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
942 /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
943 if (unlikely(is_mmio_spte(new_spte))) {
944 trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
946 ret = RET_PF_EMULATE;
948 trace_kvm_mmu_set_spte(iter->level, iter->gfn,
949 rcu_dereference(iter->sptep));
953 vcpu->stat.pf_fixed++;
959 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
960 * page tables and SPTEs to translate the faulting guest physical address.
962 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
963 int map_writable, int max_level, kvm_pfn_t pfn,
966 bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
967 bool write = error_code & PFERR_WRITE_MASK;
968 bool exec = error_code & PFERR_FETCH_MASK;
969 bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
970 struct kvm_mmu *mmu = vcpu->arch.mmu;
971 struct tdp_iter iter;
972 struct kvm_mmu_page *sp;
976 gfn_t gfn = gpa >> PAGE_SHIFT;
980 if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
982 if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
985 level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
986 huge_page_disallowed, &req_level);
988 trace_kvm_mmu_spte_requested(gpa, level, pfn);
992 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
993 if (nx_huge_page_workaround_enabled)
994 disallowed_hugepage_adjust(iter.old_spte, gfn,
995 iter.level, &pfn, &level);
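/* Stop descending once the iterator reaches the target mapping level. */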
997 if (iter.level == level)
1001 * If there is an SPTE mapping a large page at a higher level
1002 * than the target, that SPTE must be cleared and replaced
1003 * with a non-leaf SPTE.
1005 if (is_shadow_present_pte(iter.old_spte) &&
1006 is_large_pte(iter.old_spte)) {
1007 if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
1011 * The iter must explicitly re-read the spte here
1012 * because the new value informs the !present path below.
1015 iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1018 if (!is_shadow_present_pte(iter.old_spte)) {
1020 * If the SPTE has been frozen by another thread, just
1021 * give up and retry, avoiding unnecessary page table
1022 * allocation and freeing.
1024 if (is_removed_spte(iter.old_spte))
1027 sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
1030 new_spte = make_nonleaf_spte(child_pt,
1031 !shadow_accessed_mask);
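/*
 * Atomically install the new non-leaf SPTE. Only on success is the new
 * page table linked into the TDP MMU's page list (and accounted as an NX
 * huge page replacement if required); on failure the newly allocated page
 * is freed and the fault will be retried.
 */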
1033 if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
1035 tdp_mmu_link_page(vcpu->kvm, sp, true,
1036 huge_page_disallowed &&
1037 req_level >= iter.level);
1039 trace_kvm_mmu_get_page(sp, true);
1041 tdp_mmu_free_sp(sp);
1047 if (iter.level != level) {
1049 return RET_PF_RETRY;
1052 ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
1059 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1062 struct kvm_mmu_page *root;
1064 for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
1065 flush |= zap_gfn_range(kvm, root, range->start, range->end,
1066 range->may_block, flush, false);
1071 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1072 struct kvm_gfn_range *range);
1074 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1075 struct kvm_gfn_range *range,
1076 tdp_handler_t handler)
1078 struct kvm_mmu_page *root;
1079 struct tdp_iter iter;
1085 * Don't support rescheduling, none of the MMU notifiers that funnel
1086 * into this helper allow blocking; it'd be dead, wasteful code.
1088 for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1089 tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1090 ret |= handler(kvm, &iter, range);
1099 * Mark the SPTEs mapping GFNs in the range [start, end) unaccessed and return
1100 * non-zero if any of the GFNs in the range had been accessed.
1102 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1103 struct kvm_gfn_range *range)
1107 /* If we have a non-accessed entry we don't need to change the pte. */
1108 if (!is_accessed_spte(iter->old_spte))
1111 new_spte = iter->old_spte;
1113 if (spte_ad_enabled(new_spte)) {
1114 new_spte &= ~shadow_accessed_mask;
1117 * Capture the dirty status of the page, so that it doesn't get
1118 * lost when the SPTE is marked for access tracking.
1120 if (is_writable_pte(new_spte))
1121 kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1123 new_spte = mark_spte_for_access_track(new_spte);
1126 tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
1131 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1133 return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
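/* Check, without clearing, whether the SPTE has been accessed. */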
1136 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1137 struct kvm_gfn_range *range)
1139 return is_accessed_spte(iter->old_spte);
1142 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1144 return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
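/*
 * Handler for the changed_pte MMU notifier: zap the existing 4K SPTE and,
 * if the new host PTE is read-only, install a new SPTE for the updated PFN.
 */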
1147 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
1148 struct kvm_gfn_range *range)
1152 /* Huge pages aren't expected to be modified without first being zapped. */
1153 WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
1155 if (iter->level != PG_LEVEL_4K ||
1156 !is_shadow_present_pte(iter->old_spte))
1160 * Note, when changing a read-only SPTE, it's not strictly necessary to
1161 * zero the SPTE before setting the new PFN, but doing so preserves the
1162 * invariant that the PFN of a present leaf SPTE can never change.
1163 * See __handle_changed_spte().
1165 tdp_mmu_set_spte(kvm, iter, 0);
1167 if (!pte_write(range->pte)) {
1168 new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
1169 pte_pfn(range->pte));
1171 tdp_mmu_set_spte(kvm, iter, new_spte);
1178 * Handle the changed_pte MMU notifier for the TDP MMU.
1179 * range->pte holds the new host PTE mapping the HVA covered by the notifier.
1181 * Returns non-zero if a flush is needed before releasing the MMU lock.
1183 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1185 bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
1187 /* FIXME: return 'flush' instead of flushing here. */
1189 kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
1195 * Remove write access from all SPTEs at or above min_level that map GFNs
1196 * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1199 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1200 gfn_t start, gfn_t end, int min_level)
1202 struct tdp_iter iter;
1204 bool spte_set = false;
1208 BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1210 for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1211 min_level, start, end) {
1213 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1216 if (!is_shadow_present_pte(iter.old_spte) ||
1217 !is_last_spte(iter.old_spte, iter.level) ||
1218 !(iter.old_spte & PT_WRITABLE_MASK))
1221 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1223 if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
1226 * The iter must explicitly re-read the SPTE because
1227 * the atomic cmpxchg failed.
1229 iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1240 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1241 * only affect leaf SPTEs down to min_level.
1242 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1244 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
1247 struct kvm_mmu_page *root;
1248 bool spte_set = false;
1250 lockdep_assert_held_read(&kvm->mmu_lock);
1252 for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1253 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1254 slot->base_gfn + slot->npages, min_level);
1260 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1261 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1262 * If AD bits are not enabled, this will require clearing the writable bit on
1263 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1266 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1267 gfn_t start, gfn_t end)
1269 struct tdp_iter iter;
1271 bool spte_set = false;
1275 tdp_root_for_each_leaf_pte(iter, root, start, end) {
1277 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1280 if (spte_ad_need_write_protect(iter.old_spte)) {
1281 if (is_writable_pte(iter.old_spte))
1282 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1286 if (iter.old_spte & shadow_dirty_mask)
1287 new_spte = iter.old_spte & ~shadow_dirty_mask;
1292 if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
1295 * The iter must explicitly re-read the SPTE because
1296 * the atomic cmpxchg failed.
1298 iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1309 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1310 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1311 * If AD bits are not enabled, this will require clearing the writable bit on
1312 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1315 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
1317 struct kvm_mmu_page *root;
1318 bool spte_set = false;
1320 lockdep_assert_held_read(&kvm->mmu_lock);
1322 for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1323 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1324 slot->base_gfn + slot->npages);
1330 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1331 * set in mask, starting at gfn. The given memslot is expected to contain all
1332 * the GFNs represented by set bits in the mask. If AD bits are enabled,
1333 * clearing the dirty status will involve clearing the dirty bit on each SPTE
1334 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1336 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1337 gfn_t gfn, unsigned long mask, bool wrprot)
1339 struct tdp_iter iter;
1344 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1345 gfn + BITS_PER_LONG) {
1349 if (iter.level > PG_LEVEL_4K ||
1350 !(mask & (1UL << (iter.gfn - gfn))))
1353 mask &= ~(1UL << (iter.gfn - gfn));
1355 if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1356 if (is_writable_pte(iter.old_spte))
1357 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1361 if (iter.old_spte & shadow_dirty_mask)
1362 new_spte = iter.old_spte & ~shadow_dirty_mask;
1367 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1374 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1375 * set in mask, starting at gfn. The given memslot is expected to contain all
1376 * the GFNs represented by set bits in the mask. If AD bits are enabled,
1377 * clearing the dirty status will involve clearing the dirty bit on each SPTE
1378 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1380 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1381 struct kvm_memory_slot *slot,
1382 gfn_t gfn, unsigned long mask,
1385 struct kvm_mmu_page *root;
1387 lockdep_assert_held_write(&kvm->mmu_lock);
1388 for_each_tdp_mmu_root(kvm, root, slot->as_id)
1389 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1393 * Clear leaf entries which could be replaced by large mappings, for
1394 * GFNs within the slot.
1396 static bool zap_collapsible_spte_range(struct kvm *kvm,
1397 struct kvm_mmu_page *root,
1398 const struct kvm_memory_slot *slot,
1401 gfn_t start = slot->base_gfn;
1402 gfn_t end = start + slot->npages;
1403 struct tdp_iter iter;
1408 tdp_root_for_each_pte(iter, root, start, end) {
1410 if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
1415 if (!is_shadow_present_pte(iter.old_spte) ||
1416 !is_last_spte(iter.old_spte, iter.level))
1419 pfn = spte_to_pfn(iter.old_spte);
1420 if (kvm_is_reserved_pfn(pfn) ||
1421 iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
1425 if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
1427 * The iter must explicitly re-read the SPTE because
1428 * the atomic cmpxchg failed.
1430 iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1442 * Clear non-leaf entries (and free associated page tables) which could
1443 * be replaced by large mappings, for GFNs within the slot.
1445 bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1446 const struct kvm_memory_slot *slot,
1449 struct kvm_mmu_page *root;
1451 lockdep_assert_held_read(&kvm->mmu_lock);
1453 for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1454 flush = zap_collapsible_spte_range(kvm, root, slot, flush);
1460 * Removes write access on the last level SPTE mapping this GFN and unsets the
1461 * MMU-writable bit to ensure future writes continue to be intercepted.
1462 * Returns true if an SPTE was set and a TLB flush is needed.
1464 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1467 struct tdp_iter iter;
1469 bool spte_set = false;
1473 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
1474 if (!is_writable_pte(iter.old_spte))
1477 new_spte = iter.old_spte &
1478 ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
1480 tdp_mmu_set_spte(kvm, &iter, new_spte);
1490 * Removes write access on the last level SPTE mapping this GFN and unsets the
1491 * MMU-writable bit to ensure future writes continue to be intercepted.
1492 * Returns true if an SPTE was set and a TLB flush is needed.
1494 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1495 struct kvm_memory_slot *slot, gfn_t gfn)
1497 struct kvm_mmu_page *root;
1498 bool spte_set = false;
1500 lockdep_assert_held_write(&kvm->mmu_lock);
1501 for_each_tdp_mmu_root(kvm, root, slot->as_id)
1502 spte_set |= write_protect_gfn(kvm, root, gfn);
1508 * Return the level of the lowest level SPTE added to sptes.
1509 * That SPTE may be non-present.
1511 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
1514 struct tdp_iter iter;
1515 struct kvm_mmu *mmu = vcpu->arch.mmu;
1516 gfn_t gfn = addr >> PAGE_SHIFT;
1519 *root_level = vcpu->arch.mmu->shadow_root_level;
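/*
 * Walk the paging structure for the given address, recording the SPTE seen
 * at each level; the last entry recorded is the lowest-level SPTE reached.
 */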
1523 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1525 sptes[leaf] = iter.old_spte;