// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

/* Initializes the TDP MMU for the VM, if enabled. */
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return false;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);

	return true;
}

static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.
	 */
	rcu_barrier();
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared);

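/* Free the page table page and the struct kvm_mmu_page that tracks it. */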
static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!root->tdp_mmu_page);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

	zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);

	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/*
 * Finds the next valid root after root (or the first valid root if root
 * is NULL), takes a reference on it, and returns that next root. If root
 * is not NULL, this thread should have already taken a reference on it, and
 * that reference will be dropped. If no valid root is found, this
 * function will return NULL.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared);		\
	     _root;							\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared))		\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

#define for_each_tdp_mmu_root(_kvm, _root, _as_id)				\
	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,		\
				lockdep_is_held_type(&kvm->mmu_lock, 0) ||	\
				lockdep_is_held(&kvm->arch.tdp_mmu_pages_lock))	\
		if (kvm_mmu_page_as_id(_root) != _as_id) {			\
		} else

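/*
 * Compute the page role for a TDP MMU page at the given level. TDP MMU pages
 * are always direct maps of guest physical memory with full access
 * permissions; only the level differs between pages.
 */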
static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.gpte_is_8_bytes = true;
	role.access = ACC_ALL;

	return role;
}

static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}

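/*
 * Get a root page table for the vCPU's current mode, reusing an existing root
 * with a matching role when possible and allocating a new one otherwise.
 * Returns the physical address of the root, suitable for loading into the vCPU.
 */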
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	/* Check for an existing root before allocating a new one. */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(kvm, root))
			goto out;
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

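/*
 * Propagate accessed-bit information to the primary MM when a present leaf
 * SPTE is zapped, loses its accessed bit, or changes PFN.
 */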
static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

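/*
 * Mark the GFN dirty in the memslot's dirty bitmap when a 4K SPTE becomes
 * writable (or changes to a new, writable PFN).
 */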
static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 * @account_nx: This page replaces a NX large page and should be marked for
 *		eventual reclaim.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool shared, bool account_nx)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
	if (account_nx)
		account_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
					bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	u64 old_child_spte;
	u64 *sptep;
	gfn_t gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_page(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		sptep = rcu_dereference(pt) + i;
		gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * set the SPTE until it is set from some other
			 * value to the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level,
				    shared);
	}

	kvm_flush_remote_tlbs_with_address(kvm, gfn,
					   KVM_PAGES_PER_HPAGE(level + 1));

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	if (is_large_pte(old_spte) != is_large_pte(new_spte)) {
		if (is_large_pte(old_spte))
			atomic64_sub(1, (atomic64_t*)&kvm->stat.lpages);
		else
			atomic64_add(1, (atomic64_t*)&kvm->stat.lpages);
	}

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent SPTEs.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present))
		handle_removed_tdp_mmu_page(kvm,
				spte_to_child_pt(old_spte, level), shared);
}

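/*
 * Wrapper around __handle_changed_spte that also performs the access-tracking
 * and dirty-logging bookkeeping for the change.
 */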
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic_no_dirty_log - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping, but do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Returns: true if the SPTE was set, false if it was not. If false is returned,
 *	    this function will have no side-effects.
 */
static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
							struct tdp_iter *iter,
							u64 new_spte)
{
	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Do not change removed SPTEs. Only the thread that froze the SPTE
	 * may modify it.
	 */
	if (is_removed_spte(iter->old_spte))
		return false;

	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
		      new_spte) != iter->old_spte)
		return false;

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return true;
}

static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter,
					   u64 new_spte)
{
	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
		return false;

	handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
				      iter->old_spte, new_spte, iter->level);
	return true;
}

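/*
 * Atomically zap an SPTE while holding the MMU lock for read: freeze the SPTE
 * with the special REMOVED_SPTE value, flush remote TLBs, and only then clear
 * it, so no vCPU can reuse the old translation or install a new one before the
 * flush completes.
 */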
static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter)
{
	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
		return false;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);

	return true;
}

/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(iter->old_spte));

	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, false);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

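/*
 * Convenience wrappers around __tdp_mmu_set_spte for callers holding the MMU
 * lock for write: the _no_acc_track and _no_dirty_log variants skip the
 * corresponding bookkeeping to avoid double counting accesses or writes.
 */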
static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

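/*
 * Iteration helpers: walk every SPTE under a given root, only the present
 * leaf SPTEs under a root, or every SPTE reachable from a vCPU's current
 * root, respectively, within the GFN range [_start, _end).
 */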
#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, it will also reset the tdp_iter's walk over the
 * paging structure and the calling function should skip to the next
 * iteration to allow the iterator to continue its traversal from the
 * paging structure root.
 *
 * Return true if this function yielded and the iterator's traversal was reset.
 * Return false if a yield was not needed.
 */
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
					     struct tdp_iter *iter, bool flush,
					     bool shared)
{
	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		rcu_read_unlock();

		if (flush)
			kvm_flush_remote_tlbs(kvm);

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		tdp_iter_restart(iter);

		return true;
	}

	return false;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 *
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 *
 * If shared is true, this thread holds the MMU lock in read mode and must
 * account for the possibility that other threads are modifying the paging
 * structures concurrently. If shared is false, this thread should hold the
 * MMU lock in write mode.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared)
{
	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool zap_all = (start == 0 && end >= max_gfn_host);
	struct tdp_iter iter;

	/*
	 * No need to try to step down in the iterator when zapping all SPTEs,
	 * zapping the top-level non-leaf SPTEs will recurse on their children.
	 */
	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;

	/*
	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
	 * and so KVM will never install a SPTE for such addresses.
	 */
	end = min(end, max_gfn_host);

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
retry:
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level, except when zapping all SPTEs.
		 */
		if (!zap_all &&
		    (iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (!shared) {
			tdp_mmu_set_spte(kvm, &iter, 0);
			flush = true;
		} else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
	}

	rcu_read_unlock();
	return flush;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 *
 * If shared is true, this thread holds the MMU lock in read mode and must
 * account for the possibility that other threads are modifying the paging
 * structures concurrently. If shared is false, this thread should hold the
 * MMU lock in write mode.
 */
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush,
				 bool shared)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
				      shared);

	return flush;
}

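/*
 * Zap every SPTE in every root, in every address space, flushing TLBs at the
 * end if any SPTEs were removed.
 */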
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	bool flush = false;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull,
						  flush, false);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

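/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL) that has been marked invalid and still holds a reference, i.e. a root
 * queued for teardown by kvm_tdp_mmu_invalidate_all_roots().
 */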
static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
						  struct kvm_mmu_page *prev_root)
{
	struct kvm_mmu_page *next_root;

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !(next_root->role.invalid &&
			      refcount_read(&next_root->tdp_mmu_root_count)))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &next_root->link,
						  typeof(*next_root), link);

	return next_root;
}

/*
 * Since kvm_tdp_mmu_zap_all_fast has acquired a reference to each
 * invalidated root, they will not be freed until this function drops the
 * reference. Before dropping that reference, tear down the paging
 * structure so that whichever thread does drop the last reference
 * only has to do a trivial amount of work. Since the roots are invalid,
 * no new SPTEs should be created under them.
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *next_root;
	struct kvm_mmu_page *root;
	bool flush = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	rcu_read_lock();

	root = next_invalidated_root(kvm, NULL);

	while (root) {
		next_root = next_invalidated_root(kvm, root);

		rcu_read_unlock();

		flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);

		/*
		 * Put the reference acquired in
		 * kvm_tdp_mmu_invalidate_roots
		 */
		kvm_tdp_mmu_put_root(kvm, root, true);

		root = next_root;

		rcu_read_lock();
	}

	rcu_read_unlock();

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Mark each TDP MMU root as invalid so that other threads
 * will drop their references and allow the root count to
 * go to 0.
 *
 * Also take a reference on all roots so that this thread
 * can do the bulk of the work required to free the roots
 * once they are invalidated. Without this reference, a
 * vCPU thread might drop the last reference to a root and
 * get stuck with tearing down the entire paging structure.
 *
 * Roots which have a zero refcount should be skipped as
 * they're already being torn down.
 * Already invalid roots should be referenced again so that
 * they aren't freed before kvm_tdp_mmu_zap_all_fast is
 * done with them.
 *
 * This has essentially the same effect for the TDP MMU
 * as updating mmu_valid_gen does for the shadow MMU.
 */
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
		if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
			root->role.invalid = true;
}

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
					   int map_writable,
					   struct tdp_iter *iter,
					   kvm_pfn_t pfn, bool prefault)
{
	u64 new_spte;
	int ret = RET_PF_FIXED;
	int make_spte_ret = 0;

	if (unlikely(is_noslot_pfn(pfn)))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
					  pfn, iter->old_spte, prefault, true,
					  map_writable, !shadow_accessed_mask,
					  &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write)
			ret = RET_PF_EMULATE;
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte))) {
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else {
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));
	}

	/*
	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
	 * consistent with legacy MMU behavior.
	 */
	if (ret != RET_PF_SPURIOUS)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int level;
	int req_level;

	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(gpa, level, pfn);

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(iter.old_spte, gfn,
						   iter.level, &pfn, &level);

		if (iter.level == level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
				break;

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
		}

		if (!is_shadow_present_pte(iter.old_spte)) {
			/*
			 * If SPTE has been frozen by another thread, just
			 * give up and retry, avoiding unnecessary page table
			 * allocation and free.
			 */
			if (is_removed_spte(iter.old_spte))
				break;

			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
			child_pt = sp->spt;

			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
						    new_spte)) {
				tdp_mmu_link_page(vcpu->kvm, sp, true,
						  huge_page_disallowed &&
						  req_level >= iter.level);

				trace_kvm_mmu_get_page(sp, true);
			} else {
				tdp_mmu_free_sp(sp);
				break;
			}
		}
	}

	if (iter.level != level) {
		rcu_read_unlock();
		return RET_PF_RETRY;
	}

	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
					      pfn, prefault);
	rcu_read_unlock();

	return ret;
}

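/*
 * Zap the GFN range covered by an MMU notifier unmap event in every root in
 * the range's address space. Returns true if a TLB flush is needed.
 */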
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
		flush |= zap_gfn_range(kvm, root, range->start, range->end,
				       range->may_block, flush, false);

	return flush;
}

typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
			      struct kvm_gfn_range *range);

static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
						   struct kvm_gfn_range *range,
						   tdp_handler_t handler)
{
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	rcu_read_lock();

	/*
	 * Don't support rescheduling, none of the MMU notifiers that funnel
	 * into this helper allow blocking; it'd be dead, wasteful code.
	 */
	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
			ret |= handler(kvm, &iter, range);
	}

	rcu_read_unlock();

	return ret;
}

/*
 * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero
 * if any of the GFNs in the range have been accessed.
 */
static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
			  struct kvm_gfn_range *range)
{
	u64 new_spte = 0;

	/* If we have a non-accessed entry we don't need to change the pte. */
	if (!is_accessed_spte(iter->old_spte))
		return false;

	new_spte = iter->old_spte;

	if (spte_ad_enabled(new_spte)) {
		new_spte &= ~shadow_accessed_mask;
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(new_spte))
			kvm_set_pfn_dirty(spte_to_pfn(new_spte));

		new_spte = mark_spte_for_access_track(new_spte);
	}

	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);

	return true;
}

bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
}

static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	return is_accessed_spte(iter->old_spte);
}

bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
}

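/*
 * Handler for the change_pte notifier on a single 4K SPTE: zap the old SPTE
 * and, if the new host PTE is read-only, install a new SPTE for the new PFN.
 */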
static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	u64 new_spte;

	/* Huge pages aren't expected to be modified without first being zapped. */
	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);

	if (iter->level != PG_LEVEL_4K ||
	    !is_shadow_present_pte(iter->old_spte))
		return false;

	/*
	 * Note, when changing a read-only SPTE, it's not strictly necessary to
	 * zero the SPTE before setting the new PFN, but doing so preserves the
	 * invariant that the PFN of a present leaf SPTE can never change.
	 * See __handle_changed_spte().
	 */
	tdp_mmu_set_spte(kvm, iter, 0);

	if (!pte_write(range->pte)) {
		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
								  pte_pfn(range->pte));

		tdp_mmu_set_spte(kvm, iter, new_spte);
	}

	return true;
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
 * notifier.
 * Returns non-zero if a flush is needed before releasing the MMU lock.
 */
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);

	/* FIXME: return 'flush' instead of flushing here. */
	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);

	return false;
}

/*
 * Remove write access from all SPTEs at or above min_level that map GFNs
 * [start, end). Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			     gfn_t start, gfn_t end, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level) ||
		    !(iter.old_spte & PT_WRITABLE_MASK))
			continue;

		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

		if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
							  new_spte)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
			     slot->base_gfn + slot->npages, min_level);

	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
							  new_spte)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
				slot->base_gfn + slot->npages);

	return spte_set;
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t gfn, unsigned long mask, bool wrprot)
{
	struct tdp_iter iter;
	u64 new_spte;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
				    gfn + BITS_PER_LONG) {
		if (!mask)
			break;

		if (iter.level > PG_LEVEL_4K ||
		    !(mask & (1UL << (iter.gfn - gfn))))
			continue;

		mask &= ~(1UL << (iter.gfn - gfn));

		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
	}

	rcu_read_unlock();
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}

/*
 * Clear leaf entries which could be replaced by large mappings, for
 * GFNs within the slot.
 */
static bool zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       const struct kvm_memory_slot *slot,
				       bool flush)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;
	struct tdp_iter iter;
	kvm_pfn_t pfn;

	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		pfn = spte_to_pfn(iter.old_spte);
		if (kvm_is_reserved_pfn(pfn) ||
		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
							    pfn, PG_LEVEL_NUM))
			continue;

		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
		flush = true;
	}

	rcu_read_unlock();

	return flush;
}

/*
 * Clear non-leaf entries (and free associated page tables) which could
 * be replaced by large mappings, for GFNs within the slot.
 */
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		flush = zap_collapsible_spte_range(kvm, root, slot, flush);

	return flush;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, gfn, gfn + 1) {
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (!is_writable_pte(iter.old_spte))
			break;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);

	return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->shadow_root_level;

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;