// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

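/*
 * A note on the knob above (standard module_param semantics, assumed
 * rather than spelled out here): mode 0644 exposes it writable at
 * /sys/module/kvm/parameters/tdp_mmu, and kvm_mmu_init_tdp_mmu() below
 * samples it once per VM with READ_ONCE(), so a change only affects VMs
 * created afterwards.
 */
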
/* Initializes the TDP MMU for the VM, if enabled. */
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return false;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);

	return true;
}

static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.
	 */
	rcu_barrier();
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared);

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

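/*
 * The asymmetric free above mirrors the allocation in alloc_tdp_mmu_page()
 * further down: the struct kvm_mmu_page header comes from the
 * mmu_page_header_cache slab, while sp->spt is a full page from the shadow
 * page cache, hence kmem_cache_free() for one and free_page() for the other.
 */
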
/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!root->tdp_mmu_page);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

	zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);

	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

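/*
 * Lifecycle sketch (a hypothetical caller, not code from this file): users
 * pair kvm_tdp_mmu_get_root() with a matching put, e.g.
 *
 *	if (kvm_tdp_mmu_get_root(root)) {
 *		... walk or modify the paging structure ...
 *		kvm_tdp_mmu_put_root(kvm, root, shared);
 *	}
 *
 * Only the final put tears the root down: it is unlinked from
 * tdp_mmu_roots, its subtree is zapped, and the page itself is freed only
 * after an RCU grace period via tdp_mmu_free_sp_rcu_callback().
 */
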
/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL).  A reference to the returned root is acquired, and the reference to
 * @prev_root is released (the caller obviously must hold a reference to
 * @prev_root if it's non-NULL).
 *
 * If @only_valid is true, invalid roots are skipped.
 *
 * Returns NULL if the end of tdp_mmu_roots was reached.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared, bool only_valid)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root) {
		if ((!only_valid || !next_root->role.invalid) &&
		    kvm_tdp_mmu_get_root(next_root))
			break;

		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);
	}

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
	     _root;								\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
		if (kvm_mmu_page_as_id(_root) != _as_id) {			\
		} else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)		\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, false)

#define for_each_tdp_mmu_root(_kvm, _root, _as_id)				\
	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,		\
				lockdep_is_held_type(&_kvm->mmu_lock, 0) ||	\
				lockdep_is_held(&_kvm->arch.tdp_mmu_pages_lock)) \
		if (kvm_mmu_page_as_id(_root) != _as_id) {			\
		} else

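/*
 * Usage sketch (a hypothetical caller mirroring the real users below):
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
 *		flush = zap_gfn_range(kvm, root, start, end, true, flush, false);
 *
 * The empty "if { } else" in the macros above filters out roots belonging
 * to other address spaces while keeping the construct a single statement,
 * so an unbraced loop body still binds correctly.
 */
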
static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.has_4_byte_gpte = false;
	role.access = ACC_ALL;
	role.ad_disabled = !shadow_accessed_mask;

	return role;
}

static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	/*
	 * Check for an existing root before allocating a new one.  Note, the
	 * role check prevents consuming an invalid root.
	 */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(root))
			goto out;
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

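/*
 * Example: zapping a present, accessed leaf SPTE (present -> non-present)
 * satisfies the second condition above and forwards the accessed state to
 * the backing page via kvm_set_pfn_accessed(), so the primary MM does not
 * lose the guest's access information.
 */
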
static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @account_nx: This page replaces a NX large page and should be marked for
 *		eventual reclaim.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool account_nx)
{
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
	if (account_nx)
		account_huge_nx_page(kvm, sp);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
					bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_page(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		u64 *sptep = rcu_dereference(pt) + i;
		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
		u64 old_child_spte;

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * retry the exchange until the value read back
			 * is something other than the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level,
				    shared);
	}

	kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
					   KVM_PAGES_PER_HPAGE(level + 1));

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	if (is_leaf)
		check_spte_writable_invariants(new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent SPTEs.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (is_leaf != was_leaf)
		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present))
		handle_removed_tdp_mmu_page(kvm,
				spte_to_child_pt(old_spte, level), shared);
}

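/*
 * Decision summary for __handle_changed_spte(), derived from its code paths:
 *  - non-present -> non-present: legal only for MMIO/removed SPTEs, no-op.
 *  - leaf added or removed: page stats are adjusted by +/-1 at @level.
 *  - dirty leaf torn down or PFN changed: dirty state is propagated via
 *    kvm_set_pfn_dirty().
 *  - non-leaf removed (or PFN changed): the child page table is torn down
 *    recursively through handle_removed_tdp_mmu_page().
 */
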
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping.  Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * If setting the SPTE fails because it has changed, iter->old_spte will be
 * refreshed to the current value of the spte.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Returns: true if the SPTE was set, false if it was not. If false is returned,
 *	    this function will have no side-effects other than setting
 *	    iter->old_spte to the last known value of the spte.
 */
static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter,
					   u64 new_spte)
{
	u64 *sptep = rcu_dereference(iter->sptep);
	u64 old_spte;

	WARN_ON_ONCE(iter->yielded);

	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Do not change removed SPTEs. Only the thread that froze the SPTE
	 * may modify it.
	 */
	if (is_removed_spte(iter->old_spte))
		return false;

	/*
	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
	 * does not hold the mmu_lock.
	 */
	old_spte = cmpxchg64(sptep, iter->old_spte, new_spte);
	if (old_spte != iter->old_spte) {
		/*
		 * The page table entry was modified by a different logical
		 * CPU. Refresh iter->old_spte with the current value so the
		 * caller operates on fresh data, e.g. if it retries
		 * tdp_mmu_set_spte_atomic().
		 */
		iter->old_spte = old_spte;
		return false;
	}

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return true;
}

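/*
 * Typical retry pattern in callers (a sketch; compare zap_gfn_range() and
 * wrprot_gfn_range() below):
 *
 *	if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *		goto retry;
 *
 * Because a failed cmpxchg64() refreshes iter->old_spte, the caller can
 * recompute new_spte from fresh data instead of re-reading the SPTE.
 */
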
static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter)
{
	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
		return false;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);

	return true;
}

/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	WARN_ON_ONCE(iter->yielded);

	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(iter->old_spte));

	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, false);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

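/*
 * The three wrappers above map onto __tdp_mmu_set_spte()'s flags as follows:
 *
 *	tdp_mmu_set_spte()		acc_track=true,  dirty_log=true
 *	tdp_mmu_set_spte_no_acc_track()	acc_track=false, dirty_log=true
 *	tdp_mmu_set_spte_no_dirty_log()	acc_track=true,  dirty_log=false
 *
 * i.e. each opts out of exactly one piece of bookkeeping to avoid double
 * counting in the aging and dirty-logging paths, respectively.
 */
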
#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)	\
		if (!is_shadow_present_pte(_iter.old_spte) ||	\
		    !is_last_spte(_iter.old_spte, _iter.level))	\
			continue;				\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, iter->yielded is set and the caller must skip to
 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
 * over the paging structures to allow the iterator to continue its traversal
 * from the paging structure root.
 *
 * Returns true if this function yielded.
 */
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
							  struct tdp_iter *iter,
							  bool flush, bool shared)
{
	WARN_ON(iter->yielded);

	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		rcu_read_unlock();

		if (flush)
			kvm_flush_remote_tlbs(kvm);

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		iter->yielded = true;
	}

	return iter->yielded;
}

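/*
 * Caller protocol for the yield helper above, sketched from the walkers
 * below: test it first thing in the loop body and skip the current
 * iteration if it yielded, since the iterator restarts its walk.
 *
 *	for_each_tdp_pte(iter, ...) {
 *		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared))
 *			continue;
 *		...
 *	}
 */
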
/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 *
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 *
 * If shared is true, this thread holds the MMU lock in read mode and must
 * account for the possibility that other threads are modifying the paging
 * structures concurrently. If shared is false, this thread should hold the
 * MMU lock in write mode.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared)
{
	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool zap_all = (start == 0 && end >= max_gfn_host);
	struct tdp_iter iter;

	/*
	 * No need to try to step down in the iterator when zapping all SPTEs,
	 * zapping the top-level non-leaf SPTEs will recurse on their children.
	 */
	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;

	/*
	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
	 * and so KVM will never install a SPTE for such addresses.
	 */
	end = min(end, max_gfn_host);

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
retry:
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level, except when zapping all SPTEs.
		 */
		if (!zap_all &&
		    (iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (!shared) {
			tdp_mmu_set_spte(kvm, &iter, 0);
			flush = true;
		} else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			goto retry;
		}
	}

	rcu_read_unlock();
	return flush;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
				      false);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	bool flush = false;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
						  struct kvm_mmu_page *prev_root)
{
	struct kvm_mmu_page *next_root;

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !(next_root->role.invalid &&
			      refcount_read(&next_root->tdp_mmu_root_count)))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &next_root->link,
						  typeof(*next_root), link);

	return next_root;
}

/*
 * Since kvm_tdp_mmu_invalidate_all_roots has acquired a reference to each
 * invalidated root, they will not be freed until this function drops the
 * reference. Before dropping that reference, tear down the paging
 * structure so that whichever thread does drop the last reference
 * only has to do a trivial amount of work. Since the roots are invalid,
 * no new SPTEs should be created under them.
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *next_root;
	struct kvm_mmu_page *root;
	bool flush = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	rcu_read_lock();

	root = next_invalidated_root(kvm, NULL);

	while (root) {
		next_root = next_invalidated_root(kvm, root);

		rcu_read_unlock();

		flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);

		/*
		 * Put the reference acquired in
		 * kvm_tdp_mmu_invalidate_all_roots().
		 */
		kvm_tdp_mmu_put_root(kvm, root, true);

		root = next_root;

		rcu_read_lock();
	}

	rcu_read_unlock();

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Mark each TDP MMU root as invalid so that other threads
 * will drop their references and allow the root count to
 * go to 0.
 *
 * Also take a reference on all roots so that this thread
 * can do the bulk of the work required to free the roots
 * once they are invalidated. Without this reference, a
 * vCPU thread might drop the last reference to a root and
 * get stuck with tearing down the entire paging structure.
 *
 * Roots which have a zero refcount should be skipped as
 * they're already being torn down.
 * Already invalid roots should be referenced again so that
 * they aren't freed before kvm_tdp_mmu_zap_invalidated_roots
 * is done with them.
 *
 * This has essentially the same effect for the TDP MMU
 * as updating mmu_valid_gen does for the shadow MMU.
 */
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
		if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
			root->role.invalid = true;
}

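/*
 * Fast-zap lifecycle, as implied by the two functions above: the caller
 * (kvm_mmu_zap_all_fast() in mmu.c) first invokes
 * kvm_tdp_mmu_invalidate_all_roots() under the write-mode MMU lock, which
 * is cheap (a flag and a refcount per root), then drops to read mode and
 * runs kvm_tdp_mmu_zap_invalidated_roots() so the bulk teardown proceeds
 * concurrently with vCPUs, which can no longer pick up the invalid roots.
 */
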
/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
					   struct kvm_page_fault *fault,
					   struct tdp_iter *iter)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
	u64 new_spte;
	int ret = RET_PF_FIXED;
	bool wrprot = false;

	WARN_ON(sp->role.level != fault->goal_level);
	if (unlikely(!fault->slot))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
				   fault->pfn, iter->old_spte, fault->prefetch,
				   true, fault->map_writable, &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (wrprot) {
		if (fault->write)
			ret = RET_PF_EMULATE;
	}

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte))) {
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else {
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));
	}

	/*
	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
	 * consistent with legacy MMU behavior.
	 */
	if (ret != RET_PF_SPURIOUS)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;

	kvm_mmu_hugepage_adjust(vcpu, fault);

	trace_kvm_mmu_spte_requested(fault);

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
		if (fault->nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);

		if (iter.level == fault->goal_level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
				break;

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
		}

		if (!is_shadow_present_pte(iter.old_spte)) {
			/*
			 * If SPTE has been frozen by another thread, just
			 * give up and retry, avoiding unnecessary page table
			 * allocation and free.
			 */
			if (is_removed_spte(iter.old_spte))
				break;

			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
			child_pt = sp->spt;

			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, new_spte)) {
				tdp_mmu_link_page(vcpu->kvm, sp,
						  fault->huge_page_disallowed &&
						  fault->req_level >= iter.level);

				trace_kvm_mmu_get_page(sp, true);
			} else {
				tdp_mmu_free_sp(sp);
				break;
			}
		}
	}

	if (iter.level != fault->goal_level) {
		rcu_read_unlock();
		return RET_PF_RETRY;
	}

	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
	rcu_read_unlock();

	return ret;
}

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
					   range->end, range->may_block, flush);
}

typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
			      struct kvm_gfn_range *range);

static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
						   struct kvm_gfn_range *range,
						   tdp_handler_t handler)
{
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	rcu_read_lock();

	/*
	 * Don't support rescheduling, none of the MMU notifiers that funnel
	 * into this helper allow blocking; it'd be dead, wasteful code.
	 */
	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
			ret |= handler(kvm, &iter, range);
	}

	rcu_read_unlock();

	return ret;
}

/*
 * Mark the SPTEs mapping GFNs in the range [start, end) unaccessed and
 * return non-zero if any of the GFNs in the range have been accessed.
 */
static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
			  struct kvm_gfn_range *range)
{
	u64 new_spte = 0;

	/* If we have a non-accessed entry we don't need to change the pte. */
	if (!is_accessed_spte(iter->old_spte))
		return false;

	new_spte = iter->old_spte;

	if (spte_ad_enabled(new_spte)) {
		new_spte &= ~shadow_accessed_mask;
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(new_spte))
			kvm_set_pfn_dirty(spte_to_pfn(new_spte));

		new_spte = mark_spte_for_access_track(new_spte);
	}

	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);

	return true;
}

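/*
 * age_gfn_range() shows both aging strategies: with A/D bits it simply
 * clears shadow_accessed_mask; without them it falls back to access
 * tracking, where mark_spte_for_access_track() makes the SPTE non-present
 * so the next guest access faults and re-establishes the accessed state.
 * The wrappers below plug it into the MMU-notifier aging callbacks.
 */
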
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
}

static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	return is_accessed_spte(iter->old_spte);
}

bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
}

static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	u64 new_spte;

	/* Huge pages aren't expected to be modified without first being zapped. */
	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);

	if (iter->level != PG_LEVEL_4K ||
	    !is_shadow_present_pte(iter->old_spte))
		return false;

	/*
	 * Note, when changing a read-only SPTE, it's not strictly necessary to
	 * zero the SPTE before setting the new PFN, but doing so preserves the
	 * invariant that the PFN of a present leaf SPTE can never change.
	 * See __handle_changed_spte().
	 */
	tdp_mmu_set_spte(kvm, iter, 0);

	if (!pte_write(range->pte)) {
		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
								  pte_pfn(range->pte));

		tdp_mmu_set_spte(kvm, iter, new_spte);
	}

	return true;
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
 * notifier.
 * Returns non-zero if a flush is needed before releasing the MMU lock.
 */
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);

	/* FIXME: return 'flush' instead of flushing here. */
	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);

	return false;
}

/*
 * Remove write access from all SPTEs at or above min_level that map GFNs
 * [start, end). Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			     gfn_t start, gfn_t end, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level) ||
		    !(iter.old_spte & PT_WRITABLE_MASK))
			continue;

		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

		if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
			goto retry;

		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
			     slot->base_gfn + slot->npages, min_level);

	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
			goto retry;

		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
				  slot->base_gfn + slot->npages);

	return spte_set;
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t gfn, unsigned long mask, bool wrprot)
{
	struct tdp_iter iter;
	u64 new_spte;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
				   gfn + BITS_PER_LONG) {
		if (!mask)
			break;

		if (iter.level > PG_LEVEL_4K ||
		    !(mask & (1UL << (iter.gfn - gfn))))
			continue;

		mask &= ~(1UL << (iter.gfn - gfn));

		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
	}

	rcu_read_unlock();
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}

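/*
 * Worked example of the mask convention above: bit N of @mask selects
 * gfn + N, so mask = 0b0101 cleans only gfn + 0 and gfn + 2. The walk
 * starts at gfn + __ffs(mask) (the lowest set bit), each handled GFN
 * clears its bit, and the loop stops as soon as mask is empty.
 */
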
/*
 * Clear leaf entries which could be replaced by large mappings, for
 * GFNs within the slot.
 */
static void zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       const struct kvm_memory_slot *slot)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;
	struct tdp_iter iter;
	kvm_pfn_t pfn;

	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		pfn = spte_to_pfn(iter.old_spte);
		if (kvm_is_reserved_pfn(pfn) ||
		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
							    pfn, PG_LEVEL_NUM))
			continue;

		/* Note, a successful atomic zap also does a remote TLB flush. */
		if (!tdp_mmu_zap_spte_atomic(kvm, &iter))
			goto retry;
	}

	rcu_read_unlock();
}

/*
 * Clear non-leaf entries (and free associated page tables) which could
 * be replaced by large mappings, for GFNs within the slot.
 */
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		zap_collapsible_spte_range(kvm, root, slot);
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, gfn, gfn + 1) {
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);

		if (new_spte == iter.old_spte)
			break;

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);

	return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 *
 * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->shadow_root_level;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	return leaf;
}

/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
 *
 * WARNING: This function is only intended to be called during fast_page_fault.
 */
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	tdp_ptep_t sptep = NULL;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		*spte = iter.old_spte;
		sptep = iter.sptep;
	}

	/*
	 * Perform the rcu_dereference to get the raw spte pointer value since
	 * we are passing it up to fast_page_fault, which is shared with the
	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
	 * annotation.
	 *
	 * This is safe since fast_page_fault obeys the contracts of this
	 * function as well as all TDP MMU contracts around modifying SPTEs
	 * outside of mmu_lock.
	 */
	return rcu_dereference(sptep);
}