1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 #include "mmu_internal.h"
11 #include <asm/cmpxchg.h>
12 #include <trace/events/kvm.h>
14 /* Initializes the TDP MMU for the VM, if enabled. */
15 void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
17 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
18 spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
21 /* Arbitrarily returns true so that this may be used in if statements. */
22 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
26 lockdep_assert_held_read(&kvm->mmu_lock);
28 lockdep_assert_held_write(&kvm->mmu_lock);
33 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
36 * Invalidate all roots, which, besides the obvious, schedules all roots
37 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
38 * ultimately frees all roots.
40 kvm_tdp_mmu_invalidate_all_roots(kvm);
41 kvm_tdp_mmu_zap_invalidated_roots(kvm);
43 WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
44 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
47 * Ensure that all the outstanding RCU callbacks to free shadow pages
48 * can run before the VM is torn down. Putting the last reference to
49 * zapped roots will create new callbacks.
54 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
56 free_page((unsigned long)sp->spt);
57 kmem_cache_free(mmu_page_header_cache, sp);
61 * This is called through call_rcu in order to free TDP page table memory
62 * safely with respect to other kernel threads that may be operating on
64 * By only accessing TDP MMU page table memory in an RCU read critical
65 * section, and freeing it after a grace period, lockless accessors are
66 * guaranteed not to use the memory after it is freed.
68 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
70 struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
78 if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
82 * The TDP MMU itself holds a reference to each root until the root is
83 * explicitly invalidated, i.e. the final reference should never be
84 * put for a valid root.
86 KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
88 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
89 list_del_rcu(&root->link);
90 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
91 call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
95 * Returns the next root after @prev_root (or the first root if @prev_root is
96 * NULL). A reference to the returned root is acquired, and the reference to
97 * @prev_root is released (the caller obviously must hold a reference to
98 * @prev_root if it's non-NULL).
100 * If @only_valid is true, invalid roots are skipped.
102 * Returns NULL if the end of tdp_mmu_roots was reached.
104 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
105 struct kvm_mmu_page *prev_root,
108 struct kvm_mmu_page *next_root;
111 * While the roots themselves are RCU-protected, fields such as
112 * role.invalid are protected by mmu_lock.
114 lockdep_assert_held(&kvm->mmu_lock);
119 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
121 typeof(*prev_root), link);
123 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
124 typeof(*next_root), link);
127 if ((!only_valid || !next_root->role.invalid) &&
128 kvm_tdp_mmu_get_root(next_root))
131 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
132 &next_root->link, typeof(*next_root), link);
138 kvm_tdp_mmu_put_root(kvm, prev_root);
144 * Note: this iterator gets and puts references to the roots it iterates over.
145 * This makes it safe to release the MMU lock and yield within the loop, but
146 * if exiting the loop early, the caller must drop the reference to the most
147 * recent root, as sketched below. (Unless keeping a live reference is desirable.)
149 * If shared is set, this function is operating under the MMU lock in read
152 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid) \
153 for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \
154 ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
155 _root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \
156 if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) { \
159 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id) \
160 __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
162 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
163 for (_root = tdp_mmu_next_root(_kvm, NULL, false); \
164 ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
165 _root = tdp_mmu_next_root(_kvm, _root, false))
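/*
* A minimal usage sketch for the yield-safe iterators above: a caller that
* breaks out of the walk early still owns a reference to the current root
* and must put it explicitly. The done_with_walk() predicate is hypothetical;
* only the iterator and kvm_tdp_mmu_put_root() are real.
*
*	for_each_tdp_mmu_root_yield_safe(kvm, root) {
*		if (done_with_walk(root)) {
*			kvm_tdp_mmu_put_root(kvm, root);
*			break;
*		}
*	}
*/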
168 * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
169 * the implication being that any flow that holds mmu_lock for read is
170 * inherently yield-friendly and should use the yield-safe variant above.
171 * Holding mmu_lock for write obviates the need for RCU protection as the list
172 * is guaranteed to be stable.
174 #define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _only_valid) \
175 list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) \
176 if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) && \
177 ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) || \
178 ((_only_valid) && (_root)->role.invalid))) { \
181 #define for_each_tdp_mmu_root(_kvm, _root, _as_id) \
182 __for_each_tdp_mmu_root(_kvm, _root, _as_id, false)
184 #define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id) \
185 __for_each_tdp_mmu_root(_kvm, _root, _as_id, true)
187 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
189 struct kvm_mmu_page *sp;
191 sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
192 sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
197 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
198 gfn_t gfn, union kvm_mmu_page_role role)
200 INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
202 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
207 sp->tdp_mmu_page = true;
209 trace_kvm_mmu_get_page(sp, true);
212 static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
213 struct tdp_iter *iter)
215 struct kvm_mmu_page *parent_sp;
216 union kvm_mmu_page_role role;
218 parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
220 role = parent_sp->role;
223 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
226 int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu)
228 struct kvm_mmu *mmu = vcpu->arch.mmu;
229 union kvm_mmu_page_role role = mmu->root_role;
230 int as_id = kvm_mmu_role_as_id(role);
231 struct kvm *kvm = vcpu->kvm;
232 struct kvm_mmu_page *root;
235 * Check for an existing root before acquiring the pages lock to avoid
236 * unnecessary serialization if multiple vCPUs are loading a new root.
237 * E.g. when bringing up secondary vCPUs, KVM will already have created
238 * a valid root on behalf of the primary vCPU.
240 read_lock(&kvm->mmu_lock);
242 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id) {
243 if (root->role.word == role.word)
244 goto out_read_unlock;
247 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
250 * Recheck for an existing root after acquiring the pages lock, another
251 * vCPU may have raced ahead and created a new usable root. Manually
252 * walk the list of roots as the standard macros assume that the pages
253 * lock is *not* held. WARN if grabbing a reference to a usable root
254 * fails, as the last reference to a root can only be put *after* the
255 * root has been invalidated, which requires holding mmu_lock for write.
257 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
258 if (root->role.word == role.word &&
259 !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root)))
260 goto out_spin_unlock;
263 root = tdp_mmu_alloc_sp(vcpu);
264 tdp_mmu_init_sp(root, NULL, 0, role);
267 * TDP MMU roots are kept until they are explicitly invalidated, either
268 * by a memslot update or by the destruction of the VM. Initialize the
269 * refcount to two; one reference for the vCPU, and one reference for
270 * the TDP MMU itself, which is held until the root is invalidated and
271 * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
273 refcount_set(&root->tdp_mmu_root_count, 2);
274 list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
277 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
279 read_unlock(&kvm->mmu_lock);
281 * Note, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS will prevent entering the guest
282 * and actually consuming the root if it's invalidated after dropping
283 * mmu_lock, and the root can't be freed as this vCPU holds a reference.
285 mmu->root.hpa = __pa(root->spt);
290 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
291 u64 old_spte, u64 new_spte, int level,
294 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
296 kvm_account_pgtable_pages((void *)sp->spt, +1);
297 atomic64_inc(&kvm->arch.tdp_mmu_pages);
300 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
302 kvm_account_pgtable_pages((void *)sp->spt, -1);
303 atomic64_dec(&kvm->arch.tdp_mmu_pages);
307 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
310 * @sp: the page to be removed
312 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
314 tdp_unaccount_mmu_page(kvm, sp);
316 if (!sp->nx_huge_page_disallowed)
319 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
320 sp->nx_huge_page_disallowed = false;
321 untrack_possible_nx_huge_page(kvm, sp);
322 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
326 * handle_removed_pt() - handle a page table removed from the TDP structure
329 * @pt: the page removed from the paging structure
330 * @shared: This operation may not be running under the exclusive use
331 * of the MMU lock and the operation must synchronize with other
332 * threads that might be modifying SPTEs.
334 * Given a page table that has been removed from the TDP paging structure,
335 * iterates through the page table to clear SPTEs and free child page tables.
337 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
338 * protection. Since this thread removed it from the paging structure,
339 * this thread will be responsible for ensuring the page is freed. Hence the
340 * early rcu_dereferences in the function.
342 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
344 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
345 int level = sp->role.level;
346 gfn_t base_gfn = sp->gfn;
349 trace_kvm_mmu_prepare_zap_page(sp);
351 tdp_mmu_unlink_sp(kvm, sp);
353 for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
354 tdp_ptep_t sptep = pt + i;
355 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
360 * Set the SPTE to a nonpresent value that other
361 * threads will not overwrite. If the SPTE was
362 * already marked as removed then another thread
363 * handling a page fault could overwrite it, so
364 * retry setting the SPTE until it transitions from
365 * some other value to the removed SPTE value.
368 old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
369 if (!is_removed_spte(old_spte))
375 * If the SPTE is not MMU-present, there is no backing
376 * page associated with the SPTE and so no side effects
377 * that need to be recorded, and exclusive ownership of
378 * mmu_lock ensures the SPTE can't be made present.
379 * Note, zapping MMIO SPTEs is also unnecessary as they
380 * are guarded by the memslots generation, not by being
383 old_spte = kvm_tdp_mmu_read_spte(sptep);
384 if (!is_shadow_present_pte(old_spte))
388 * Use the common helper instead of a raw WRITE_ONCE as
389 * the SPTE needs to be updated atomically if it can be
390 * modified by a different vCPU outside of mmu_lock.
391 * Even though the parent SPTE is !PRESENT, the TLB
392 * hasn't yet been flushed, and both Intel and AMD
393 * document that A/D assists can use upper-level PxE
394 * entries that are cached in the TLB, i.e. the CPU can
395 * still access the page and mark it dirty.
397 * No retry is needed in the atomic update path as the
398 * sole concern is dropping a Dirty bit, i.e. no other
399 * task can zap/remove the SPTE as mmu_lock is held for
400 * write. Marking the SPTE as a removed SPTE is not
401 * strictly necessary for the same reason, but using
402 * the removed SPTE value keeps the shared/exclusive
403 * paths consistent and allows the handle_changed_spte()
404 * call below to hardcode the new value to REMOVED_SPTE.
406 * Note, even though dropping a Dirty bit is the only
407 * scenario where a non-atomic update could result in a
408 * functional bug, simply checking the Dirty bit isn't
409 * sufficient as a fast page fault could read the upper
410 * level SPTE before it is zapped, and then make this
411 * target SPTE writable, resume the guest, and set the
412 * Dirty bit between reading the SPTE above and writing
415 old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
416 REMOVED_SPTE, level);
418 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
419 old_spte, REMOVED_SPTE, level, shared);
422 call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
426 * handle_changed_spte - handle bookkeeping associated with an SPTE change
428 * @as_id: the address space of the paging structure the SPTE was a part of
429 * @gfn: the base GFN that was mapped by the SPTE
430 * @old_spte: The value of the SPTE before the change
431 * @new_spte: The value of the SPTE after the change
432 * @level: the level of the PT the SPTE is part of in the paging structure
433 * @shared: This operation may not be running under the exclusive use of
434 * the MMU lock and the operation must synchronize with other
435 * threads that might be modifying SPTEs.
437 * Handle bookkeeping that might result from the modification of a SPTE. Note,
438 * dirty logging updates are handled in common code, not here (see make_spte()
439 * and fast_pf_fix_direct_spte()).
441 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
442 u64 old_spte, u64 new_spte, int level,
445 bool was_present = is_shadow_present_pte(old_spte);
446 bool is_present = is_shadow_present_pte(new_spte);
447 bool was_leaf = was_present && is_last_spte(old_spte, level);
448 bool is_leaf = is_present && is_last_spte(new_spte, level);
449 bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
451 WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL);
452 WARN_ON_ONCE(level < PG_LEVEL_4K);
453 WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
456 * If this warning were to trigger it would indicate that there was a
457 * missing MMU notifier or a race with some notifier handler.
458 * A present, leaf SPTE should never be directly replaced with another
459 * present leaf SPTE pointing to a different PFN. A notifier handler
460 * should be zapping the SPTE before the main MM's page table is
461 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
462 * thread before replacement.
464 if (was_leaf && is_leaf && pfn_changed) {
465 pr_err("Invalid SPTE change: cannot replace a present leaf\n"
466 "SPTE with another present leaf SPTE mapping a\n"
468 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
469 as_id, gfn, old_spte, new_spte, level);
472 * Crash the host to prevent error propagation and guest data
478 if (old_spte == new_spte)
481 trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
484 check_spte_writable_invariants(new_spte);
487 * The only time a SPTE should be changed from a non-present to
488 * non-present state is when an MMIO entry is installed/modified/
489 * removed. In that case, there is nothing to do here.
491 if (!was_present && !is_present) {
493 * If this change does not involve a MMIO SPTE or removed SPTE,
494 * it is unexpected. Log the change, though it should not
495 * impact the guest since both the former and current SPTEs
498 if (WARN_ON_ONCE(!is_mmio_spte(old_spte) &&
499 !is_mmio_spte(new_spte) &&
500 !is_removed_spte(new_spte)))
501 pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
502 "should not be replaced with another,\n"
503 "different nonpresent SPTE, unless one or both\n"
504 "are MMIO SPTEs, or the new SPTE is\n"
505 "a temporary removed SPTE.\n"
506 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
507 as_id, gfn, old_spte, new_spte, level);
511 if (is_leaf != was_leaf)
512 kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
514 if (was_leaf && is_dirty_spte(old_spte) &&
515 (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
516 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
519 * Recursively handle child PTs if the change removed a subtree from
520 * the paging structure. Note the WARN on the PFN changing without the
521 * SPTE being converted to a hugepage (leaf) or being zapped. Shadow
522 * pages are kernel allocations and should never be migrated.
524 if (was_present && !was_leaf &&
525 (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
526 handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
528 if (was_leaf && is_accessed_spte(old_spte) &&
529 (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
530 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
534 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
535 * and handle the associated bookkeeping. Do not mark the page dirty
536 * in KVM's dirty bitmaps.
538 * If setting the SPTE fails because it has changed, iter->old_spte will be
539 * refreshed to the current value of the spte.
542 * @iter: a tdp_iter instance currently on the SPTE that should be set
543 * @new_spte: The value the SPTE should be set to
545 * * 0 - If the SPTE was set.
546 * * -EBUSY - If the SPTE cannot be set. In this case this function will have
547 * no side-effects other than setting iter->old_spte to the last
548 * known value of the spte.
550 static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
551 struct tdp_iter *iter,
554 u64 *sptep = rcu_dereference(iter->sptep);
557 * The caller is responsible for ensuring the old SPTE is not a REMOVED
558 * SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE,
559 * and pre-checking before inserting a new SPTE is advantageous as it
560 * avoids unnecessary work.
562 WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
564 lockdep_assert_held_read(&kvm->mmu_lock);
567 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
568 * does not hold the mmu_lock. On failure, i.e. if a different logical
569 * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
570 * the current value, so the caller operates on fresh data, e.g. if it
571 * retries tdp_mmu_set_spte_atomic().
573 if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
576 handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
577 new_spte, iter->level, true);
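/*
* Sketch of the retry contract described above; the surrounding fault-handling
* context is illustrative, not lifted from a real caller. On -EBUSY,
* try_cmpxchg64() has already refreshed iter->old_spte, so the caller can
* either recompute its new SPTE and retry, or bail out and let the vCPU
* re-fault, e.g.:
*
*	if (tdp_mmu_set_spte_atomic(kvm, iter, new_spte))
*		return RET_PF_RETRY;
*/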
582 static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
583 struct tdp_iter *iter)
588 * Freeze the SPTE by setting it to a special,
589 * non-present value. This will stop other threads from
590 * immediately installing a present entry in its place
591 * before the TLBs are flushed.
593 ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
597 kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
600 * No other thread can overwrite the removed SPTE as they must either
601 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
602 * overwrite the special removed SPTE value. No bookkeeping is needed
603 * here since the SPTE is going from non-present to non-present. Use
604 * the raw write helper to avoid an unnecessary check on volatile bits.
606 __kvm_tdp_mmu_write_spte(iter->sptep, 0);
613 * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
615 * @as_id: Address space ID, i.e. regular vs. SMM
616 * @sptep: Pointer to the SPTE
617 * @old_spte: The current value of the SPTE
618 * @new_spte: The new value that will be set for the SPTE
619 * @gfn: The base GFN that was (or will be) mapped by the SPTE
620 * @level: The level _containing_ the SPTE (its parent PT's level)
622 * Returns the old SPTE value, which _may_ be different than @old_spte if the
623 * SPTE had volatile bits.
625 static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
626 u64 old_spte, u64 new_spte, gfn_t gfn, int level)
628 lockdep_assert_held_write(&kvm->mmu_lock);
631 * No thread should be using this function to set SPTEs to or from the
632 * temporary removed SPTE value.
633 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
634 * should be used. If operating under the MMU lock in write mode, the
635 * use of the removed SPTE should not be necessary.
637 WARN_ON_ONCE(is_removed_spte(old_spte) || is_removed_spte(new_spte));
639 old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
641 handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
645 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
648 WARN_ON_ONCE(iter->yielded);
649 iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
650 iter->old_spte, new_spte,
651 iter->gfn, iter->level);
654 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
655 for_each_tdp_pte(_iter, _root, _start, _end)
657 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \
658 tdp_root_for_each_pte(_iter, _root, _start, _end) \
659 if (!is_shadow_present_pte(_iter.old_spte) || \
660 !is_last_spte(_iter.old_spte, _iter.level)) \
664 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end) \
665 for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
668 * Yield if the MMU lock is contended or this thread needs to return control
671 * If this function should yield and flush is set, it will perform a remote
672 * TLB flush before yielding.
674 * If this function yields, iter->yielded is set and the caller must skip to
675 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
676 * over the paging structures to allow the iterator to continue its traversal
677 * from the paging structure root.
679 * Returns true if this function yielded.
681 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
682 struct tdp_iter *iter,
683 bool flush, bool shared)
685 WARN_ON_ONCE(iter->yielded);
687 /* Ensure forward progress has been made before yielding. */
688 if (iter->next_last_level_gfn == iter->yielded_gfn)
691 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
693 kvm_flush_remote_tlbs(kvm);
698 cond_resched_rwlock_read(&kvm->mmu_lock);
700 cond_resched_rwlock_write(&kvm->mmu_lock);
704 WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);
706 iter->yielded = true;
709 return iter->yielded;
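/*
* Typical caller pattern, mirroring the zap and write-protect walks later in
* this file (the loop bounds are illustrative): when the helper yields, the
* caller must "continue" so that tdp_iter_next() restarts the walk, rather
* than acting on a potentially stale iter.old_spte.
*
*	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
*		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared))
*			continue;
*		...
*	}
*/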
712 static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
715 * Bound TDP MMU walks at host.MAXPHYADDR. KVM disallows memslots with
716 * a gpa range that would exceed the max gfn, and KVM does not create
717 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
718 * the slow emulation path every time.
720 return kvm_mmu_max_gfn() + 1;
723 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
724 bool shared, int zap_level)
726 struct tdp_iter iter;
728 gfn_t end = tdp_mmu_max_gfn_exclusive();
731 for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
733 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
736 if (!is_shadow_present_pte(iter.old_spte))
739 if (iter.level > zap_level)
743 tdp_mmu_iter_set_spte(kvm, &iter, 0);
744 else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
749 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
754 * The root must have an elevated refcount so that it's reachable via
755 * mmu_notifier callbacks, which allows this path to yield and drop
756 * mmu_lock. When handling an unmap/release mmu_notifier command, KVM
757 * must drop all references to relevant pages prior to completing the
758 * callback. Dropping mmu_lock with an unreachable root would result
759 * in zapping SPTEs after a relevant mmu_notifier callback completes
760 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
761 * dirty accessed bits to the SPTE's associated struct page.
763 WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));
765 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
770 * Zap roots in multiple passes of decreasing granularity, i.e. zap at
771 * 4KiB=>2MiB=>1GiB=>root, in order to better honor need_resched() (all
772 * preempt models) or mmu_lock contention (full or real-time models).
773 * Zapping at finer granularity marginally increases the total time of
774 * the zap, but in most cases the zap itself isn't latency sensitive.
776 * If KVM is configured to prove the MMU, skip the 4KiB and 2MiB zaps
777 * in order to mimic the page fault path, which can replace a 1GiB page
778 * table with an equivalent 1GiB hugepage, i.e. can get saddled with
779 * zapping a 1GiB region that's fully populated with 4KiB SPTEs. This
780 * allows verifying that KVM can safely zap 1GiB regions, e.g. without
781 * inducing RCU stalls, without relying on a relatively rare event
782 * (zapping roots is orders of magnitude more common). Note, because
783 * zapping a SP recurses on its children, stepping down to PG_LEVEL_4K
784 * in the iterator itself is unnecessary.
786 if (!IS_ENABLED(CONFIG_KVM_PROVE_MMU)) {
787 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K);
788 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_2M);
790 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
791 __tdp_mmu_zap_root(kvm, root, shared, root->role.level);
796 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
801 * This helper intentionally doesn't allow zapping a root shadow page,
802 * which doesn't have a parent page table and thus no associated entry.
804 if (WARN_ON_ONCE(!sp->ptep))
807 old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
808 if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
811 tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
812 sp->gfn, sp->role.level + 1);
818 * If can_yield is true, will release the MMU lock and reschedule if the
819 * scheduler needs the CPU or there is contention on the MMU lock. If this
820 * function cannot yield, it will not release the MMU lock or reschedule and
821 * the caller must ensure it does not supply too large a GFN range, or the
822 * operation can cause a soft lockup.
824 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
825 gfn_t start, gfn_t end, bool can_yield, bool flush)
827 struct tdp_iter iter;
829 end = min(end, tdp_mmu_max_gfn_exclusive());
831 lockdep_assert_held_write(&kvm->mmu_lock);
835 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
837 tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
842 if (!is_shadow_present_pte(iter.old_spte) ||
843 !is_last_spte(iter.old_spte, iter.level))
846 tdp_mmu_iter_set_spte(kvm, &iter, 0);
849 * Zapping SPTEs in invalid roots doesn't require a TLB flush,
850 * see kvm_tdp_mmu_zap_invalidated_roots() for details.
852 if (!root->role.invalid)
859 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
860 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
866 * Zap leaf SPTEs for the range of gfns, [start, end), for all *VALID* roots.
867 * Returns true if a TLB flush is needed before releasing the MMU lock, i.e. if
868 * one or more SPTEs were zapped since the MMU lock was last acquired.
870 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
872 struct kvm_mmu_page *root;
874 lockdep_assert_held_write(&kvm->mmu_lock);
875 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, -1)
876 flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
881 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
883 struct kvm_mmu_page *root;
886 * Zap all roots, including invalid roots, as all SPTEs must be dropped
887 * before returning to the caller. Zap directly even if the root is
888 * also being zapped by a worker. Walking zapped top-level SPTEs isn't
889 * all that expensive and mmu_lock is already held, which means the
890 * worker has yielded, i.e. flushing the work instead of zapping here
891 * isn't guaranteed to be any faster.
893 * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
894 * is being destroyed or the userspace VMM has exited. In both cases,
895 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
897 lockdep_assert_held_write(&kvm->mmu_lock);
898 for_each_tdp_mmu_root_yield_safe(kvm, root)
899 tdp_mmu_zap_root(kvm, root, false);
903 * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
906 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
908 struct kvm_mmu_page *root;
910 read_lock(&kvm->mmu_lock);
912 for_each_tdp_mmu_root_yield_safe(kvm, root) {
913 if (!root->tdp_mmu_scheduled_root_to_zap)
916 root->tdp_mmu_scheduled_root_to_zap = false;
917 KVM_BUG_ON(!root->role.invalid, kvm);
920 * A TLB flush is not necessary as KVM performs a local TLB
921 * flush when allocating a new root (see kvm_mmu_load()), and
922 * when migrating a vCPU to a different pCPU. Note, the local
923 * TLB flush on reuse also invalidates paging-structure-cache
924 * entries, i.e. TLB entries for intermediate paging structures,
925 * that may be zapped, as such entries are associated with the
926 * ASID on both VMX and SVM.
928 tdp_mmu_zap_root(kvm, root, true);
931 * The reference needs to be put *after* zapping the root, as
932 * the root must be reachable by mmu_notifiers while it's being
935 kvm_tdp_mmu_put_root(kvm, root);
938 read_unlock(&kvm->mmu_lock);
942 * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
943 * is about to be zapped, e.g. in response to a memslots update. The actual
944 * zapping is done separately so that it happens with mmu_lock held for read,
945 * whereas invalidating roots must be done with mmu_lock held for write (unless
946 * the VM is being destroyed).
948 * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
949 * See kvm_tdp_mmu_alloc_root().
951 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
953 struct kvm_mmu_page *root;
956 * mmu_lock must be held for write to ensure that a root doesn't become
957 * invalid while there are active readers (invalidating a root while
958 * there are active readers may or may not be problematic in practice,
959 * but it's uncharted territory and not supported).
961 * Waive the assertion if there are no users of @kvm, i.e. the VM is
962 * being destroyed after all references have been put, or if no vCPUs
963 * have been created (which means there are no roots), i.e. the VM is
964 * being destroyed in an error path of KVM_CREATE_VM.
966 if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
967 refcount_read(&kvm->users_count) && kvm->created_vcpus)
968 lockdep_assert_held_write(&kvm->mmu_lock);
971 * As above, mmu_lock isn't held when destroying the VM! There can't
972 * be other references to @kvm, i.e. nothing else can invalidate roots
973 * or get/put references to roots.
975 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
977 * Note, invalid roots can outlive a memslot update! Invalid
978 * roots must be *zapped* before the memslot update completes,
979 * but a different task can acquire a reference and keep the
980 * root alive after it's been zapped.
982 if (!root->role.invalid) {
983 root->tdp_mmu_scheduled_root_to_zap = true;
984 root->role.invalid = true;
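/*
* Minimal sketch of the two-phase protocol described above (the locking is
* illustrative; kvm_mmu_uninit_tdp_mmu() above skips it because no other
* users of @kvm can exist at teardown): invalidate while holding mmu_lock
* for write, then zap, which retakes mmu_lock for read and puts the
* references gifted to kvm_tdp_mmu_zap_invalidated_roots().
*
*	write_lock(&kvm->mmu_lock);
*	kvm_tdp_mmu_invalidate_all_roots(kvm);
*	write_unlock(&kvm->mmu_lock);
*
*	kvm_tdp_mmu_zap_invalidated_roots(kvm);
*/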
990 * Installs a last-level SPTE to handle a TDP page fault.
991 * (NPT/EPT violation/misconfiguration)
993 static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
994 struct kvm_page_fault *fault,
995 struct tdp_iter *iter)
997 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
999 int ret = RET_PF_FIXED;
1000 bool wrprot = false;
1002 if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
1003 return RET_PF_RETRY;
1005 if (unlikely(!fault->slot))
1006 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
1008 wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
1009 fault->pfn, iter->old_spte, fault->prefetch, true,
1010 fault->map_writable, &new_spte);
1012 if (new_spte == iter->old_spte)
1013 ret = RET_PF_SPURIOUS;
1014 else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
1015 return RET_PF_RETRY;
1016 else if (is_shadow_present_pte(iter->old_spte) &&
1017 !is_last_spte(iter->old_spte, iter->level))
1018 kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
1021 * If the page fault was caused by a write but the page is write
1022 * protected, emulation is needed. If the emulation was skipped,
1023 * the vCPU would have the same fault again.
1027 ret = RET_PF_EMULATE;
1030 /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
1031 if (unlikely(is_mmio_spte(new_spte))) {
1032 vcpu->stat.pf_mmio_spte_created++;
1033 trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
1035 ret = RET_PF_EMULATE;
1037 trace_kvm_mmu_set_spte(iter->level, iter->gfn,
1038 rcu_dereference(iter->sptep));
1045 * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
1046 * provided page table.
1048 * @kvm: kvm instance
1049 * @iter: a tdp_iter instance currently on the SPTE that should be set
1050 * @sp: The new TDP page table to install.
1051 * @shared: This operation is running under the MMU lock in read mode.
1053 * Returns: 0 if the new page table was installed. Non-0 if the page table
1054 * could not be installed (e.g. the atomic compare-exchange failed).
1056 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1057 struct kvm_mmu_page *sp, bool shared)
1059 u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
1063 ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
1067 tdp_mmu_iter_set_spte(kvm, iter, spte);
1070 tdp_account_mmu_page(kvm, sp);
1075 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1076 struct kvm_mmu_page *sp, bool shared);
1079 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1080 * page tables and SPTEs to translate the faulting guest physical address.
1082 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1084 struct kvm_mmu *mmu = vcpu->arch.mmu;
1085 struct kvm *kvm = vcpu->kvm;
1086 struct tdp_iter iter;
1087 struct kvm_mmu_page *sp;
1088 int ret = RET_PF_RETRY;
1090 kvm_mmu_hugepage_adjust(vcpu, fault);
1092 trace_kvm_mmu_spte_requested(fault);
1096 tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
1099 if (fault->nx_huge_page_workaround_enabled)
1100 disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1103 * If SPTE has been frozen by another thread, just give up and
1104 * retry, avoiding unnecessary page table allocation and free.
1106 if (is_removed_spte(iter.old_spte))
1109 if (iter.level == fault->goal_level)
1110 goto map_target_level;
1112 /* Step down into the lower level page table if it exists. */
1113 if (is_shadow_present_pte(iter.old_spte) &&
1114 !is_large_pte(iter.old_spte))
1118 * The SPTE is either non-present or points to a huge page that
1119 * needs to be split.
1121 sp = tdp_mmu_alloc_sp(vcpu);
1122 tdp_mmu_init_child_sp(sp, &iter);
1124 sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
1126 if (is_shadow_present_pte(iter.old_spte))
1127 r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
1129 r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1132 * Force the guest to retry if installing an upper level SPTE
1133 * failed, e.g. because a different task modified the SPTE.
1136 tdp_mmu_free_sp(sp);
1140 if (fault->huge_page_disallowed &&
1141 fault->req_level >= iter.level) {
1142 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
1143 if (sp->nx_huge_page_disallowed)
1144 track_possible_nx_huge_page(kvm, sp);
1145 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1150 * The walk aborted before reaching the target level, e.g. because the
1151 * iterator detected an upper level SPTE was frozen during traversal.
1153 WARN_ON_ONCE(iter.level == fault->goal_level);
1157 ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
1164 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1167 struct kvm_mmu_page *root;
1169 __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
1170 flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
1171 range->may_block, flush);
1176 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1177 struct kvm_gfn_range *range);
1179 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1180 struct kvm_gfn_range *range,
1181 tdp_handler_t handler)
1183 struct kvm_mmu_page *root;
1184 struct tdp_iter iter;
1188 * Don't support rescheduling, none of the MMU notifiers that funnel
1189 * into this helper allow blocking; it'd be dead, wasteful code.
1191 for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1194 tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1195 ret |= handler(kvm, &iter, range);
1204 * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return non-zero
1205 * if any of the GFNs in the range have been accessed.
1207 * No need to mark the corresponding PFN as accessed as this call is coming
1208 * from the clear_young() or clear_flush_young() notifier, which uses the
1209 * return value to determine if the page has been accessed.
1211 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1212 struct kvm_gfn_range *range)
1216 /* If we have a non-accessed entry we don't need to change the pte. */
1217 if (!is_accessed_spte(iter->old_spte))
1220 if (spte_ad_enabled(iter->old_spte)) {
1221 iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
1223 shadow_accessed_mask,
1225 new_spte = iter->old_spte & ~shadow_accessed_mask;
1228 * Capture the dirty status of the page, so that it doesn't get
1229 * lost when the SPTE is marked for access tracking.
1231 if (is_writable_pte(iter->old_spte))
1232 kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));
1234 new_spte = mark_spte_for_access_track(iter->old_spte);
1235 iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
1236 iter->old_spte, new_spte,
1240 trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
1241 iter->old_spte, new_spte);
1245 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1247 return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1250 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1251 struct kvm_gfn_range *range)
1253 return is_accessed_spte(iter->old_spte);
1256 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1258 return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1261 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
1262 struct kvm_gfn_range *range)
1266 /* Huge pages aren't expected to be modified without first being zapped. */
1267 WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end);
1269 if (iter->level != PG_LEVEL_4K ||
1270 !is_shadow_present_pte(iter->old_spte))
1274 * Note, when changing a read-only SPTE, it's not strictly necessary to
1275 * zero the SPTE before setting the new PFN, but doing so preserves the
1276 * invariant that the PFN of a present leaf SPTE can never change.
1277 * See handle_changed_spte().
1279 tdp_mmu_iter_set_spte(kvm, iter, 0);
1281 if (!pte_write(range->arg.pte)) {
1282 new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
1283 pte_pfn(range->arg.pte));
1285 tdp_mmu_iter_set_spte(kvm, iter, new_spte);
1292 * Handle the changed_pte MMU notifier for the TDP MMU.
1293 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
1295 * Returns non-zero if a flush is needed before releasing the MMU lock.
1297 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1300 * No need to handle the remote TLB flush under RCU protection, the
1301 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
1302 * shadow page. See the WARN on pfn_changed in handle_changed_spte().
1304 return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
1308 * Remove write access from all SPTEs at or above min_level that map GFNs
1309 * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1312 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1313 gfn_t start, gfn_t end, int min_level)
1315 struct tdp_iter iter;
1317 bool spte_set = false;
1321 BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1323 for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
1325 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1328 if (!is_shadow_present_pte(iter.old_spte) ||
1329 !is_last_spte(iter.old_spte, iter.level) ||
1330 !(iter.old_spte & PT_WRITABLE_MASK))
1333 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1335 if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1346 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1347 * only affect leaf SPTEs down to min_level.
1348 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1350 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1351 const struct kvm_memory_slot *slot, int min_level)
1353 struct kvm_mmu_page *root;
1354 bool spte_set = false;
1356 lockdep_assert_held_read(&kvm->mmu_lock);
1358 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1359 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1360 slot->base_gfn + slot->npages, min_level);
1365 static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
1367 struct kvm_mmu_page *sp;
1371 sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
1375 sp->spt = (void *)__get_free_page(gfp);
1377 kmem_cache_free(mmu_page_header_cache, sp);
1384 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
1385 struct tdp_iter *iter,
1388 struct kvm_mmu_page *sp;
1390 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1393 * Since we are allocating while under the MMU lock we have to be
1394 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
1395 * reclaim and to avoid making any filesystem callbacks (which can end
1396 * up invoking KVM MMU notifiers, resulting in a deadlock).
1398 * If this allocation fails we drop the lock and retry with reclaim
1401 sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1408 read_unlock(&kvm->mmu_lock);
1410 write_unlock(&kvm->mmu_lock);
1412 iter->yielded = true;
1413 sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1416 read_lock(&kvm->mmu_lock);
1418 write_lock(&kvm->mmu_lock);
1425 /* Note, the caller is responsible for initializing @sp. */
1426 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1427 struct kvm_mmu_page *sp, bool shared)
1429 const u64 huge_spte = iter->old_spte;
1430 const int level = iter->level;
1434 * No need for atomics when writing to sp->spt since the page table has
1435 * not been linked in yet and thus is not reachable from any other CPU.
1437 for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
1438 sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1441 * Replace the huge spte with a pointer to the populated lower level
1442 * page table. Since we are making this change without a TLB flush vCPUs
1443 * will see a mix of the split mappings and the original huge mapping,
1444 * depending on what's currently in their TLB. This is fine from a
1445 * correctness standpoint since the translation will be the same either
1448 ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1453 * tdp_mmu_link_sp() will handle subtracting the huge page we
1454 * are overwriting from the page stats. But we have to manually update
1455 * the page stats with the new present child pages.
1457 kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);
1460 trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1464 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1465 struct kvm_mmu_page *root,
1466 gfn_t start, gfn_t end,
1467 int target_level, bool shared)
1469 struct kvm_mmu_page *sp = NULL;
1470 struct tdp_iter iter;
1476 * Traverse the page table splitting all huge pages above the target
1477 * level into one lower level. For example, if we encounter a 1GB page
1478 * we split it into 512 2MB pages.
1480 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1481 * to visit an SPTE before ever visiting its children, which means we
1482 * will correctly recursively split huge pages that are more than one
1483 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
1484 * and then splitting each of those to 512 4KB pages).
1486 for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1488 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1491 if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1495 sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1498 trace_kvm_mmu_split_huge_page(iter.gfn,
1508 tdp_mmu_init_child_sp(sp, &iter);
1510 if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1519 * It's possible to exit the loop having never used the last sp if, for
1520 * example, a vCPU doing HugePage NX splitting wins the race and
1521 * installs its own sp in place of the last sp we tried to split.
1524 tdp_mmu_free_sp(sp);
1531 * Try to split all huge pages mapped by the TDP MMU down to the target level.
1533 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1534 const struct kvm_memory_slot *slot,
1535 gfn_t start, gfn_t end,
1536 int target_level, bool shared)
1538 struct kvm_mmu_page *root;
1541 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1542 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
1543 r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1545 kvm_tdp_mmu_put_root(kvm, root);
1552 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1553 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1554 * If AD bits are not enabled, this will require clearing the writable bit on
1555 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1558 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1559 gfn_t start, gfn_t end)
1561 u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
1562 struct tdp_iter iter;
1563 bool spte_set = false;
1567 tdp_root_for_each_pte(iter, root, start, end) {
1569 if (!is_shadow_present_pte(iter.old_spte) ||
1570 !is_last_spte(iter.old_spte, iter.level))
1573 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1576 KVM_MMU_WARN_ON(kvm_ad_enabled() &&
1577 spte_ad_need_write_protect(iter.old_spte));
1579 if (!(iter.old_spte & dbit))
1582 if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
1593 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1594 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1595 * If AD bits are not enabled, this will require clearing the writable bit on
1596 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1599 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1600 const struct kvm_memory_slot *slot)
1602 struct kvm_mmu_page *root;
1603 bool spte_set = false;
1605 lockdep_assert_held_read(&kvm->mmu_lock);
1606 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1607 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1608 slot->base_gfn + slot->npages);
1614 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1615 * set in mask, starting at gfn. The given memslot is expected to contain all
1616 * the GFNs represented by set bits in the mask. If AD bits are enabled,
1617 * clearing the dirty status will involve clearing the dirty bit on each SPTE
1618 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1620 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1621 gfn_t gfn, unsigned long mask, bool wrprot)
1623 u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
1625 struct tdp_iter iter;
1627 lockdep_assert_held_write(&kvm->mmu_lock);
1631 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1632 gfn + BITS_PER_LONG) {
1636 KVM_MMU_WARN_ON(kvm_ad_enabled() &&
1637 spte_ad_need_write_protect(iter.old_spte));
1639 if (iter.level > PG_LEVEL_4K ||
1640 !(mask & (1UL << (iter.gfn - gfn))))
1643 mask &= ~(1UL << (iter.gfn - gfn));
1645 if (!(iter.old_spte & dbit))
1648 iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
1649 iter.old_spte, dbit,
1652 trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
1654 iter.old_spte & ~dbit);
1655 kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
1662 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1663 * set in mask, starting at gfn. The given memslot is expected to contain all
1664 * the GFNs represented by set bits in the mask. If AD bits are enabled,
1665 * clearing the dirty status will involve clearing the dirty bit on each SPTE
1666 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1668 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1669 struct kvm_memory_slot *slot,
1670 gfn_t gfn, unsigned long mask,
1673 struct kvm_mmu_page *root;
1675 for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
1676 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1679 static void zap_collapsible_spte_range(struct kvm *kvm,
1680 struct kvm_mmu_page *root,
1681 const struct kvm_memory_slot *slot)
1683 gfn_t start = slot->base_gfn;
1684 gfn_t end = start + slot->npages;
1685 struct tdp_iter iter;
1686 int max_mapping_level;
1690 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
1692 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1695 if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
1696 !is_shadow_present_pte(iter.old_spte))
1700 * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
1701 * a larger page, then its parent would have been zapped
1702 * instead of stepping down.
1704 if (is_last_spte(iter.old_spte, iter.level))
1708 * If iter.gfn resides outside of the slot, i.e. the page for
1709 * the current level overlaps but is not contained by the slot,
1710 * then the SPTE can't be made huge. More importantly, trying
1711 * to query that info from slot->arch.lpage_info will cause an
1712 * out-of-bounds access.
1714 if (iter.gfn < start || iter.gfn >= end)
1717 max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
1718 iter.gfn, PG_LEVEL_NUM);
1719 if (max_mapping_level < iter.level)
1722 /* Note, a successful atomic zap also does a remote TLB flush. */
1723 if (tdp_mmu_zap_spte_atomic(kvm, &iter))
1731 * Zap non-leaf SPTEs (and free their associated page tables) which could
1732 * be replaced by huge pages, for GFNs within the slot.
1734 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1735 const struct kvm_memory_slot *slot)
1737 struct kvm_mmu_page *root;
1739 lockdep_assert_held_read(&kvm->mmu_lock);
1740 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1741 zap_collapsible_spte_range(kvm, root, slot);
1745 * Removes write access on the last level SPTE mapping this GFN and unsets the
1746 * MMU-writable bit to ensure future writes continue to be intercepted.
1747 * Returns true if an SPTE was set and a TLB flush is needed.
1749 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1750 gfn_t gfn, int min_level)
1752 struct tdp_iter iter;
1754 bool spte_set = false;
1756 BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1760 for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
1761 if (!is_shadow_present_pte(iter.old_spte) ||
1762 !is_last_spte(iter.old_spte, iter.level))
1765 new_spte = iter.old_spte &
1766 ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
1768 if (new_spte == iter.old_spte)
1771 tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
1781 * Removes write access on the last level SPTE mapping this GFN and unsets the
1782 * MMU-writable bit to ensure future writes continue to be intercepted.
1783 * Returns true if an SPTE was set and a TLB flush is needed.
1785 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1786 struct kvm_memory_slot *slot, gfn_t gfn,
1789 struct kvm_mmu_page *root;
1790 bool spte_set = false;
1792 lockdep_assert_held_write(&kvm->mmu_lock);
1793 for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
1794 spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1800 * Return the level of the lowest level SPTE added to sptes.
1801 * That SPTE may be non-present.
1803 * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1805 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
1808 struct tdp_iter iter;
1809 struct kvm_mmu *mmu = vcpu->arch.mmu;
1810 gfn_t gfn = addr >> PAGE_SHIFT;
1813 *root_level = vcpu->arch.mmu->root_role.level;
1815 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1817 sptes[leaf] = iter.old_spte;
1824 * Returns the last level spte pointer of the shadow page walk for the given
1825 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
1826 * walk could be performed, returns NULL and *spte does not contain valid data.
1829 * - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1830 * - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
1832 * WARNING: This function is only intended to be called during fast_page_fault.
1834 u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
1837 struct tdp_iter iter;
1838 struct kvm_mmu *mmu = vcpu->arch.mmu;
1839 gfn_t gfn = addr >> PAGE_SHIFT;
1840 tdp_ptep_t sptep = NULL;
1842 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1843 *spte = iter.old_spte;
1848 * Perform the rcu_dereference to get the raw spte pointer value since
1849 * we are passing it up to fast_page_fault, which is shared with the
1850 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
1853 * This is safe since fast_page_fault obeys the contracts of this
1854 * function as well as all TDP MMU contracts around modifying SPTEs
1855 * outside of mmu_lock.
1857 return rcu_dereference(sptep);
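/*
* Minimal sketch of the lockless-walk contract shared by kvm_tdp_mmu_get_walk()
* and kvm_tdp_mmu_fast_pf_get_last_sptep() (the local variables are
* illustrative): both walks must be bracketed by
* kvm_tdp_mmu_walk_lockless_{begin,end}(), and a returned sptep must not be
* dereferenced once the walk has ended.
*
*	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
*	int root_level, leaf;
*
*	kvm_tdp_mmu_walk_lockless_begin();
*	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
*	kvm_tdp_mmu_walk_lockless_end();
*/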