// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#ifdef CONFIG_X86_64
static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
#endif

static bool is_tdp_mmu_enabled(void)
{
#ifdef CONFIG_X86_64
        return tdp_enabled && READ_ONCE(tdp_mmu_enabled);
#else
        return false;
#endif /* CONFIG_X86_64 */
}

/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
        if (!is_tdp_mmu_enabled())
                return;

        /* This should not be changed for the lifetime of the VM. */
        kvm->arch.tdp_mmu_enabled = true;

        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
        if (!kvm->arch.tdp_mmu_enabled)
                return;

        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
}

#define for_each_tdp_mmu_root(_kvm, _root)                              \
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)

bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
        struct kvm_mmu_page *sp;

        sp = to_shadow_page(hpa);

        return sp->tdp_mmu_page && sp->root_count;
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                          gfn_t start, gfn_t end, bool can_yield);

void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
        gfn_t max_gfn = 1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT);

        lockdep_assert_held(&kvm->mmu_lock);

        WARN_ON(root->root_count);
        WARN_ON(!root->tdp_mmu_page);

        list_del(&root->link);

        zap_gfn_range(kvm, root, 0, max_gfn, false);

        free_page((unsigned long)root->spt);
        kmem_cache_free(mmu_page_header_cache, root);
}

static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
                                                   int level)
{
        union kvm_mmu_page_role role;

        role = vcpu->arch.mmu->mmu_role.base;
        role.level = level;
        role.direct = true;
        role.gpte_is_8_bytes = true;
        role.access = ACC_ALL;

        return role;
}

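/*
 * Allocate a TDP MMU page table page and its struct kvm_mmu_page from the
 * vCPU's pre-filled memory caches and initialize its role.
 */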
static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
                                               int level)
{
        struct kvm_mmu_page *sp;

        sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
        sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

        sp->role.word = page_role_for_level(vcpu, level).word;
        sp->gfn = gfn;
        sp->tdp_mmu_page = true;

        return sp;
}

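/*
 * Get a root for the vCPU's current MMU role, reusing an existing TDP MMU
 * root with a matching role if one is found, otherwise allocating a new one.
 */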
static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
{
        union kvm_mmu_page_role role;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_page *root;

        role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

        spin_lock(&kvm->mmu_lock);

        /* Check for an existing root before allocating a new one. */
        for_each_tdp_mmu_root(kvm, root) {
                if (root->role.word == role.word) {
                        kvm_mmu_get_root(kvm, root);
                        spin_unlock(&kvm->mmu_lock);
                        return root;
                }
        }

        root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
        root->root_count = 1;

        list_add(&root->link, &kvm->arch.tdp_mmu_roots);

        spin_unlock(&kvm->mmu_lock);

        return root;
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *root;

        root = get_tdp_mmu_vcpu_root(vcpu);
        if (!root)
                return INVALID_PAGE;

        return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level);

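/* Return the address space ID of the shadow page: 1 for SMM, 0 otherwise. */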
static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
        return sp->role.smm ? 1 : 0;
}

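/*
 * Propagate accessed information to the primary MM when a present leaf SPTE
 * loses its accessed bit or changes PFN.
 */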
static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
        bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

        if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
                return;

        if (is_accessed_spte(old_spte) &&
            (!is_accessed_spte(new_spte) || pfn_changed))
                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

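/*
 * Mark the GFN dirty in its memslot's dirty log when a 4K SPTE transitions
 * to a writable state.
 */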
static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
                                          u64 old_spte, u64 new_spte, int level)
{
        bool pfn_changed;
        struct kvm_memory_slot *slot;

        if (level > PG_LEVEL_4K)
                return;

        pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

        if ((!is_writable_pte(old_spte) || pfn_changed) &&
            is_writable_pte(new_spte)) {
                slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
                mark_page_dirty_in_slot(slot, gfn);
        }
}

/**
 * handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                  u64 old_spte, u64 new_spte, int level)
{
        bool was_present = is_shadow_present_pte(old_spte);
        bool is_present = is_shadow_present_pte(new_spte);
        bool was_leaf = was_present && is_last_spte(old_spte, level);
        bool is_leaf = is_present && is_last_spte(new_spte, level);
        bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
        u64 *pt;
        struct kvm_mmu_page *sp;
        u64 old_child_spte;
        int i;

        WARN_ON(level > PT64_ROOT_MAX_LEVEL);
        WARN_ON(level < PG_LEVEL_4K);
        WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

        /*
         * If this warning were to trigger it would indicate that there was a
         * missing MMU notifier or a race with some notifier handler.
         * A present, leaf SPTE should never be directly replaced with another
         * present leaf SPTE pointing to a different PFN. A notifier handler
         * should be zapping the SPTE before the main MM's page table is
         * changed, or the SPTE should be zeroed, and the TLBs flushed by the
         * thread before replacement.
         */
        if (was_leaf && is_leaf && pfn_changed) {
                pr_err("Invalid SPTE change: cannot replace a present leaf\n"
                       "SPTE with another present leaf SPTE mapping a\n"
                       "different PFN!\n"
                       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
                       as_id, gfn, old_spte, new_spte, level);

                /*
                 * Crash the host to prevent error propagation and guest data
                 * corruption.
                 */
                BUG();
        }

        if (old_spte == new_spte)
                return;

        /*
         * The only times a SPTE should be changed from a non-present to
         * non-present state is when an MMIO entry is installed/modified/
         * removed. In that case, there is nothing to do here.
         */
        if (!was_present && !is_present) {
                /*
                 * If this change does not involve a MMIO SPTE, it is
                 * unexpected. Log the change, though it should not impact the
                 * guest since both the former and current SPTEs are nonpresent.
                 */
                if (WARN_ON(!is_mmio_spte(old_spte) && !is_mmio_spte(new_spte)))
                        pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
                               "should not be replaced with another,\n"
                               "different nonpresent SPTE, unless one or both\n"
                               "are MMIO SPTEs.\n"
                               "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
                               as_id, gfn, old_spte, new_spte, level);
                return;
        }

        if (was_leaf && is_dirty_spte(old_spte) &&
            (!is_dirty_spte(new_spte) || pfn_changed))
                kvm_set_pfn_dirty(spte_to_pfn(old_spte));

        /*
         * Recursively handle child PTs if the change removed a subtree from
         * the paging structure.
         */
        if (was_present && !was_leaf && (pfn_changed || !is_present)) {
                pt = spte_to_child_pt(old_spte, level);
                sp = sptep_to_sp(pt);

                list_del(&sp->link);

                if (sp->lpage_disallowed)
                        unaccount_huge_nx_page(kvm, sp);

                for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                        old_child_spte = READ_ONCE(*(pt + i));
                        WRITE_ONCE(*(pt + i), 0);
                        handle_changed_spte(kvm, as_id,
                                gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)),
                                old_child_spte, 0, level - 1);
                }

                kvm_flush_remote_tlbs_with_address(kvm, gfn,
                                                   KVM_PAGES_PER_HPAGE(level));

                free_page((unsigned long)pt);
                kmem_cache_free(mmu_page_header_cache, sp);
        }
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level)
{
        __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level);
        handle_changed_spte_acc_track(old_spte, new_spte, level);
        handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
                                      new_spte, level);
}

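/*
 * Write the new SPTE value and perform the associated bookkeeping. The
 * caller can suppress accessed-state and dirty-log updates via the
 * record_acc_track and record_dirty_log flags.
 */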
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                      u64 new_spte, bool record_acc_track,
                                      bool record_dirty_log)
{
        u64 *root_pt = tdp_iter_root_pt(iter);
        struct kvm_mmu_page *root = sptep_to_sp(root_pt);
        int as_id = kvm_mmu_page_as_id(root);

        WRITE_ONCE(*iter->sptep, new_spte);

        __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
                              iter->level);
        if (record_acc_track)
                handle_changed_spte_acc_track(iter->old_spte, new_spte,
                                              iter->level);
        if (record_dirty_log)
                handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
                                              iter->old_spte, new_spte,
                                              iter->level);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                    u64 new_spte)
{
        __tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
                                                 struct tdp_iter *iter,
                                                 u64 new_spte)
{
        __tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
                                                 struct tdp_iter *iter,
                                                 u64 new_spte)
{
        __tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

#define tdp_root_for_each_pte(_iter, _root, _start, _end)               \
        for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)          \
        tdp_root_for_each_pte(_iter, _root, _start, _end)                \
                if (!is_shadow_present_pte(_iter.old_spte) ||             \
                    !is_last_spte(_iter.old_spte, _iter.level))           \
                        continue;                                         \
                else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)                  \
        for_each_tdp_pte(_iter, __va(_mmu->root_hpa),                    \
                         _mmu->shadow_root_level, _start, _end)

/*
 * Flush the TLB if the process should drop kvm->mmu_lock.
 * Return whether the caller still needs to flush the TLB.
 */
static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
{
        if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                kvm_flush_remote_tlbs(kvm);
                cond_resched_lock(&kvm->mmu_lock);
                tdp_iter_refresh_walk(iter);
                return false;
        } else {
                return true;
        }
}

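/*
 * Yield the MMU lock and/or the CPU if needed and restart the iterator's
 * walk from the root. Unlike the variant above, no TLB flush is performed
 * before yielding.
 */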
static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
{
        if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                cond_resched_lock(&kvm->mmu_lock);
                tdp_iter_refresh_walk(iter);
        }
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                          gfn_t start, gfn_t end, bool can_yield)
{
        struct tdp_iter iter;
        bool flush_needed = false;

        tdp_root_for_each_pte(iter, root, start, end) {
                if (!is_shadow_present_pte(iter.old_spte))
                        continue;

                /*
                 * If this is a non-last-level SPTE that covers a larger range
                 * than should be zapped, continue, and zap the mappings at a
                 * lower level.
                 */
                if ((iter.gfn < start ||
                     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;

                tdp_mmu_set_spte(kvm, &iter, 0);

                if (can_yield)
                        flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
                else
                        flush_needed = true;
        }
        return flush_needed;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
{
        struct kvm_mmu_page *root;
        bool flush = false;

        for_each_tdp_mmu_root(kvm, root) {
                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                flush |= zap_gfn_range(kvm, root, start, end, true);

                kvm_mmu_put_root(kvm, root);
        }

        return flush;
}

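/* Zap all SPTEs mapped by the TDP MMU and flush the TLBs if anything was zapped. */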
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
        gfn_t max_gfn = 1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT);
        bool flush;

        flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
        if (flush)
                kvm_flush_remote_tlbs(kvm);
}

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
                                           int map_writable,
                                           struct tdp_iter *iter,
                                           kvm_pfn_t pfn, bool prefault)
{
        u64 new_spte;
        int ret = 0;
        int make_spte_ret = 0;

        if (unlikely(is_noslot_pfn(pfn))) {
                new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
                trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte);
        } else
                make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
                                          pfn, iter->old_spte, prefault, true,
                                          map_writable, !shadow_accessed_mask,
                                          &new_spte);

        if (new_spte == iter->old_spte)
                ret = RET_PF_SPURIOUS;
        else
                tdp_mmu_set_spte(vcpu->kvm, iter, new_spte);

        /*
         * If the page fault was caused by a write but the page is write
         * protected, emulation is needed. If the emulation was skipped,
         * the vCPU would have the same fault again.
         */
        if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
                if (write)
                        ret = RET_PF_EMULATE;
                kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
        }

        /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
        if (unlikely(is_mmio_spte(new_spte)))
                ret = RET_PF_EMULATE;

        trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);
        if (!prefault)
                vcpu->stat.pf_fixed++;

        return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                    int map_writable, int max_level, kvm_pfn_t pfn,
                    bool prefault)
{
        bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
        bool write = error_code & PFERR_WRITE_MASK;
        bool exec = error_code & PFERR_FETCH_MASK;
        bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
        struct kvm_mmu *mmu = vcpu->arch.mmu;
        struct tdp_iter iter;
        struct kvm_mmu_page *sp;
        u64 *child_pt;
        u64 new_spte;
        int ret;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int level;
        int req_level;

        if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
                return RET_PF_RETRY;
        if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
                return RET_PF_RETRY;

        level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
                                        huge_page_disallowed, &req_level);

        trace_kvm_mmu_spte_requested(gpa, level, pfn);
        tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
                if (nx_huge_page_workaround_enabled)
                        disallowed_hugepage_adjust(iter.old_spte, gfn,
                                                   iter.level, &pfn, &level);

                if (iter.level == level)
                        break;

                /*
                 * If there is an SPTE mapping a large page at a higher level
                 * than the target, that SPTE must be cleared and replaced
                 * with a non-leaf SPTE.
                 */
                if (is_shadow_present_pte(iter.old_spte) &&
                    is_large_pte(iter.old_spte)) {
                        tdp_mmu_set_spte(vcpu->kvm, &iter, 0);

                        kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn,
                                        KVM_PAGES_PER_HPAGE(iter.level));

                        /*
                         * The iter must explicitly re-read the SPTE here
                         * because the new value informs the !present
                         * path below.
                         */
                        iter.old_spte = READ_ONCE(*iter.sptep);
                }

                if (!is_shadow_present_pte(iter.old_spte)) {
                        sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
                        list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages);
                        child_pt = sp->spt;
                        clear_page(child_pt);
                        new_spte = make_nonleaf_spte(child_pt,
                                                     !shadow_accessed_mask);

                        trace_kvm_mmu_get_page(sp, true);
                        if (huge_page_disallowed && req_level >= iter.level)
                                account_huge_nx_page(vcpu->kvm, sp);

                        tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte);
                }
        }

        if (WARN_ON(iter.level != level))
                return RET_PF_RETRY;

        ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
                                              pfn, prefault);

        return ret;
}

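/*
 * Iterate over all TDP MMU roots and, for each memslot in the root's address
 * space, convert the overlapping portion of the HVA range to a GFN range and
 * invoke the handler on it. The handlers' return values are OR'd together.
 */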
static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
                unsigned long end, unsigned long data,
                int (*handler)(struct kvm *kvm, struct kvm_memory_slot *slot,
                               struct kvm_mmu_page *root, gfn_t start,
                               gfn_t end, unsigned long data))
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        struct kvm_mmu_page *root;
        int ret = 0;
        int as_id;

        for_each_tdp_mmu_root(kvm, root) {
                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                as_id = kvm_mmu_page_as_id(root);
                slots = __kvm_memslots(kvm, as_id);
                kvm_for_each_memslot(memslot, slots) {
                        unsigned long hva_start, hva_end;
                        gfn_t gfn_start, gfn_end;

                        hva_start = max(start, memslot->userspace_addr);
                        hva_end = min(end, memslot->userspace_addr +
                                      (memslot->npages << PAGE_SHIFT));
                        if (hva_start >= hva_end)
                                continue;
                        /*
                         * {gfn(page) | page intersects with [hva_start, hva_end)} =
                         * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                         */
                        gfn_start = hva_to_gfn_memslot(hva_start, memslot);
                        gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                        ret |= handler(kvm, memslot, root, gfn_start,
                                       gfn_end, data);
                }

                kvm_mmu_put_root(kvm, root);
        }

        return ret;
}

static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
                                     struct kvm_memory_slot *slot,
                                     struct kvm_mmu_page *root, gfn_t start,
                                     gfn_t end, unsigned long unused)
{
        return zap_gfn_range(kvm, root, start, end, false);
}

int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
                              unsigned long end)
{
        return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
                                            zap_gfn_range_hva_wrapper);
}

/*
 * Mark the SPTEs mapping the range of GFNs [start, end) unaccessed and return
 * non-zero if any of the GFNs in the range have been accessed.
 */
static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
                         struct kvm_mmu_page *root, gfn_t start, gfn_t end,
                         unsigned long unused)
{
        struct tdp_iter iter;
        int young = 0;
        u64 new_spte = 0;

        tdp_root_for_each_leaf_pte(iter, root, start, end) {
                /*
                 * If we have a non-accessed entry we don't need to change the
                 * pte.
                 */
                if (!is_accessed_spte(iter.old_spte))
                        continue;

                new_spte = iter.old_spte;

                if (spte_ad_enabled(new_spte)) {
                        clear_bit((ffs(shadow_accessed_mask) - 1),
                                  (unsigned long *)&new_spte);
                } else {
                        /*
                         * Capture the dirty status of the page, so that it doesn't get
                         * lost when the SPTE is marked for access tracking.
                         */
                        if (is_writable_pte(new_spte))
                                kvm_set_pfn_dirty(spte_to_pfn(new_spte));

                        new_spte = mark_spte_for_access_track(new_spte);
                }
                new_spte &= ~shadow_dirty_mask;

                tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
                young = 1;
        }

        return young;
}

int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
                              unsigned long end)
{
        return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
                                            age_gfn_range);
}

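/*
 * Report, without clearing it, whether any SPTE mapping the GFN has its
 * accessed bit set.
 */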
static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
                        struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
                        unsigned long unused2)
{
        struct tdp_iter iter;

        tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
                if (is_accessed_spte(iter.old_spte))
                        return 1;

        return 0;
}

int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0,
                                            test_age_gfn);
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
 * notifier.
 * Returns non-zero if a flush is needed before releasing the MMU lock.
 */
static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
                        struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
                        unsigned long data)
{
        struct tdp_iter iter;
        pte_t *ptep = (pte_t *)data;
        kvm_pfn_t new_pfn;
        u64 new_spte;
        int need_flush = 0;

        WARN_ON(pte_huge(*ptep));

        new_pfn = pte_pfn(*ptep);

        tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
                if (iter.level != PG_LEVEL_4K)
                        continue;

                if (!is_shadow_present_pte(iter.old_spte))
                        break;

                tdp_mmu_set_spte(kvm, &iter, 0);

                kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);

                if (!pte_write(*ptep)) {
                        new_spte = kvm_mmu_changed_pte_notifier_make_spte(
                                        iter.old_spte, new_pfn);

                        tdp_mmu_set_spte(kvm, &iter, new_spte);
                }

                need_flush = 1;
        }

        if (need_flush)
                kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);

        return 0;
}

int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
                             pte_t *host_ptep)
{
        return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1,
                                            (unsigned long)host_ptep,
                                            set_tdp_spte);
}

/*
 * Remove write access from all the SPTEs mapping GFNs [start, end). Only
 * leaf SPTEs at or above min_level are write-protected.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                             gfn_t start, gfn_t end, int min_level)
{
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;

        BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

        for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
                                   min_level, start, end) {
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;

                new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

                tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
                spte_set = true;

                tdp_mmu_iter_cond_resched(kvm, &iter);
        }
        return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
                             int min_level)
{
        struct kvm_mmu_page *root;
        int root_as_id;
        bool spte_set = false;

        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
                             slot->base_gfn + slot->npages, min_level);

                kvm_mmu_put_root(kvm, root);
        }

        return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t start, gfn_t end)
{
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;

        tdp_root_for_each_leaf_pte(iter, root, start, end) {
                if (spte_ad_need_write_protect(iter.old_spte)) {
                        if (is_writable_pte(iter.old_spte))
                                new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
                        else
                                continue;
                } else {
                        if (iter.old_spte & shadow_dirty_mask)
                                new_spte = iter.old_spte & ~shadow_dirty_mask;
                        else
                                continue;
                }

                tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
                spte_set = true;

                tdp_mmu_iter_cond_resched(kvm, &iter);
        }
        return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        struct kvm_mmu_page *root;
        int root_as_id;
        bool spte_set = false;

        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
                                slot->base_gfn + slot->npages);

                kvm_mmu_put_root(kvm, root);
        }

        return spte_set;
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t gfn, unsigned long mask, bool wrprot)
{
        struct tdp_iter iter;
        u64 new_spte;

        tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
                                   gfn + BITS_PER_LONG) {
                if (!mask)
                        break;

                if (iter.level > PG_LEVEL_4K ||
                    !(mask & (1UL << (iter.gfn - gfn))))
                        continue;

                if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
                        if (is_writable_pte(iter.old_spte))
                                new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
                        else
                                continue;
                } else {
                        if (iter.old_spte & shadow_dirty_mask)
                                new_spte = iter.old_spte & ~shadow_dirty_mask;
                        else
                                continue;
                }

                tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);

                mask &= ~(1UL << (iter.gfn - gfn));
        }
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                       struct kvm_memory_slot *slot,
                                       gfn_t gfn, unsigned long mask,
                                       bool wrprot)
{
        struct kvm_mmu_page *root;
        int root_as_id;

        lockdep_assert_held(&kvm->mmu_lock);
        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
        }
}

/*
 * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
 * only used for PML, and so will involve setting the dirty bit on each SPTE.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                                gfn_t start, gfn_t end)
{
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;

        tdp_root_for_each_pte(iter, root, start, end) {
                if (!is_shadow_present_pte(iter.old_spte))
                        continue;

                new_spte = iter.old_spte | shadow_dirty_mask;

                tdp_mmu_set_spte(kvm, &iter, new_spte);
                spte_set = true;

                tdp_mmu_iter_cond_resched(kvm, &iter);
        }

        return spte_set;
}

/*
 * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
 * only used for PML, and so will involve setting the dirty bit on each SPTE.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        struct kvm_mmu_page *root;
        int root_as_id;
        bool spte_set = false;

        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
                                slot->base_gfn + slot->npages);

                kvm_mmu_put_root(kvm, root);
        }
        return spte_set;
}

/*
 * Clear non-leaf entries (and free associated page tables) which could
 * be replaced by large mappings, for GFNs within the slot.
 */
static void zap_collapsible_spte_range(struct kvm *kvm,
                                       struct kvm_mmu_page *root,
                                       gfn_t start, gfn_t end)
{
        struct tdp_iter iter;
        kvm_pfn_t pfn;
        bool spte_set = false;

        tdp_root_for_each_pte(iter, root, start, end) {
                if (!is_shadow_present_pte(iter.old_spte) ||
                    is_last_spte(iter.old_spte, iter.level))
                        continue;

                pfn = spte_to_pfn(iter.old_spte);
                if (kvm_is_reserved_pfn(pfn) ||
                    !PageTransCompoundMap(pfn_to_page(pfn)))
                        continue;

                tdp_mmu_set_spte(kvm, &iter, 0);

                spte_set = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
        }

        if (spte_set)
                kvm_flush_remote_tlbs(kvm);
}

/*
 * Clear non-leaf entries (and free associated page tables) which could
 * be replaced by large mappings, for GFNs within the slot.
 */
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                       const struct kvm_memory_slot *slot)
{
        struct kvm_mmu_page *root;
        int root_as_id;

        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                zap_collapsible_spte_range(kvm, root, slot->base_gfn,
                                           slot->base_gfn + slot->npages);

                kvm_mmu_put_root(kvm, root);
        }
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
                              gfn_t gfn)
{
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;

        tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
                if (!is_writable_pte(iter.old_spte))
                        break;

                new_spte = iter.old_spte &
                        ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);

                tdp_mmu_set_spte(kvm, &iter, new_spte);
                spte_set = true;
        }

        return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn)
{
        struct kvm_mmu_page *root;
        int root_as_id;
        bool spte_set = false;

        lockdep_assert_held(&kvm->mmu_lock);
        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                spte_set |= write_protect_gfn(kvm, root, gfn);
        }
        return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
{
        struct tdp_iter iter;
        struct kvm_mmu *mmu = vcpu->arch.mmu;
        int leaf = vcpu->arch.mmu->shadow_root_level;
        gfn_t gfn = addr >> PAGE_SHIFT;

        tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
                leaf = iter.level;
                sptes[leaf - 1] = iter.old_spte;
        }

        return leaf;
}