/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif
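
/*
 * Illustrative usage, not part of the original header: with MMU_DEBUG
 * defined, the macros above expand to printk()s gated on 'dbg'; in a
 * normal build they compile to nothing.  The helper below is a purely
 * hypothetical call site, added as a sketch.
 */
static inline void mmu_example_trace_spte(u64 *sptep, u64 spte)
{
	pgprintk("%s: spte %p = 0x%llx\n", __func__, sptep, spte);
	MMU_WARN_ON(!sptep);
}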

/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as a PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))
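
/*
 * Illustrative sketch, not part of the original header: how a caller
 * might walk the four PAE roots and skip invalid entries.  This assumes
 * the mmu->pae_root[] backing array from struct kvm_mmu; the helper name
 * is hypothetical and the actual zapping is elided.
 */
static inline void mmu_example_reset_pae_roots(struct kvm_mmu *mmu)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
			continue;
		/* ... zap the page table referenced by the root here ... */
		mmu->pae_root[i] = INVALID_PAE_ROOT;
	}
}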

typedef u64 __rcu *tdp_ptep_t;

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;
	u8 mmu_valid_gen;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	union {
		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
		tdp_ptep_t ptep;
	};
	union {
		DECLARE_BITMAP(unsync_child_bitmap, 512);
		struct {
			struct work_struct tdp_mmu_async_work;
			void *tdp_mmu_async_data;
		};
	};

	struct list_head lpage_disallowed_link;
#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};
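
/*
 * Illustrative sketch, not part of the original header: the role/gfn
 * pair keys the shadow-page hash table (via hash_link above), so a
 * lookup must compare both fields.  This hypothetical predicate mirrors
 * the check performed by the real lookup in mmu.c.
 */
static inline bool mmu_example_sp_matches(struct kvm_mmu_page *sp, gfn_t gfn,
					  union kvm_mmu_page_role role)
{
	return sp->gfn == gfn && sp->role.word == role.word;
}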

extern struct kmem_cache *mmu_page_header_cache;

static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}
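
/*
 * Illustrative usage, not part of the original header: shadow page
 * tables are page-aligned, so masking an SPTE pointer's physical address
 * down to its page recovers the struct page whose ->private points back
 * at the owning kvm_mmu_page.  The helper name is hypothetical.
 */
static inline gfn_t mmu_example_sptep_owner_gfn(u64 *sptep)
{
	return sptep_to_sp(sptep)->gfn;
}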

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}
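
/*
 * Illustrative sketch, not part of the original header: the address
 * space id separates SMM and non-SMM mappings, and selects the matching
 * memslot set, e.g. via __kvm_memslots().  The helper name is
 * hypothetical.
 */
static inline struct kvm_memslots *mmu_example_sp_memslots(struct kvm *kvm,
							   struct kvm_mmu_page *sp)
{
	return __kvm_memslots(kvm, kvm_mmu_page_as_id(sp));
}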

static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for PAE paging (32-bit KVM).
	 */
	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
}
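
/*
 * Illustrative sketch, not part of the original header: a dirty-logging
 * path branches on the helper above, write-protecting the SPTE when PML
 * can't be used and clearing the dirty bit otherwise.  The masks below
 * are local stand-ins for PT_WRITABLE_MASK and shadow_dirty_mask from
 * the real SPTE code; the helper itself is hypothetical.
 */
#define MMU_EXAMPLE_WRITABLE_MASK	BIT_ULL(1)
#define MMU_EXAMPLE_DIRTY_MASK		BIT_ULL(6)

static inline u64 mmu_example_clear_dirty(struct kvm_mmu_page *sp, u64 spte)
{
	if (kvm_mmu_page_ad_need_write_protect(sp))
		return spte & ~MMU_EXAMPLE_WRITABLE_MASK;
	return spte & ~MMU_EXAMPLE_DIRTY_MASK;
}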

int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool can_unsync, bool prefetch);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), and
 * fast_page_fault().
 *
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};
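
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * consumer mapping the RET_PF_* codes to printable names, e.g. for ad
 * hoc debug output (the real tracepoints rely on TRACE_DEFINE_ENUM()
 * instead).
 */
static inline const char *mmu_example_ret_pf_name(int ret)
{
	switch (ret) {
	case RET_PF_RETRY:	return "retry";
	case RET_PF_EMULATE:	return "emulate";
	case RET_PF_INVALID:	return "invalid";
	case RET_PF_FIXED:	return "fixed";
	case RET_PF_SPURIOUS:	return "spurious";
	default:		return "unknown";
	}
}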

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      kvm_pfn_t pfn, int max_level);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */