/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif
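
/*
 * Illustrative usage (added example, not in the original header): both
 * printers take printk-style arguments and compile to nothing unless
 * MMU_DEBUG is defined, so they cost nothing in production builds:
 *
 *	pgprintk("%s: spte %llx for gfn %llx\n", __func__, spte, gfn);
 */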

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	int root_count; /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;
};

extern struct kmem_cache *mmu_page_header_cache;

static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}
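
/*
 * Illustrative sketch (added example, not in the original header): shadow
 * page tables are page-sized allocations whose struct page ->private field
 * points back at the owning struct kvm_mmu_page, so any spte pointer can be
 * mapped to its shadow page in O(1):
 *
 *	u64 *sptep = ...;                  // an spte inside some shadow table
 *	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 *	gfn_t base_gfn = sp->gfn;          // guest range covered by this page
 */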

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the log
	 * would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages.  This also bypasses
	 * PML, since writes now result in a vmexit.
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
}
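
/*
 * Illustrative caller (added sketch; the mask name is an assumption taken
 * from the SPTE code, not defined in this header): when this returns true,
 * dirty logging falls back to write protection instead of EPT A/D bits:
 *
 *	if (kvm_vcpu_ad_need_write_protect(vcpu))
 *		spte |= SPTE_AD_WRPROT_ONLY_MASK;
 */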

bool is_nx_huge_page_enabled(void);
bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool can_unsync);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);

static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	BUG_ON(!sp->root_count);
	lockdep_assert_held(&kvm->mmu_lock);

	++sp->root_count;
}

static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	lockdep_assert_held(&kvm->mmu_lock);
	--sp->root_count;

	return !sp->root_count;
}
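
/*
 * Intended pairing (added sketch, not in the original header): root
 * references are taken and dropped only under mmu_lock, and a root page
 * becomes eligible for zapping once the last reference is gone:
 *
 *	spin_lock(&kvm->mmu_lock);
 *	kvm_mmu_get_root(kvm, sp);
 *	...
 *	if (kvm_mmu_put_root(kvm, sp))
 *		... last reference dropped, sp may now be zapped ...
 *	spin_unlock(&kvm->mmu_lock);
 */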

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), and
 * fast_page_fault().
 *
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};
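
/*
 * Illustrative dispatch (added sketch of a hypothetical caller): fault
 * handlers map the RET_PF_* code onto "re-enter the guest" vs. "emulate":
 *
 *	r = kvm_mmu_do_page_fault(vcpu, gpa, error_code, false);
 *	if (r == RET_PF_RETRY)
 *		return 1;       // resume the guest; it will fault again
 *	if (r == RET_PF_EMULATE)
 *		return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0);
 */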

/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
#define SET_SPTE_SPURIOUS		BIT(2)
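
/*
 * Illustrative consumption of these bits (added sketch, not in the original
 * header): set_spte() ORs the flags together and the caller acts on each:
 *
 *	int ret = set_spte(...);
 *	if (ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
 *		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
 *	if (ret & SET_SPTE_WRITE_PROTECTED_PT)
 *		... a shadowed guest page table was write-protected ...
 */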

int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level);
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp);
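
/*
 * Illustrative fault-path usage (added sketch, not in the original header):
 * the handler first caps the mapping level, then demotes while walking down
 * if an NX-huge-page-disallowed entry is encountered:
 *
 *	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
 *					huge_page_disallowed, &req_level);
 */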

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

#endif /* __KVM_X86_MMU_INTERNAL_H */