/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
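
/*
 * CR0/CR4 bits that are folded into the MMU role; toggling any of them
 * requires the MMU to be reconfigured.
 */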
#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
			       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
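
/*
 * Build a mask with bits s..e (inclusive) set, used to construct reserved-bit
 * masks for page-table entries.  For example, rsvd_bits(52, 62) evaluates to
 * ((2ULL << 10) - 1) << 52, i.e. bits 52-62 set.
 */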
static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	/*
	 * The BUILD_BUG_ON() above only catches compile-time constant
	 * arguments; sanitize a runtime 'e' so the shifts below stay
	 * well-defined.
	 */
	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
			  u64 fault_address, char *insn, int insn_len);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
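
/*
 * Reload the MMU root only when the current root has been invalidated (e.g.
 * by kvm_mmu_unload()); otherwise this is a cheap no-op.
 */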
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}
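
/*
 * When CR4.PCIDE=1, the PCID is the low 12 bits of CR3 (X86_CR3_PCID_MASK);
 * without PCIDE the active PCID is always 0.
 */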
static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}
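
/*
 * Hand the current root page table to hardware via the vendor's
 * .load_mmu_pgd hook; nothing to do if no root is currently loaded.
 */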
static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root_hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
					  vcpu->arch.mmu->shadow_root_level);
}
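
/*
 * Tracks the state of a single page fault: the arguments passed in, the
 * flags derived from them, and the intermediate results produced while the
 * fault is handled.
 */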
struct kvm_page_fault {
	/* arguments to kvm_mmu_do_page_fault. */
	const gpa_t addr;
	const u32 error_code;
	const bool prefetch;

	/* Derived from error_code. */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state. */
	const bool is_tdp;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), __direct_map and kvm_tdp_mmu_map.
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/* Shifted addr, or result of guest page table walk if addr is a gva. */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_faultin_pfn. */
	kvm_pfn_t pfn;
	hva_t hva;
	bool map_writable;
};

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

extern int nx_huge_pages;
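/*
 * nx_huge_pages is a writable module parameter, so it must be re-read with
 * READ_ONCE() on every use.
 */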
static inline bool is_nx_huge_page_enabled(void)
{
	return READ_ONCE(nx_huge_pages);
}
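
/*
 * Build a kvm_page_fault on the stack and dispatch it.  Under
 * CONFIG_RETPOLINE the common TDP case is called directly to avoid the cost
 * of a retpolined indirect call through mmu->page_fault.
 */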
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefetch)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
	};
#ifdef CONFIG_RETPOLINE
	if (fault.is_tdp)
		return kvm_tdp_page_fault(vcpu, &fault);
#endif
	return vcpu->arch.mmu->page_fault(vcpu, &fault);
}

/*
 * Currently, we have two sorts of write-protection: a) the first kind
 * write-protects guest pages to sync guest modifications, b) the second kind
 * is used to sync the dirty bitmap for KVM_GET_DIRTY_LOG.  The differences
 * between the two are:
 * 1) the first case clears the MMU-writable bit.
 * 2) the first case requires flushing the TLB immediately to avoid corrupting
 *    shadow page tables across vCPUs, so it must run under the protection of
 *    mmu_lock.  The second case does not need to flush the TLB until the
 *    dirty bitmap is returned to userspace, because it only write-protects
 *    pages that are logged in the bitmap; no page in the dirty bitmap is
 *    missed, so the TLB can be flushed outside of mmu_lock.
 *
 * This creates a problem: the first case can observe stale, writable TLB
 * entries left behind by the second case, which write-protects pages without
 * flushing the TLB immediately.  To make the first case aware of this, it
 * flushes the TLB whenever it write-protects an spte whose MMU-writable bit
 * is set; this works because the second case never touches MMU-writable.
 *
 * In any case, whenever an spte is updated (only permission and status bits
 * are changed), we need to check whether an spte with MMU-writable set has
 * become read-only and, if so, flush the TLB.  Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using MMU-writable and PT_WRITABLE_MASK:
 * - to check whether a writable TLB entry may exist, or whether the spte can
 *   be made writable on the mmu mapping, check MMU-writable; this is the
 *   common case.
 * - when fixing a page fault on the spte, or write-protecting it for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline bool is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = static_call(kvm_x86_get_cpl)(vcpu);
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

	/*
	 * If CPL < 3, SMAP protection is disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC.  We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits; there are 16 domains and 2
		 * attribute bits per domain in pkru.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			 ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
{
	/*
	 * Read shadow_root_allocated before related pointers.  Hence, threads
	 * reading shadow_root_allocated in any lock context are guaranteed to
	 * see the pointers.  Pairs with smp_store_release in
	 * mmu_first_shadow_root_alloc.
	 */
	return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}

#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
#else
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
#endif
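
/*
 * rmaps are only needed when shadow paging may be used, i.e. when the TDP
 * MMU is disabled or a shadow root has been allocated.
 */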
static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	return !is_tdp_mmu_enabled(kvm) || kvm_shadow_root_allocated(kvm);
}
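
/*
 * Index of gfn relative to base_gfn, in units of level's page size; e.g. for
 * a 2M level (KVM_HPAGE_GFN_SHIFT == 9) and a 2M-aligned base_gfn, the first
 * 512 gfns all map to index 0.
 */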
static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline unsigned long
__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
		      int level)
{
	return gfn_to_index(slot->base_gfn + npages - 1,
			    slot->base_gfn, level) + 1;
}

static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
	return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}
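
/*
 * Adjust the per-level page count; stat.pages[] is indexed by level - 1, so
 * 4K mappings land in slot 0.  count may be negative when mappings are
 * removed.
 */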
static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);
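
/*
 * GPAs produced via the nested_mmu are L2 GPAs and need an extra L2->L1
 * translation; GPAs from any other MMU are already L1 GPAs.
 */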
static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu,
				      gpa_t gpa, u32 access,
				      struct x86_exception *exception)
{
	if (mmu != &vcpu->arch.nested_mmu)
		return gpa;
	return translate_nested_gpa(vcpu, gpa, access, exception);
}

#endif