arch/x86/kvm/mmu/mmu.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * MMU support
9  *
10  * Copyright (C) 2006 Qumranet, Inc.
11  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12  *
13  * Authors:
14  *   Yaniv Kamay  <yaniv@qumranet.com>
15  *   Avi Kivity   <avi@qumranet.com>
16  */
17
18 #include "irq.h"
19 #include "ioapic.h"
20 #include "mmu.h"
21 #include "mmu_internal.h"
22 #include "tdp_mmu.h"
23 #include "x86.h"
24 #include "kvm_cache_regs.h"
25 #include "kvm_emulate.h"
26 #include "cpuid.h"
27 #include "spte.h"
28
29 #include <linux/kvm_host.h>
30 #include <linux/types.h>
31 #include <linux/string.h>
32 #include <linux/mm.h>
33 #include <linux/highmem.h>
34 #include <linux/moduleparam.h>
35 #include <linux/export.h>
36 #include <linux/swap.h>
37 #include <linux/hugetlb.h>
38 #include <linux/compiler.h>
39 #include <linux/srcu.h>
40 #include <linux/slab.h>
41 #include <linux/sched/signal.h>
42 #include <linux/uaccess.h>
43 #include <linux/hash.h>
44 #include <linux/kern_levels.h>
45 #include <linux/kthread.h>
46
47 #include <asm/page.h>
48 #include <asm/memtype.h>
49 #include <asm/cmpxchg.h>
50 #include <asm/io.h>
51 #include <asm/set_memory.h>
52 #include <asm/vmx.h>
53 #include <asm/kvm_page_track.h>
54 #include "trace.h"
55
56 #include "paging.h"
57
58 extern bool itlb_multihit_kvm_mitigation;
59
60 int __read_mostly nx_huge_pages = -1;
61 #ifdef CONFIG_PREEMPT_RT
62 /* Recovery can cause latency spikes, so disable it for PREEMPT_RT.  */
63 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
64 #else
65 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
66 #endif
67
68 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
69 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
70
71 static const struct kernel_param_ops nx_huge_pages_ops = {
72         .set = set_nx_huge_pages,
73         .get = param_get_bool,
74 };
75
76 static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
77         .set = set_nx_huge_pages_recovery_ratio,
78         .get = param_get_uint,
79 };
80
81 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
82 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
83 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
84                 &nx_huge_pages_recovery_ratio, 0644);
85 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
86
87 static bool __read_mostly force_flush_and_sync_on_reuse;
88 module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
89
90 /*
91  * Setting this variable to true enables Two-Dimensional Paging (TDP),
92  * where the hardware walks two page tables:
93  * 1. the guest-virtual to guest-physical translation, and
94  * 2. while doing 1., the guest-physical to host-physical translation.
95  * If the hardware supports TDP, we don't need to do shadow paging.
96  */
97 bool tdp_enabled = false;
98
99 static int max_huge_page_level __read_mostly;
100 static int tdp_root_level __read_mostly;
101 static int max_tdp_level __read_mostly;
102
103 enum {
104         AUDIT_PRE_PAGE_FAULT,
105         AUDIT_POST_PAGE_FAULT,
106         AUDIT_PRE_PTE_WRITE,
107         AUDIT_POST_PTE_WRITE,
108         AUDIT_PRE_SYNC,
109         AUDIT_POST_SYNC
110 };
111
112 #ifdef MMU_DEBUG
113 bool dbg = 0;
114 module_param(dbg, bool, 0644);
115 #endif
116
117 #define PTE_PREFETCH_NUM                8
118
119 #define PT32_LEVEL_BITS 10
120
121 #define PT32_LEVEL_SHIFT(level) \
122                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
123
124 #define PT32_LVL_OFFSET_MASK(level) \
125         (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
126                                                 * PT32_LEVEL_BITS))) - 1))
127
128 #define PT32_INDEX(address, level)\
129         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
130
131
132 #define PT32_BASE_ADDR_MASK PAGE_MASK
133 #define PT32_DIR_BASE_ADDR_MASK \
134         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
135 #define PT32_LVL_ADDR_MASK(level) \
136         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
137                                             * PT32_LEVEL_BITS))) - 1))
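/*
 * Worked example (illustrative): decomposing the 32-bit address
 * 0xc0a81000 with the PT32_* helpers above:
 *   PT32_INDEX(0xc0a81000, 2) = (0xc0a81000 >> 22) & 0x3ff = 0x302
 *   PT32_INDEX(0xc0a81000, 1) = (0xc0a81000 >> 12) & 0x3ff = 0x281
 * i.e. page-directory entry 0x302, then page-table entry 0x281.
 */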
138
139 #include <trace/events/kvm.h>
140
141 /* make pte_list_desc fit well in cache lines */
142 #define PTE_LIST_EXT 14
143
144 /*
145  * Slight optimization of cacheline layout: putting `more' and `spte_count'
146  * at the start means an access touches only a single cacheline for
147  * either the full (entries == PTE_LIST_EXT) case or entries <= 6.
148  */
149 struct pte_list_desc {
150         struct pte_list_desc *more;
151         /*
152          * Stores the number of entries in this pte_list_desc.  Doesn't need to
153          * be u64; that just makes alignment easier.  PTE_LIST_EXT means full.
154          */
155         u64 spte_count;
156         u64 *sptes[PTE_LIST_EXT];
157 };
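/*
 * Size check (illustrative): on x86-64 the layout above is 8 ('more') +
 * 8 ('spte_count') + 14 * 8 ('sptes') = 128 bytes, i.e. exactly two
 * 64-byte cachelines.  The first line holds 'more', 'spte_count' and
 * sptes[0..5], which is why accesses stay in one line for entries <= 6.
 */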
158
159 struct kvm_shadow_walk_iterator {
160         u64 addr;
161         hpa_t shadow_addr;
162         u64 *sptep;
163         int level;
164         unsigned index;
165 };
166
167 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
168         for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
169                                          (_root), (_addr));                \
170              shadow_walk_okay(&(_walker));                                 \
171              shadow_walk_next(&(_walker)))
172
173 #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
174         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
175              shadow_walk_okay(&(_walker));                      \
176              shadow_walk_next(&(_walker)))
177
178 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)     \
179         for (shadow_walk_init(&(_walker), _vcpu, _addr);                \
180              shadow_walk_okay(&(_walker)) &&                            \
181                 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });  \
182              __shadow_walk_next(&(_walker), spte))
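/*
 * Usage sketch (illustrative only, not part of the driver); assumes
 * mmu_lock is held and shadow paging is in use:
 *
 *	struct kvm_shadow_walk_iterator it;
 *
 *	for_each_shadow_entry(vcpu, addr, it)
 *		pr_debug("level %d index %u spte 0x%llx\n",
 *			 it.level, it.index, *it.sptep);
 */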
183
184 static struct kmem_cache *pte_list_desc_cache;
185 struct kmem_cache *mmu_page_header_cache;
186 static struct percpu_counter kvm_total_used_mmu_pages;
187
188 static void mmu_spte_set(u64 *sptep, u64 spte);
189 static union kvm_mmu_page_role
190 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
191
192 struct kvm_mmu_role_regs {
193         const unsigned long cr0;
194         const unsigned long cr4;
195         const u64 efer;
196 };
197
198 #define CREATE_TRACE_POINTS
199 #include "mmutrace.h"
200
201 /*
202  * Yes, lots of underscores.  They're a hint that you probably shouldn't be
203  * reading from the role_regs.  Once the mmu_role is constructed, it becomes
204  * the single source of truth for the MMU's state.
205  */
206 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)                   \
207 static inline bool __maybe_unused ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
208 {                                                                       \
209         return !!(regs->reg & flag);                                    \
210 }
211 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
212 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
213 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
214 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
215 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
216 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
217 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
218 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
219 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
220 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
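/*
 * Expansion example (illustrative): BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp,
 * X86_CR0_WP) above generates
 *
 *	static inline bool __maybe_unused
 *	____is_cr0_wp(struct kvm_mmu_role_regs *regs)
 *	{
 *		return !!(regs->cr0 & X86_CR0_WP);
 *	}
 */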
221
222 /*
223  * The MMU itself (with a valid role) is the single source of truth for the
224  * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
225  * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
226  * and the vCPU may be incorrect/irrelevant.
227  */
228 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)         \
229 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)        \
230 {                                                               \
231         return !!(mmu->mmu_role. base_or_ext . reg##_##name);   \
232 }
233 BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
234 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
235 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
236 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
237 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
238 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
239 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
240 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
241 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
242
243 static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
244 {
245         struct kvm_mmu_role_regs regs = {
246                 .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
247                 .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
248                 .efer = vcpu->arch.efer,
249         };
250
251         return regs;
252 }
253
254 static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
255 {
256         if (!____is_cr0_pg(regs))
257                 return 0;
258         else if (____is_efer_lma(regs))
259                 return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
260                                                PT64_ROOT_4LEVEL;
261         else if (____is_cr4_pae(regs))
262                 return PT32E_ROOT_LEVEL;
263         else
264                 return PT32_ROOT_LEVEL;
265 }
266
267 static inline bool kvm_available_flush_tlb_with_range(void)
268 {
269         return kvm_x86_ops.tlb_remote_flush_with_range;
270 }
271
272 static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
273                 struct kvm_tlb_range *range)
274 {
275         int ret = -ENOTSUPP;
276
277         if (range && kvm_x86_ops.tlb_remote_flush_with_range)
278                 ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
279
280         if (ret)
281                 kvm_flush_remote_tlbs(kvm);
282 }
283
284 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
285                 u64 start_gfn, u64 pages)
286 {
287         struct kvm_tlb_range range;
288
289         range.start_gfn = start_gfn;
290         range.pages = pages;
291
292         kvm_flush_remote_tlbs_with_range(kvm, &range);
293 }
294
295 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
296                            unsigned int access)
297 {
298         u64 spte = make_mmio_spte(vcpu, gfn, access);
299
300         trace_mark_mmio_spte(sptep, gfn, spte);
301         mmu_spte_set(sptep, spte);
302 }
303
304 static gfn_t get_mmio_spte_gfn(u64 spte)
305 {
306         u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
307
308         gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
309                & shadow_nonpresent_or_rsvd_mask;
310
311         return gpa >> PAGE_SHIFT;
312 }
313
314 static unsigned get_mmio_spte_access(u64 spte)
315 {
316         return spte & shadow_mmio_access_mask;
317 }
318
319 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
320 {
321         u64 kvm_gen, spte_gen, gen;
322
323         gen = kvm_vcpu_memslots(vcpu)->generation;
324         if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
325                 return false;
326
327         kvm_gen = gen & MMIO_SPTE_GEN_MASK;
328         spte_gen = get_mmio_spte_generation(spte);
329
330         trace_check_mmio_spte(spte, kvm_gen, spte_gen);
331         return likely(kvm_gen == spte_gen);
332 }
333
334 static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
335                                   struct x86_exception *exception)
336 {
337         return gpa;
338 }
339
340 static int is_cpuid_PSE36(void)
341 {
342         return 1;
343 }
344
345 static gfn_t pse36_gfn_delta(u32 gpte)
346 {
347         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
348
349         return (gpte & PT32_DIR_PSE36_MASK) << shift;
350 }
351
352 #ifdef CONFIG_X86_64
353 static void __set_spte(u64 *sptep, u64 spte)
354 {
355         WRITE_ONCE(*sptep, spte);
356 }
357
358 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
359 {
360         WRITE_ONCE(*sptep, spte);
361 }
362
363 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
364 {
365         return xchg(sptep, spte);
366 }
367
368 static u64 __get_spte_lockless(u64 *sptep)
369 {
370         return READ_ONCE(*sptep);
371 }
372 #else
373 union split_spte {
374         struct {
375                 u32 spte_low;
376                 u32 spte_high;
377         };
378         u64 spte;
379 };
380
381 static void count_spte_clear(u64 *sptep, u64 spte)
382 {
383         struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
384
385         if (is_shadow_present_pte(spte))
386                 return;
387
388         /* Ensure the spte is completely set before we increase the count */
389         smp_wmb();
390         sp->clear_spte_count++;
391 }
392
393 static void __set_spte(u64 *sptep, u64 spte)
394 {
395         union split_spte *ssptep, sspte;
396
397         ssptep = (union split_spte *)sptep;
398         sspte = (union split_spte)spte;
399
400         ssptep->spte_high = sspte.spte_high;
401
402         /*
403          * If we map the spte from nonpresent to present, we must store
404          * the high bits first and only then set the present bit, so the
405          * CPU cannot fetch the spte while we are setting it.
406          */
407         smp_wmb();
408
409         WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
410 }
411
412 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
413 {
414         union split_spte *ssptep, sspte;
415
416         ssptep = (union split_spte *)sptep;
417         sspte = (union split_spte)spte;
418
419         WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
420
421         /*
422          * If we map the spte from present to nonpresent, we must clear the
423          * present bit first so that a vCPU cannot fetch the old high bits.
424          */
425         smp_wmb();
426
427         ssptep->spte_high = sspte.spte_high;
428         count_spte_clear(sptep, spte);
429 }
430
431 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
432 {
433         union split_spte *ssptep, sspte, orig;
434
435         ssptep = (union split_spte *)sptep;
436         sspte = (union split_spte)spte;
437
438         /* xchg acts as a barrier before the setting of the high bits */
439         orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
440         orig.spte_high = ssptep->spte_high;
441         ssptep->spte_high = sspte.spte_high;
442         count_spte_clear(sptep, spte);
443
444         return orig.spte;
445 }
446
447 /*
448  * The idea of using this lightweight way to get the spte on x86_32 is
449  * from gup_get_pte() (mm/gup.c).
450  *
451  * An spte TLB flush may be pending, because kvm_set_pte_rmapp
452  * coalesces them and we are running outside of the MMU lock.  Therefore
453  * we need to protect against in-progress updates of the spte.
454  *
455  * Reading the spte while an update is in progress may get the old value
456  * for the high part of the spte.  The race is fine for a present->non-present
457  * change (because the high part of the spte is ignored for non-present spte),
458  * but for a present->present change we must reread the spte.
459  *
460  * All such changes are done in two steps (present->non-present and
461  * non-present->present), hence it is enough to count the number of
462  * present->non-present updates: if it changed while reading the spte,
463  * we might have hit the race.  This is done using clear_spte_count.
464  */
465 static u64 __get_spte_lockless(u64 *sptep)
466 {
467         struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
468         union split_spte spte, *orig = (union split_spte *)sptep;
469         int count;
470
471 retry:
472         count = sp->clear_spte_count;
473         smp_rmb();
474
475         spte.spte_low = orig->spte_low;
476         smp_rmb();
477
478         spte.spte_high = orig->spte_high;
479         smp_rmb();
480
481         if (unlikely(spte.spte_low != orig->spte_low ||
482               count != sp->clear_spte_count))
483                 goto retry;
484
485         return spte.spte;
486 }
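/*
 * Illustrative interleaving: a present->present change is performed as
 * present->non-present followed by non-present->present.  A reader that
 * samples the two halves around the intermediate step may pair an old
 * low half with a new high half, but the intermediate step bumped
 * clear_spte_count, so the recheck above fails and the reader retries.
 */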
487 #endif
488
489 static bool spte_has_volatile_bits(u64 spte)
490 {
491         if (!is_shadow_present_pte(spte))
492                 return false;
493
494         /*
495          * Always atomically update the spte if it can be updated
496          * outside of mmu_lock: this ensures the dirty bit is not lost
497          * and also gives us a stable is_writable_pte(), so that a
498          * needed TLB flush is not missed.
499          */
500         if (spte_can_locklessly_be_made_writable(spte) ||
501             is_access_track_spte(spte))
502                 return true;
503
504         if (spte_ad_enabled(spte)) {
505                 if ((spte & shadow_accessed_mask) == 0 ||
506                     (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
507                         return true;
508         }
509
510         return false;
511 }
512
513 /* Rules for using mmu_spte_set:
514  * Set the sptep from nonpresent to present.
515  * Note: the sptep being assigned *must* be either not present
516  * or in a state where the hardware will not attempt to update
517  * the spte.
518  */
519 static void mmu_spte_set(u64 *sptep, u64 new_spte)
520 {
521         WARN_ON(is_shadow_present_pte(*sptep));
522         __set_spte(sptep, new_spte);
523 }
524
525 /*
526  * Update the SPTE (excluding the PFN), but do not track changes in its
527  * accessed/dirty status.
528  */
529 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
530 {
531         u64 old_spte = *sptep;
532
533         WARN_ON(!is_shadow_present_pte(new_spte));
534
535         if (!is_shadow_present_pte(old_spte)) {
536                 mmu_spte_set(sptep, new_spte);
537                 return old_spte;
538         }
539
540         if (!spte_has_volatile_bits(old_spte))
541                 __update_clear_spte_fast(sptep, new_spte);
542         else
543                 old_spte = __update_clear_spte_slow(sptep, new_spte);
544
545         WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
546
547         return old_spte;
548 }
549
550 /* Rules for using mmu_spte_update:
551  * Update the state bits; the mapped pfn is not changed.
552  *
553  * Whenever we overwrite a writable spte with a read-only one we
554  * should flush remote TLBs.  Otherwise rmap_write_protect will
555  * find a read-only spte, even though the writable spte might
556  * still be cached in a CPU's TLB; the return value indicates
557  * this case.
558  *
559  * Returns true if the TLB needs to be flushed.
560  */
561 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
562 {
563         bool flush = false;
564         u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
565
566         if (!is_shadow_present_pte(old_spte))
567                 return false;
568
569         /*
570          * Updating the spte outside of mmu_lock is safe, since
571          * we always update it atomically; see the comments in
572          * spte_has_volatile_bits().
573          */
574         if (spte_can_locklessly_be_made_writable(old_spte) &&
575               !is_writable_pte(new_spte))
576                 flush = true;
577
578         /*
579          * Flush TLB when accessed/dirty states are changed in the page tables,
580          * to guarantee consistency between TLB and page tables.
581          */
582
583         if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
584                 flush = true;
585                 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
586         }
587
588         if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
589                 flush = true;
590                 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
591         }
592
593         return flush;
594 }
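/*
 * Typical use (illustrative): callers flush when the update reports a
 * lost-permission transition, e.g.
 *
 *	if (mmu_spte_update(sptep, new_spte))
 *		kvm_flush_remote_tlbs(kvm);
 */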
595
596 /*
597  * Rules for using mmu_spte_clear_track_bits:
598  * It sets the sptep from present to nonpresent and tracks the
599  * state bits; it is used to clear a last-level sptep.
600  * Returns the old PTE.
601  */
602 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
603 {
604         kvm_pfn_t pfn;
605         u64 old_spte = *sptep;
606         int level = sptep_to_sp(sptep)->role.level;
607
608         if (!spte_has_volatile_bits(old_spte))
609                 __update_clear_spte_fast(sptep, 0ull);
610         else
611                 old_spte = __update_clear_spte_slow(sptep, 0ull);
612
613         if (!is_shadow_present_pte(old_spte))
614                 return old_spte;
615
616         kvm_update_page_stats(kvm, level, -1);
617
618         pfn = spte_to_pfn(old_spte);
619
620         /*
621          * KVM does not hold a refcount on the page used by the
622          * KVM MMU; before the page can be reclaimed, it must be
623          * unmapped from the MMU first.
624          */
625         WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
626
627         if (is_accessed_spte(old_spte))
628                 kvm_set_pfn_accessed(pfn);
629
630         if (is_dirty_spte(old_spte))
631                 kvm_set_pfn_dirty(pfn);
632
633         return old_spte;
634 }
635
636 /*
637  * Rules for using mmu_spte_clear_no_track:
638  * Directly clear the spte without caring about its state bits;
639  * it is used when clearing an upper-level spte.
640  */
641 static void mmu_spte_clear_no_track(u64 *sptep)
642 {
643         __update_clear_spte_fast(sptep, 0ull);
644 }
645
646 static u64 mmu_spte_get_lockless(u64 *sptep)
647 {
648         return __get_spte_lockless(sptep);
649 }
650
651 /* Restore an acc-track PTE back to a regular PTE */
652 static u64 restore_acc_track_spte(u64 spte)
653 {
654         u64 new_spte = spte;
655         u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
656                          & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
657
658         WARN_ON_ONCE(spte_ad_enabled(spte));
659         WARN_ON_ONCE(!is_access_track_spte(spte));
660
661         new_spte &= ~shadow_acc_track_mask;
662         new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
663                       SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
664         new_spte |= saved_bits;
665
666         return new_spte;
667 }
668
669 /* Returns the Accessed status of the PTE and resets it at the same time. */
670 static bool mmu_spte_age(u64 *sptep)
671 {
672         u64 spte = mmu_spte_get_lockless(sptep);
673
674         if (!is_accessed_spte(spte))
675                 return false;
676
677         if (spte_ad_enabled(spte)) {
678                 clear_bit((ffs(shadow_accessed_mask) - 1),
679                           (unsigned long *)sptep);
680         } else {
681                 /*
682                  * Capture the dirty status of the page, so that it doesn't get
683                  * lost when the SPTE is marked for access tracking.
684                  */
685                 if (is_writable_pte(spte))
686                         kvm_set_pfn_dirty(spte_to_pfn(spte));
687
688                 spte = mark_spte_for_access_track(spte);
689                 mmu_spte_update_no_track(sptep, spte);
690         }
691
692         return true;
693 }
694
695 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
696 {
697         if (is_tdp_mmu(vcpu->arch.mmu)) {
698                 kvm_tdp_mmu_walk_lockless_begin();
699         } else {
700                 /*
701                  * Prevent page table teardown by making any free-er wait during
702                  * kvm_flush_remote_tlbs() IPI to all active vcpus.
703                  */
704                 local_irq_disable();
705
706                 /*
707                  * Make sure a following spte read is not reordered ahead of the write
708                  * to vcpu->mode.
709                  */
710                 smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
711         }
712 }
713
714 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
715 {
716         if (is_tdp_mmu(vcpu->arch.mmu)) {
717                 kvm_tdp_mmu_walk_lockless_end();
718         } else {
719                 /*
720                  * Make sure the write to vcpu->mode is not reordered in front of
721                  * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
722                  * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
723                  */
724                 smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
725                 local_irq_enable();
726         }
727 }
728
729 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
730 {
731         int r;
732
733         /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
734         r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
735                                        1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
736         if (r)
737                 return r;
738         r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
739                                        PT64_ROOT_MAX_LEVEL);
740         if (r)
741                 return r;
742         if (maybe_indirect) {
743                 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
744                                                PT64_ROOT_MAX_LEVEL);
745                 if (r)
746                         return r;
747         }
748         return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
749                                           PT64_ROOT_MAX_LEVEL);
750 }
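/*
 * Capacity check (illustrative): with PT64_ROOT_MAX_LEVEL = 5 and
 * PTE_PREFETCH_NUM = 8, the pte_list_desc cache is topped up to
 * 1 + 5 + 8 = 14 objects: one rmap entry for the faulting spte, one
 * parent-PTE entry per level, and one rmap entry per prefetched spte.
 */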
751
752 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
753 {
754         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
755         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
756         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
757         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
758 }
759
760 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
761 {
762         return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
763 }
764
765 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
766 {
767         kmem_cache_free(pte_list_desc_cache, pte_list_desc);
768 }
769
770 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
771 {
772         if (!sp->role.direct)
773                 return sp->gfns[index];
774
775         return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
776 }
777
778 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
779 {
780         if (!sp->role.direct) {
781                 sp->gfns[index] = gfn;
782                 return;
783         }
784
785         if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
786                 pr_err_ratelimited("gfn mismatch under direct page %llx "
787                                    "(expected %llx, got %llx)\n",
788                                    sp->gfn,
789                                    kvm_mmu_page_get_gfn(sp, index), gfn);
790 }
791
792 /*
793  * Return the pointer to the large page information for a given gfn,
794  * handling slots that are not large page aligned.
795  */
796 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
797                 const struct kvm_memory_slot *slot, int level)
798 {
799         unsigned long idx;
800
801         idx = gfn_to_index(gfn, slot->base_gfn, level);
802         return &slot->arch.lpage_info[level - 2][idx];
803 }
804
805 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
806                                             gfn_t gfn, int count)
807 {
808         struct kvm_lpage_info *linfo;
809         int i;
810
811         for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
812                 linfo = lpage_info_slot(gfn, slot, i);
813                 linfo->disallow_lpage += count;
814                 WARN_ON(linfo->disallow_lpage < 0);
815         }
816 }
817
818 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
819 {
820         update_gfn_disallow_lpage_count(slot, gfn, 1);
821 }
822
823 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
824 {
825         update_gfn_disallow_lpage_count(slot, gfn, -1);
826 }
827
828 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
829 {
830         struct kvm_memslots *slots;
831         struct kvm_memory_slot *slot;
832         gfn_t gfn;
833
834         kvm->arch.indirect_shadow_pages++;
835         gfn = sp->gfn;
836         slots = kvm_memslots_for_spte_role(kvm, sp->role);
837         slot = __gfn_to_memslot(slots, gfn);
838
839         /* the non-leaf shadow pages are kept write-protected. */
840         if (sp->role.level > PG_LEVEL_4K)
841                 return kvm_slot_page_track_add_page(kvm, slot, gfn,
842                                                     KVM_PAGE_TRACK_WRITE);
843
844         kvm_mmu_gfn_disallow_lpage(slot, gfn);
845 }
846
847 void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
848 {
849         if (sp->lpage_disallowed)
850                 return;
851
852         ++kvm->stat.nx_lpage_splits;
853         list_add_tail(&sp->lpage_disallowed_link,
854                       &kvm->arch.lpage_disallowed_mmu_pages);
855         sp->lpage_disallowed = true;
856 }
857
858 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
859 {
860         struct kvm_memslots *slots;
861         struct kvm_memory_slot *slot;
862         gfn_t gfn;
863
864         kvm->arch.indirect_shadow_pages--;
865         gfn = sp->gfn;
866         slots = kvm_memslots_for_spte_role(kvm, sp->role);
867         slot = __gfn_to_memslot(slots, gfn);
868         if (sp->role.level > PG_LEVEL_4K)
869                 return kvm_slot_page_track_remove_page(kvm, slot, gfn,
870                                                        KVM_PAGE_TRACK_WRITE);
871
872         kvm_mmu_gfn_allow_lpage(slot, gfn);
873 }
874
875 void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
876 {
877         --kvm->stat.nx_lpage_splits;
878         sp->lpage_disallowed = false;
879         list_del(&sp->lpage_disallowed_link);
880 }
881
882 static struct kvm_memory_slot *
883 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
884                             bool no_dirty_log)
885 {
886         struct kvm_memory_slot *slot;
887
888         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
889         if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
890                 return NULL;
891         if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
892                 return NULL;
893
894         return slot;
895 }
896
897 /*
898  * About rmap_head encoding:
899  *
900  * If bit zero of rmap_head->val is clear, it points to the only spte
901  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
902  * pte_list_desc containing more mappings.
903  */
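/*
 * Decoding sketch (illustrative only, not used by the driver): reading
 * the lone spte out of an rmap_head per the encoding described above.
 */
static inline u64 *example_rmap_single_spte(struct kvm_rmap_head *rmap_head)
{
        if (!rmap_head->val || (rmap_head->val & 1))
                return NULL;    /* empty, or a pte_list_desc chain */

        return (u64 *)rmap_head->val;   /* bit zero clear: one spte */
}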
904
905 /*
906  * Returns the number of pointers in the rmap chain, not counting the new one.
907  */
908 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
909                         struct kvm_rmap_head *rmap_head)
910 {
911         struct pte_list_desc *desc;
912         int count = 0;
913
914         if (!rmap_head->val) {
915                 rmap_printk("%p %llx 0->1\n", spte, *spte);
916                 rmap_head->val = (unsigned long)spte;
917         } else if (!(rmap_head->val & 1)) {
918                 rmap_printk("%p %llx 1->many\n", spte, *spte);
919                 desc = mmu_alloc_pte_list_desc(vcpu);
920                 desc->sptes[0] = (u64 *)rmap_head->val;
921                 desc->sptes[1] = spte;
922                 desc->spte_count = 2;
923                 rmap_head->val = (unsigned long)desc | 1;
924                 ++count;
925         } else {
926                 rmap_printk("%p %llx many->many\n", spte, *spte);
927                 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
928                 while (desc->spte_count == PTE_LIST_EXT) {
929                         count += PTE_LIST_EXT;
930                         if (!desc->more) {
931                                 desc->more = mmu_alloc_pte_list_desc(vcpu);
932                                 desc = desc->more;
933                                 desc->spte_count = 0;
934                                 break;
935                         }
936                         desc = desc->more;
937                 }
938                 count += desc->spte_count;
939                 desc->sptes[desc->spte_count++] = spte;
940         }
941         return count;
942 }
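/*
 * Growth example (illustrative): adding sptes A, B, C to an empty rmap:
 *   A: rmap_head->val = (unsigned long)A          (single spte, bit 0 clear)
 *   B: rmap_head->val = (unsigned long)desc | 1   (desc->sptes = {A, B})
 *   C: desc->sptes = {A, B, C}, desc->spte_count = 3
 * The returned counts are 0, 1 and 2 respectively.
 */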
943
944 static void
945 pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
946                            struct pte_list_desc *desc, int i,
947                            struct pte_list_desc *prev_desc)
948 {
949         int j = desc->spte_count - 1;
950
951         desc->sptes[i] = desc->sptes[j];
952         desc->sptes[j] = NULL;
953         desc->spte_count--;
954         if (desc->spte_count)
955                 return;
956         if (!prev_desc && !desc->more)
957                 rmap_head->val = 0;
958         else
959                 if (prev_desc)
960                         prev_desc->more = desc->more;
961                 else
962                         rmap_head->val = (unsigned long)desc->more | 1;
963         mmu_free_pte_list_desc(desc);
964 }
965
966 static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
967 {
968         struct pte_list_desc *desc;
969         struct pte_list_desc *prev_desc;
970         int i;
971
972         if (!rmap_head->val) {
973                 pr_err("%s: %p 0->BUG\n", __func__, spte);
974                 BUG();
975         } else if (!(rmap_head->val & 1)) {
976                 rmap_printk("%p 1->0\n", spte);
977                 if ((u64 *)rmap_head->val != spte) {
978                         pr_err("%s:  %p 1->BUG\n", __func__, spte);
979                         BUG();
980                 }
981                 rmap_head->val = 0;
982         } else {
983                 rmap_printk("%p many->many\n", spte);
984                 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
985                 prev_desc = NULL;
986                 while (desc) {
987                         for (i = 0; i < desc->spte_count; ++i) {
988                                 if (desc->sptes[i] == spte) {
989                                         pte_list_desc_remove_entry(rmap_head,
990                                                         desc, i, prev_desc);
991                                         return;
992                                 }
993                         }
994                         prev_desc = desc;
995                         desc = desc->more;
996                 }
997                 pr_err("%s: %p many->many\n", __func__, spte);
998                 BUG();
999         }
1000 }
1001
1002 static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1003                             u64 *sptep)
1004 {
1005         mmu_spte_clear_track_bits(kvm, sptep);
1006         __pte_list_remove(sptep, rmap_head);
1007 }
1008
1009 /* Return true if rmap existed, false otherwise */
1010 static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1011 {
1012         struct pte_list_desc *desc, *next;
1013         int i;
1014
1015         if (!rmap_head->val)
1016                 return false;
1017
1018         if (!(rmap_head->val & 1)) {
1019                 mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
1020                 goto out;
1021         }
1022
1023         desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1024
1025         for (; desc; desc = next) {
1026                 for (i = 0; i < desc->spte_count; i++)
1027                         mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
1028                 next = desc->more;
1029                 mmu_free_pte_list_desc(desc);
1030         }
1031 out:
1032         /* rmap_head is meaningless now, remember to reset it */
1033         rmap_head->val = 0;
1034         return true;
1035 }
1036
1037 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
1038 {
1039         struct pte_list_desc *desc;
1040         unsigned int count = 0;
1041
1042         if (!rmap_head->val)
1043                 return 0;
1044         else if (!(rmap_head->val & 1))
1045                 return 1;
1046
1047         desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1048
1049         while (desc) {
1050                 count += desc->spte_count;
1051                 desc = desc->more;
1052         }
1053
1054         return count;
1055 }
1056
1057 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1058                                          const struct kvm_memory_slot *slot)
1059 {
1060         unsigned long idx;
1061
1062         idx = gfn_to_index(gfn, slot->base_gfn, level);
1063         return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1064 }
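/*
 * Index example (illustrative): for a slot with base_gfn 0x1000, the
 * rmap head of gfn 0x1a00 at PG_LEVEL_2M sits at index
 * (0x1a00 >> 9) - (0x1000 >> 9) = 5, i.e. the sixth 2M rmap of the slot.
 */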
1065
1066 static bool rmap_can_add(struct kvm_vcpu *vcpu)
1067 {
1068         struct kvm_mmu_memory_cache *mc;
1069
1070         mc = &vcpu->arch.mmu_pte_list_desc_cache;
1071         return kvm_mmu_memory_cache_nr_free_objects(mc);
1072 }
1073
1074 static void rmap_remove(struct kvm *kvm, u64 *spte)
1075 {
1076         struct kvm_memslots *slots;
1077         struct kvm_memory_slot *slot;
1078         struct kvm_mmu_page *sp;
1079         gfn_t gfn;
1080         struct kvm_rmap_head *rmap_head;
1081
1082         sp = sptep_to_sp(spte);
1083         gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1084
1085         /*
1086          * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
1087          * so we have to determine which memslots to use based on context
1088          * information in sp->role.
1089          */
1090         slots = kvm_memslots_for_spte_role(kvm, sp->role);
1091
1092         slot = __gfn_to_memslot(slots, gfn);
1093         rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1094
1095         __pte_list_remove(spte, rmap_head);
1096 }
1097
1098 /*
1099  * Used by the following functions to iterate through the sptes linked by a
1100  * rmap.  All fields are private and not assumed to be used outside.
1101  */
1102 struct rmap_iterator {
1103         /* private fields */
1104         struct pte_list_desc *desc;     /* holds the sptep if not NULL */
1105         int pos;                        /* index of the sptep */
1106 };
1107
1108 /*
1109  * Iteration must be started by this function.  This should also be used after
1110  * removing/dropping sptes from the rmap link because in such cases the
1111  * information in the iterator may not be valid.
1112  *
1113  * Returns sptep if found, NULL otherwise.
1114  */
1115 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1116                            struct rmap_iterator *iter)
1117 {
1118         u64 *sptep;
1119
1120         if (!rmap_head->val)
1121                 return NULL;
1122
1123         if (!(rmap_head->val & 1)) {
1124                 iter->desc = NULL;
1125                 sptep = (u64 *)rmap_head->val;
1126                 goto out;
1127         }
1128
1129         iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1130         iter->pos = 0;
1131         sptep = iter->desc->sptes[iter->pos];
1132 out:
1133         BUG_ON(!is_shadow_present_pte(*sptep));
1134         return sptep;
1135 }
1136
1137 /*
1138  * Must be used with a valid iterator: e.g. after rmap_get_first().
1139  *
1140  * Returns sptep if found, NULL otherwise.
1141  */
1142 static u64 *rmap_get_next(struct rmap_iterator *iter)
1143 {
1144         u64 *sptep;
1145
1146         if (iter->desc) {
1147                 if (iter->pos < PTE_LIST_EXT - 1) {
1148                         ++iter->pos;
1149                         sptep = iter->desc->sptes[iter->pos];
1150                         if (sptep)
1151                                 goto out;
1152                 }
1153
1154                 iter->desc = iter->desc->more;
1155
1156                 if (iter->desc) {
1157                         iter->pos = 0;
1158                         /* desc->sptes[0] cannot be NULL */
1159                         sptep = iter->desc->sptes[iter->pos];
1160                         goto out;
1161                 }
1162         }
1163
1164         return NULL;
1165 out:
1166         BUG_ON(!is_shadow_present_pte(*sptep));
1167         return sptep;
1168 }
1169
1170 #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)                 \
1171         for (_spte_ = rmap_get_first(_rmap_head_, _iter_);              \
1172              _spte_; _spte_ = rmap_get_next(_iter_))
1173
1174 static void drop_spte(struct kvm *kvm, u64 *sptep)
1175 {
1176         u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1177
1178         if (is_shadow_present_pte(old_spte))
1179                 rmap_remove(kvm, sptep);
1180 }
1181
1182
1183 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1184 {
1185         if (is_large_pte(*sptep)) {
1186                 WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1187                 drop_spte(kvm, sptep);
1188                 return true;
1189         }
1190
1191         return false;
1192 }
1193
1194 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1195 {
1196         if (__drop_large_spte(vcpu->kvm, sptep)) {
1197                 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1198
1199                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1200                         KVM_PAGES_PER_HPAGE(sp->role.level));
1201         }
1202 }
1203
1204 /*
1205  * Write-protect the specified @sptep; @pt_protect indicates whether the
1206  * spte write-protection is caused by protecting the shadow page table.
1207  *
1208  * Note: write protection differs between dirty logging and spte
1209  * protection:
1210  * - for dirty logging, the spte can be made writable at any time if
1211  *   its dirty bitmap is properly set.
1212  * - for spte protection, the spte can be made writable only after
1213  *   unsync-ing the shadow page.
1214  *
1215  * Return true if the TLB needs to be flushed.
1216  */
1217 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1218 {
1219         u64 spte = *sptep;
1220
1221         if (!is_writable_pte(spte) &&
1222               !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1223                 return false;
1224
1225         rmap_printk("spte %p %llx\n", sptep, *sptep);
1226
1227         if (pt_protect)
1228                 spte &= ~shadow_mmu_writable_mask;
1229         spte = spte & ~PT_WRITABLE_MASK;
1230
1231         return mmu_spte_update(sptep, spte);
1232 }
1233
1234 static bool __rmap_write_protect(struct kvm *kvm,
1235                                  struct kvm_rmap_head *rmap_head,
1236                                  bool pt_protect)
1237 {
1238         u64 *sptep;
1239         struct rmap_iterator iter;
1240         bool flush = false;
1241
1242         for_each_rmap_spte(rmap_head, &iter, sptep)
1243                 flush |= spte_write_protect(sptep, pt_protect);
1244
1245         return flush;
1246 }
1247
1248 static bool spte_clear_dirty(u64 *sptep)
1249 {
1250         u64 spte = *sptep;
1251
1252         rmap_printk("spte %p %llx\n", sptep, *sptep);
1253
1254         MMU_WARN_ON(!spte_ad_enabled(spte));
1255         spte &= ~shadow_dirty_mask;
1256         return mmu_spte_update(sptep, spte);
1257 }
1258
1259 static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1260 {
1261         bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1262                                                (unsigned long *)sptep);
1263         if (was_writable && !spte_ad_enabled(*sptep))
1264                 kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1265
1266         return was_writable;
1267 }
1268
1269 /*
1270  * Gets the GFN ready for another round of dirty logging by clearing the
1271  *      - D bit on ad-enabled SPTEs, and
1272  *      - W bit on ad-disabled SPTEs.
1273  * Returns true iff any D or W bits were cleared.
1274  */
1275 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1276                                const struct kvm_memory_slot *slot)
1277 {
1278         u64 *sptep;
1279         struct rmap_iterator iter;
1280         bool flush = false;
1281
1282         for_each_rmap_spte(rmap_head, &iter, sptep)
1283                 if (spte_ad_need_write_protect(*sptep))
1284                         flush |= spte_wrprot_for_clear_dirty(sptep);
1285                 else
1286                         flush |= spte_clear_dirty(sptep);
1287
1288         return flush;
1289 }
1290
1291 /**
1292  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1293  * @kvm: kvm instance
1294  * @slot: slot to protect
1295  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1296  * @mask: indicates which pages we should protect
1297  *
1298  * Used when we do not need to care about huge page mappings.
1299  */
1300 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1301                                      struct kvm_memory_slot *slot,
1302                                      gfn_t gfn_offset, unsigned long mask)
1303 {
1304         struct kvm_rmap_head *rmap_head;
1305
1306         if (is_tdp_mmu_enabled(kvm))
1307                 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1308                                 slot->base_gfn + gfn_offset, mask, true);
1309
1310         if (!kvm_memslots_have_rmaps(kvm))
1311                 return;
1312
1313         while (mask) {
1314                 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1315                                         PG_LEVEL_4K, slot);
1316                 __rmap_write_protect(kvm, rmap_head, false);
1317
1318                 /* clear the first set bit */
1319                 mask &= mask - 1;
1320         }
1321 }
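/*
 * Mask walk example (illustrative): mask = 0b10010 covers
 * gfn_offset + 1 and gfn_offset + 4; each iteration __ffs() selects the
 * lowest set bit and 'mask &= mask - 1' clears it, so the loop runs
 * exactly once per set bit.
 */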
1322
1323 /**
1324  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1325  * protect the page if the D-bit isn't supported.
1326  * @kvm: kvm instance
1327  * @slot: slot to clear D-bit
1328  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1329  * @mask: indicates which pages we should clear D-bit
1330  *
1331  * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1332  */
1333 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1334                                          struct kvm_memory_slot *slot,
1335                                          gfn_t gfn_offset, unsigned long mask)
1336 {
1337         struct kvm_rmap_head *rmap_head;
1338
1339         if (is_tdp_mmu_enabled(kvm))
1340                 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1341                                 slot->base_gfn + gfn_offset, mask, false);
1342
1343         if (!kvm_memslots_have_rmaps(kvm))
1344                 return;
1345
1346         while (mask) {
1347                 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1348                                         PG_LEVEL_4K, slot);
1349                 __rmap_clear_dirty(kvm, rmap_head, slot);
1350
1351                 /* clear the first set bit */
1352                 mask &= mask - 1;
1353         }
1354 }
1355
1356 /**
1357  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1358  * PT level pages.
1359  *
1360  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1361  * enable dirty logging for them.
1362  *
1363  * We need to care about huge page mappings: e.g. during dirty logging we may
1364  * have such mappings.
1365  */
1366 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1367                                 struct kvm_memory_slot *slot,
1368                                 gfn_t gfn_offset, unsigned long mask)
1369 {
1370         /*
1371          * Huge pages are NOT write protected when we start dirty logging in
1372          * initially-all-set mode; we must write-protect them here so that they
1373          * are split to 4K on the first write.
1374          *
1375          * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
1376          * of memslot has no such restriction, so the range can cross two large
1377          * pages.
1378          */
1379         if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1380                 gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1381                 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1382
1383                 kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
1384
1385                 /* Cross two large pages? */
1386                 if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1387                     ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1388                         kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
1389                                                        PG_LEVEL_2M);
1390         }
1391
1392         /* Now handle 4K PTEs.  */
1393         if (kvm_x86_ops.cpu_dirty_log_size)
1394                 kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1395         else
1396                 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1397 }
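/*
 * Boundary example (illustrative, hypothetical numbers): with
 * base_gfn = 0x30 and gfn_offset = 0x1c0, mask = (1ull << 63) | 1
 * spans gfns 0x1f0..0x22f, which straddles the 2M boundary at gfn
 * 0x200; the second kvm_mmu_slot_gfn_write_protect() call above
 * handles the tail large page.
 */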
1398
1399 int kvm_cpu_dirty_log_size(void)
1400 {
1401         return kvm_x86_ops.cpu_dirty_log_size;
1402 }
1403
1404 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1405                                     struct kvm_memory_slot *slot, u64 gfn,
1406                                     int min_level)
1407 {
1408         struct kvm_rmap_head *rmap_head;
1409         int i;
1410         bool write_protected = false;
1411
1412         if (kvm_memslots_have_rmaps(kvm)) {
1413                 for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1414                         rmap_head = gfn_to_rmap(gfn, i, slot);
1415                         write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1416                 }
1417         }
1418
1419         if (is_tdp_mmu_enabled(kvm))
1420                 write_protected |=
1421                         kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1422
1423         return write_protected;
1424 }
1425
1426 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1427 {
1428         struct kvm_memory_slot *slot;
1429
1430         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1431         return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1432 }
1433
1434 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1435                           const struct kvm_memory_slot *slot)
1436 {
1437         return pte_list_destroy(kvm, rmap_head);
1438 }
1439
1440 static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1441                             struct kvm_memory_slot *slot, gfn_t gfn, int level,
1442                             pte_t unused)
1443 {
1444         return kvm_zap_rmapp(kvm, rmap_head, slot);
1445 }
1446
1447 static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1448                               struct kvm_memory_slot *slot, gfn_t gfn, int level,
1449                               pte_t pte)
1450 {
1451         u64 *sptep;
1452         struct rmap_iterator iter;
1453         int need_flush = 0;
1454         u64 new_spte;
1455         kvm_pfn_t new_pfn;
1456
1457         WARN_ON(pte_huge(pte));
1458         new_pfn = pte_pfn(pte);
1459
1460 restart:
1461         for_each_rmap_spte(rmap_head, &iter, sptep) {
1462                 rmap_printk("spte %p %llx gfn %llx (%d)\n",
1463                             sptep, *sptep, gfn, level);
1464
1465                 need_flush = 1;
1466
1467                 if (pte_write(pte)) {
1468                         pte_list_remove(kvm, rmap_head, sptep);
1469                         goto restart;
1470                 } else {
1471                         new_spte = kvm_mmu_changed_pte_notifier_make_spte(
1472                                         *sptep, new_pfn);
1473
1474                         mmu_spte_clear_track_bits(kvm, sptep);
1475                         mmu_spte_set(sptep, new_spte);
1476                 }
1477         }
1478
1479         if (need_flush && kvm_available_flush_tlb_with_range()) {
1480                 kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1481                 return 0;
1482         }
1483
1484         return need_flush;
1485 }
1486
1487 struct slot_rmap_walk_iterator {
1488         /* input fields. */
1489         const struct kvm_memory_slot *slot;
1490         gfn_t start_gfn;
1491         gfn_t end_gfn;
1492         int start_level;
1493         int end_level;
1494
1495         /* output fields. */
1496         gfn_t gfn;
1497         struct kvm_rmap_head *rmap;
1498         int level;
1499
1500         /* private field. */
1501         struct kvm_rmap_head *end_rmap;
1502 };
1503
1504 static void
1505 rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1506 {
1507         iterator->level = level;
1508         iterator->gfn = iterator->start_gfn;
1509         iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1510         iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1511 }
1512
1513 static void
1514 slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1515                     const struct kvm_memory_slot *slot, int start_level,
1516                     int end_level, gfn_t start_gfn, gfn_t end_gfn)
1517 {
1518         iterator->slot = slot;
1519         iterator->start_level = start_level;
1520         iterator->end_level = end_level;
1521         iterator->start_gfn = start_gfn;
1522         iterator->end_gfn = end_gfn;
1523
1524         rmap_walk_init_level(iterator, iterator->start_level);
1525 }
1526
1527 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1528 {
1529         return !!iterator->rmap;
1530 }
1531
1532 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1533 {
1534         if (++iterator->rmap <= iterator->end_rmap) {
1535                 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1536                 return;
1537         }
1538
1539         if (++iterator->level > iterator->end_level) {
1540                 iterator->rmap = NULL;
1541                 return;
1542         }
1543
1544         rmap_walk_init_level(iterator, iterator->level);
1545 }
1546
1547 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,    \
1548            _start_gfn, _end_gfn, _iter_)                                \
1549         for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,         \
1550                                  _end_level_, _start_gfn, _end_gfn);    \
1551              slot_rmap_walk_okay(_iter_);                               \
1552              slot_rmap_walk_next(_iter_))
1553
1554 typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1555                                struct kvm_memory_slot *slot, gfn_t gfn,
1556                                int level, pte_t pte);
1557
1558 static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
1559                                                  struct kvm_gfn_range *range,
1560                                                  rmap_handler_t handler)
1561 {
1562         struct slot_rmap_walk_iterator iterator;
1563         bool ret = false;
1564
1565         for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1566                                  range->start, range->end - 1, &iterator)
1567                 ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
1568                                iterator.level, range->pte);
1569
1570         return ret;
1571 }
1572
1573 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1574 {
1575         bool flush = false;
1576
1577         if (kvm_memslots_have_rmaps(kvm))
1578                 flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
1579
1580         if (is_tdp_mmu_enabled(kvm))
1581                 flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1582
1583         return flush;
1584 }
1585
1586 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1587 {
1588         bool flush = false;
1589
1590         if (kvm_memslots_have_rmaps(kvm))
1591                 flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
1592
1593         if (is_tdp_mmu_enabled(kvm))
1594                 flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
1595
1596         return flush;
1597 }
1598
1599 static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1600                           struct kvm_memory_slot *slot, gfn_t gfn, int level,
1601                           pte_t unused)
1602 {
1603         u64 *sptep;
1604         struct rmap_iterator iter;
1605         int young = 0;
1606
1607         for_each_rmap_spte(rmap_head, &iter, sptep)
1608                 young |= mmu_spte_age(sptep);
1609
1610         return young;
1611 }
1612
1613 static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1614                                struct kvm_memory_slot *slot, gfn_t gfn,
1615                                int level, pte_t unused)
1616 {
1617         u64 *sptep;
1618         struct rmap_iterator iter;
1619
1620         for_each_rmap_spte(rmap_head, &iter, sptep)
1621                 if (is_accessed_spte(*sptep))
1622                         return true;
1623         return false;
1624 }
1625
1626 #define RMAP_RECYCLE_THRESHOLD 1000
1627
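/*
 * Add @spte to the rmap chain for @gfn.  If a single chain grows past
 * RMAP_RECYCLE_THRESHOLD entries, zap the whole chain and flush the TLBs;
 * an overly long pte_list is expensive to walk on every rmap operation.
 */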
1628 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1629 {
1630         struct kvm_memory_slot *slot;
1631         struct kvm_mmu_page *sp;
1632         struct kvm_rmap_head *rmap_head;
1633         int rmap_count;
1634
1635         sp = sptep_to_sp(spte);
1636         kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1637         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1638         rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1639         rmap_count = pte_list_add(vcpu, spte, rmap_head);
1640
1641         if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
1642                 kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
1643                 kvm_flush_remote_tlbs_with_address(
1644                                 vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
1645         }
1646 }
1647
1648 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1649 {
1650         bool young = false;
1651
1652         if (kvm_memslots_have_rmaps(kvm))
1653                 young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
1654
1655         if (is_tdp_mmu_enabled(kvm))
1656                 young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1657
1658         return young;
1659 }
1660
1661 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1662 {
1663         bool young = false;
1664
1665         if (kvm_memslots_have_rmaps(kvm))
1666                 young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
1667
1668         if (is_tdp_mmu_enabled(kvm))
1669                 young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1670
1671         return young;
1672 }
1673
1674 #ifdef MMU_DEBUG
1675 static int is_empty_shadow_page(u64 *spt)
1676 {
1677         u64 *pos;
1678         u64 *end;
1679
1680         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1681                 if (is_shadow_present_pte(*pos)) {
1682                         printk(KERN_ERR "%s: %p %llx\n", __func__,
1683                                pos, *pos);
1684                         return 0;
1685                 }
1686         return 1;
1687 }
1688 #endif
1689
1690 /*
1691  * This value is the sum of all of the kvm instances'
1692  * kvm->arch.n_used_mmu_pages values.  We need a global,
1693  * aggregate version in order to make the slab shrinker
1694  * faster.
1695  */
1696 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
1697 {
1698         kvm->arch.n_used_mmu_pages += nr;
1699         percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1700 }
1701
1702 static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1703 {
1704         MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1705         hlist_del(&sp->hash_link);
1706         list_del(&sp->link);
1707         free_page((unsigned long)sp->spt);
1708         if (!sp->role.direct)
1709                 free_page((unsigned long)sp->gfns);
1710         kmem_cache_free(mmu_page_header_cache, sp);
1711 }
1712
1713 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1714 {
1715         return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1716 }
1717
1718 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1719                                     struct kvm_mmu_page *sp, u64 *parent_pte)
1720 {
1721         if (!parent_pte)
1722                 return;
1723
1724         pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1725 }
1726
1727 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1728                                        u64 *parent_pte)
1729 {
1730         __pte_list_remove(parent_pte, &sp->parent_ptes);
1731 }
1732
1733 static void drop_parent_pte(struct kvm_mmu_page *sp,
1734                             u64 *parent_pte)
1735 {
1736         mmu_page_remove_parent_pte(sp, parent_pte);
1737         mmu_spte_clear_no_track(parent_pte);
1738 }
1739
1740 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1741 {
1742         struct kvm_mmu_page *sp;
1743
1744         sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
1745         sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1746         if (!direct)
1747                 sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1748         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1749
1750         /*
1751          * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
1752          * depends on valid pages being added to the head of the list.  See
1753          * comments in kvm_zap_obsolete_pages().
1754          */
1755         sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1756         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1757         kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1758         return sp;
1759 }
1760
1761 static void mark_unsync(u64 *spte);
1762 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1763 {
1764         u64 *sptep;
1765         struct rmap_iterator iter;
1766
1767         for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1768                 mark_unsync(sptep);
1769         }
1770 }
1771
1772 static void mark_unsync(u64 *spte)
1773 {
1774         struct kvm_mmu_page *sp;
1775         unsigned int index;
1776
1777         sp = sptep_to_sp(spte);
1778         index = spte - sp->spt;
1779         if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1780                 return;
1781         if (sp->unsync_children++)
1782                 return;
1783         kvm_mmu_mark_parents_unsync(sp);
1784 }
1785
1786 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1787                                struct kvm_mmu_page *sp)
1788 {
1789         return -1;
1790 }
1791
1792 #define KVM_PAGE_ARRAY_NR 16
1793
1794 struct kvm_mmu_pages {
1795         struct mmu_page_and_offset {
1796                 struct kvm_mmu_page *sp;
1797                 unsigned int idx;
1798         } page[KVM_PAGE_ARRAY_NR];
1799         unsigned int nr;
1800 };
1801
1802 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1803                          int idx)
1804 {
1805         int i;
1806
1807         if (sp->unsync)
1808                 for (i = 0; i < pvec->nr; i++)
1809                         if (pvec->page[i].sp == sp)
1810                                 return 0;
1811
1812         pvec->page[pvec->nr].sp = sp;
1813         pvec->page[pvec->nr].idx = idx;
1814         pvec->nr++;
1815         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1816 }
1817
1818 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1819 {
1820         --sp->unsync_children;
1821         WARN_ON((int)sp->unsync_children < 0);
1822         __clear_bit(idx, sp->unsync_child_bitmap);
1823 }
1824
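/*
 * Recursively collect the unsync descendants of @sp into @pvec.  Returns the
 * number of unsync leaf pages found, or -ENOSPC if @pvec filled up before the
 * walk completed; callers simply process the partial batch and walk again.
 */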
1825 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1826                            struct kvm_mmu_pages *pvec)
1827 {
1828         int i, ret, nr_unsync_leaf = 0;
1829
1830         for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1831                 struct kvm_mmu_page *child;
1832                 u64 ent = sp->spt[i];
1833
1834                 if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1835                         clear_unsync_child_bit(sp, i);
1836                         continue;
1837                 }
1838
1839                 child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
1840
1841                 if (child->unsync_children) {
1842                         if (mmu_pages_add(pvec, child, i))
1843                                 return -ENOSPC;
1844
1845                         ret = __mmu_unsync_walk(child, pvec);
1846                         if (!ret) {
1847                                 clear_unsync_child_bit(sp, i);
1848                                 continue;
1849                         } else if (ret > 0) {
1850                                 nr_unsync_leaf += ret;
1851                         } else
1852                                 return ret;
1853                 } else if (child->unsync) {
1854                         nr_unsync_leaf++;
1855                         if (mmu_pages_add(pvec, child, i))
1856                                 return -ENOSPC;
1857                 } else
1858                         clear_unsync_child_bit(sp, i);
1859         }
1860
1861         return nr_unsync_leaf;
1862 }
1863
1864 #define INVALID_INDEX (-1)
1865
1866 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1867                            struct kvm_mmu_pages *pvec)
1868 {
1869         pvec->nr = 0;
1870         if (!sp->unsync_children)
1871                 return 0;
1872
1873         mmu_pages_add(pvec, sp, INVALID_INDEX);
1874         return __mmu_unsync_walk(sp, pvec);
1875 }
1876
1877 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1878 {
1879         WARN_ON(!sp->unsync);
1880         trace_kvm_mmu_sync_page(sp);
1881         sp->unsync = 0;
1882         --kvm->stat.mmu_unsync;
1883 }
1884
1885 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1886                                      struct list_head *invalid_list);
1887 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1888                                     struct list_head *invalid_list);
1889
1890 #define for_each_valid_sp(_kvm, _sp, _list)                             \
1891         hlist_for_each_entry(_sp, _list, hash_link)                     \
1892                 if (is_obsolete_sp((_kvm), (_sp))) {                    \
1893                 } else
1894
1895 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                 \
1896         for_each_valid_sp(_kvm, _sp,                                    \
1897           &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])     \
1898                 if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
1899
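/*
 * Usage sketch (illustrative): the lookup pattern used by, e.g.,
 * kvm_mmu_unprotect_page() later in this file is
 *
 *	for_each_gfn_indirect_valid_sp(kvm, sp, gfn)
 *		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 *
 * which skips obsolete pages, direct pages, and pages whose gfn merely
 * collided in the hash bucket.
 */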
1900 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1901                          struct list_head *invalid_list)
1902 {
1903         int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
1904
1905         if (ret < 0) {
1906                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1907                 return false;
1908         }
1909
1910         return !!ret;
1911 }
1912
1913 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1914                                         struct list_head *invalid_list,
1915                                         bool remote_flush)
1916 {
1917         if (!remote_flush && list_empty(invalid_list))
1918                 return false;
1919
1920         if (!list_empty(invalid_list))
1921                 kvm_mmu_commit_zap_page(kvm, invalid_list);
1922         else
1923                 kvm_flush_remote_tlbs(kvm);
1924         return true;
1925 }
1926
1927 #ifdef CONFIG_KVM_MMU_AUDIT
1928 #include "mmu_audit.c"
1929 #else
1930 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
1931 static void mmu_audit_disable(void) { }
1932 #endif
1933
1934 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1935 {
1936         return sp->role.invalid ||
1937                unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1938 }
1939
1940 struct mmu_page_path {
1941         struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1942         unsigned int idx[PT64_ROOT_MAX_LEVEL];
1943 };
1944
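/*
 * for_each_sp() visits the pages collected by mmu_unsync_walk().
 * mmu_page_path records, for each level, the parent page and the index of
 * the entry that was followed, so that mmu_pages_clear_parents() can clear
 * the unsync_child_bitmap bits on the way back up.
 */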
1945 #define for_each_sp(pvec, sp, parents, i)                       \
1946                 for (i = mmu_pages_first(&pvec, &parents);      \
1947                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1948                         i = mmu_pages_next(&pvec, &parents, i))
1949
1950 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1951                           struct mmu_page_path *parents,
1952                           int i)
1953 {
1954         int n;
1955
1956         for (n = i+1; n < pvec->nr; n++) {
1957                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1958                 unsigned idx = pvec->page[n].idx;
1959                 int level = sp->role.level;
1960
1961                 parents->idx[level-1] = idx;
1962                 if (level == PG_LEVEL_4K)
1963                         break;
1964
1965                 parents->parent[level-2] = sp;
1966         }
1967
1968         return n;
1969 }
1970
1971 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
1972                            struct mmu_page_path *parents)
1973 {
1974         struct kvm_mmu_page *sp;
1975         int level;
1976
1977         if (pvec->nr == 0)
1978                 return 0;
1979
1980         WARN_ON(pvec->page[0].idx != INVALID_INDEX);
1981
1982         sp = pvec->page[0].sp;
1983         level = sp->role.level;
1984         WARN_ON(level == PG_LEVEL_4K);
1985
1986         parents->parent[level-2] = sp;
1987
1988         /* Also set up a sentinel.  Further entries in pvec are all
1989          * children of sp, so this element is never overwritten.
1990          */
1991         parents->parent[level-1] = NULL;
1992         return mmu_pages_next(pvec, parents, 0);
1993 }
1994
1995 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1996 {
1997         struct kvm_mmu_page *sp;
1998         unsigned int level = 0;
1999
2000         do {
2001                 unsigned int idx = parents->idx[level];
2002                 sp = parents->parent[level];
2003                 if (!sp)
2004                         return;
2005
2006                 WARN_ON(idx == INVALID_INDEX);
2007                 clear_unsync_child_bit(sp, idx);
2008                 level++;
2009         } while (!sp->unsync_children);
2010 }
2011
2012 static int mmu_sync_children(struct kvm_vcpu *vcpu,
2013                              struct kvm_mmu_page *parent, bool can_yield)
2014 {
2015         int i;
2016         struct kvm_mmu_page *sp;
2017         struct mmu_page_path parents;
2018         struct kvm_mmu_pages pages;
2019         LIST_HEAD(invalid_list);
2020         bool flush = false;
2021
2022         while (mmu_unsync_walk(parent, &pages)) {
2023                 bool protected = false;
2024
2025                 for_each_sp(pages, sp, parents, i)
2026                         protected |= rmap_write_protect(vcpu, sp->gfn);
2027
2028                 if (protected) {
2029                         kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
2030                         flush = false;
2031                 }
2032
2033                 for_each_sp(pages, sp, parents, i) {
2034                         kvm_unlink_unsync_page(vcpu->kvm, sp);
2035                         flush |= kvm_sync_page(vcpu, sp, &invalid_list);
2036                         mmu_pages_clear_parents(&parents);
2037                 }
2038                 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2039                         kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2040                         if (!can_yield) {
2041                                 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2042                                 return -EINTR;
2043                         }
2044
2045                         cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2046                         flush = false;
2047                 }
2048         }
2049
2050         kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2051         return 0;
2052 }
2053
2054 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2055 {
2056         atomic_set(&sp->write_flooding_count, 0);
2057 }
2058
2059 static void clear_sp_write_flooding_count(u64 *spte)
2060 {
2061         __clear_sp_write_flooding_count(sptep_to_sp(spte));
2062 }
2063
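/*
 * Look up a shadow page for the given gfn+role in the hash table, or
 * allocate a new one on a miss.  A matching unsync page is synced in place;
 * pages with a different role for the same gfn are left alone, except that
 * stale unsync pages are zapped when an upper-level page is being created
 * (see the comments in the loop below).
 */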
2064 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2065                                              gfn_t gfn,
2066                                              gva_t gaddr,
2067                                              unsigned level,
2068                                              int direct,
2069                                              unsigned int access)
2070 {
2071         bool direct_mmu = vcpu->arch.mmu->direct_map;
2072         union kvm_mmu_page_role role;
2073         struct hlist_head *sp_list;
2074         unsigned quadrant;
2075         struct kvm_mmu_page *sp;
2076         int collisions = 0;
2077         LIST_HEAD(invalid_list);
2078
2079         role = vcpu->arch.mmu->mmu_role.base;
2080         role.level = level;
2081         role.direct = direct;
2082         if (role.direct)
2083                 role.gpte_is_8_bytes = true;
2084         role.access = access;
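        /*
         * Illustrative note (an assumption spelled out, not original text):
         * with a 32-bit guest, one guest page table page holds 1024 4-byte
         * GPTEs while a shadow page holds only 512 8-byte SPTEs, so a
         * single guest page is shadowed by multiple shadow pages
         * distinguished by role.quadrant.
         */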
2085         if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2086                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2087                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2088                 role.quadrant = quadrant;
2089         }
2090
2091         sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2092         for_each_valid_sp(vcpu->kvm, sp, sp_list) {
2093                 if (sp->gfn != gfn) {
2094                         collisions++;
2095                         continue;
2096                 }
2097
2098                 if (sp->role.word != role.word) {
2099                         /*
2100                          * If the guest is creating an upper-level page, zap
2101                          * unsync pages for the same gfn.  While it's possible
2102                          * the guest is using recursive page tables, in all
2103                          * likelihood the guest has stopped using the unsync
2104                          * page and is installing a completely unrelated page.
2105                          * Unsync pages must not be left as is, because the new
2106                          * upper-level page will be write-protected.
2107                          */
2108                         if (level > PG_LEVEL_4K && sp->unsync)
2109                                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2110                                                          &invalid_list);
2111                         continue;
2112                 }
2113
2114                 if (direct_mmu)
2115                         goto trace_get_page;
2116
2117                 if (sp->unsync) {
2118                         /*
2119                          * The page is good, but is stale.  kvm_sync_page does
2120                          * get the latest guest state, but (unlike mmu_unsync_children)
2121                          * it doesn't write-protect the page or mark it synchronized!
2122                          * This way the validity of the mapping is ensured, but the
2123                          * overhead of write protection is not incurred until the
2124                          * guest invalidates the TLB mapping.  This allows multiple
2125                          * SPs for a single gfn to be unsync.
2126                          *
2127                          * If the sync fails, the page is zapped.  If so, break
2128                          * in order to rebuild it.
2129                          */
2130                         if (!kvm_sync_page(vcpu, sp, &invalid_list))
2131                                 break;
2132
2133                         WARN_ON(!list_empty(&invalid_list));
2134                         kvm_flush_remote_tlbs(vcpu->kvm);
2135                 }
2136
2137                 __clear_sp_write_flooding_count(sp);
2138
2139 trace_get_page:
2140                 trace_kvm_mmu_get_page(sp, false);
2141                 goto out;
2142         }
2143
2144         ++vcpu->kvm->stat.mmu_cache_miss;
2145
2146         sp = kvm_mmu_alloc_page(vcpu, direct);
2147
2148         sp->gfn = gfn;
2149         sp->role = role;
2150         hlist_add_head(&sp->hash_link, sp_list);
2151         if (!direct) {
2152                 account_shadowed(vcpu->kvm, sp);
2153                 if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
2154                         kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2155         }
2156         trace_kvm_mmu_get_page(sp, true);
2157 out:
2158         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2159
2160         if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
2161                 vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2162         return sp;
2163 }
2164
2165 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2166                                         struct kvm_vcpu *vcpu, hpa_t root,
2167                                         u64 addr)
2168 {
2169         iterator->addr = addr;
2170         iterator->shadow_addr = root;
2171         iterator->level = vcpu->arch.mmu->shadow_root_level;
2172
2173         if (iterator->level == PT64_ROOT_4LEVEL &&
2174             vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
2175             !vcpu->arch.mmu->direct_map)
2176                 --iterator->level;
2177
2178         if (iterator->level == PT32E_ROOT_LEVEL) {
2179                 /*
2180                  * prev_root is currently only used for 64-bit hosts. So only
2181                  * the active root_hpa is valid here.
2182                  */
2183                 BUG_ON(root != vcpu->arch.mmu->root_hpa);
2184
2185                 iterator->shadow_addr
2186                         = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2187                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2188                 --iterator->level;
2189                 if (!iterator->shadow_addr)
2190                         iterator->level = 0;
2191         }
2192 }
2193
2194 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2195                              struct kvm_vcpu *vcpu, u64 addr)
2196 {
2197         shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2198                                     addr);
2199 }
2200
2201 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2202 {
2203         if (iterator->level < PG_LEVEL_4K)
2204                 return false;
2205
2206         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2207         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2208         return true;
2209 }
2210
2211 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2212                                u64 spte)
2213 {
2214         if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
2215                 iterator->level = 0;
2216                 return;
2217         }
2218
2219         iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2220         --iterator->level;
2221 }
2222
2223 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2224 {
2225         __shadow_walk_next(iterator, *iterator->sptep);
2226 }
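
/*
 * Usage sketch (illustrative): the iterator above is normally driven through
 * the for_each_shadow_entry() family of macros defined elsewhere in this
 * file, e.g.
 *
 *	for_each_shadow_entry(vcpu, addr, it) {
 *		if (it.level == goal_level)
 *			break;
 *		...
 *	}
 *
 * as __direct_map() does further down; "goal_level" stands in for whatever
 * terminating condition the caller uses.
 */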
2227
2228 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2229                              struct kvm_mmu_page *sp)
2230 {
2231         u64 spte;
2232
2233         BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2234
2235         spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2236
2237         mmu_spte_set(sptep, spte);
2238
2239         mmu_page_add_parent_pte(vcpu, sp, sptep);
2240
2241         if (sp->unsync_children || sp->unsync)
2242                 mark_unsync(sptep);
2243 }
2244
2245 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2246                                    unsigned direct_access)
2247 {
2248         if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2249                 struct kvm_mmu_page *child;
2250
2251                 /*
2252                  * For the direct sp, if the guest pte's dirty bit
2253                  * changed from clean to dirty, it would corrupt the
2254                  * sp's access, i.e. allow writes through a read-only
2255                  * sp.  Update the spte at this point to get
2256                  * a new sp with the correct access.
2257                  */
2258                 child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
2259                 if (child->role.access == direct_access)
2260                         return;
2261
2262                 drop_parent_pte(child, sptep);
2263                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2264         }
2265 }
2266
2267 /* Returns the number of zapped non-leaf child shadow pages. */
2268 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2269                             u64 *spte, struct list_head *invalid_list)
2270 {
2271         u64 pte;
2272         struct kvm_mmu_page *child;
2273
2274         pte = *spte;
2275         if (is_shadow_present_pte(pte)) {
2276                 if (is_last_spte(pte, sp->role.level)) {
2277                         drop_spte(kvm, spte);
2278                 } else {
2279                         child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2280                         drop_parent_pte(child, spte);
2281
2282                         /*
2283                          * Recursively zap nested TDP SPs, parentless SPs are
2284                          * unlikely to be used again in the near future.  This
2285                          * avoids retaining a large number of stale nested SPs.
2286                          */
2287                         if (tdp_enabled && invalid_list &&
2288                             child->role.guest_mode && !child->parent_ptes.val)
2289                                 return kvm_mmu_prepare_zap_page(kvm, child,
2290                                                                 invalid_list);
2291                 }
2292         } else if (is_mmio_spte(pte)) {
2293                 mmu_spte_clear_no_track(spte);
2294         }
2295         return 0;
2296 }
2297
2298 static int kvm_mmu_page_unlink_children(struct kvm *kvm,
2299                                         struct kvm_mmu_page *sp,
2300                                         struct list_head *invalid_list)
2301 {
2302         int zapped = 0;
2303         unsigned i;
2304
2305         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2306                 zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
2307
2308         return zapped;
2309 }
2310
2311 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2312 {
2313         u64 *sptep;
2314         struct rmap_iterator iter;
2315
2316         while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2317                 drop_parent_pte(sp, sptep);
2318 }
2319
2320 static int mmu_zap_unsync_children(struct kvm *kvm,
2321                                    struct kvm_mmu_page *parent,
2322                                    struct list_head *invalid_list)
2323 {
2324         int i, zapped = 0;
2325         struct mmu_page_path parents;
2326         struct kvm_mmu_pages pages;
2327
2328         if (parent->role.level == PG_LEVEL_4K)
2329                 return 0;
2330
2331         while (mmu_unsync_walk(parent, &pages)) {
2332                 struct kvm_mmu_page *sp;
2333
2334                 for_each_sp(pages, sp, parents, i) {
2335                         kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2336                         mmu_pages_clear_parents(&parents);
2337                         zapped++;
2338                 }
2339         }
2340
2341         return zapped;
2342 }
2343
2344 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2345                                        struct kvm_mmu_page *sp,
2346                                        struct list_head *invalid_list,
2347                                        int *nr_zapped)
2348 {
2349         bool list_unstable;
2350
2351         trace_kvm_mmu_prepare_zap_page(sp);
2352         ++kvm->stat.mmu_shadow_zapped;
2353         *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2354         *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2355         kvm_mmu_unlink_parents(kvm, sp);
2356
2357         /* Zapping children means active_mmu_pages has become unstable. */
2358         list_unstable = *nr_zapped;
2359
2360         if (!sp->role.invalid && !sp->role.direct)
2361                 unaccount_shadowed(kvm, sp);
2362
2363         if (sp->unsync)
2364                 kvm_unlink_unsync_page(kvm, sp);
2365         if (!sp->root_count) {
2366                 /* Count self */
2367                 (*nr_zapped)++;
2368
2369                 /*
2370                  * Already invalid pages (previously active roots) are not on
2371                  * the active page list.  See list_del() in the "else" case of
2372                  * !sp->root_count.
2373                  */
2374                 if (sp->role.invalid)
2375                         list_add(&sp->link, invalid_list);
2376                 else
2377                         list_move(&sp->link, invalid_list);
2378                 kvm_mod_used_mmu_pages(kvm, -1);
2379         } else {
2380                 /*
2381                  * Remove the active root from the active page list, the root
2382                  * will be explicitly freed when the root_count hits zero.
2383                  */
2384                 list_del(&sp->link);
2385
2386                 /*
2387                  * Obsolete pages cannot be used on any vCPUs, see the comment
2388                  * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2389                  * treats invalid shadow pages as being obsolete.
2390                  */
2391                 if (!is_obsolete_sp(kvm, sp))
2392                         kvm_reload_remote_mmus(kvm);
2393         }
2394
2395         if (sp->lpage_disallowed)
2396                 unaccount_huge_nx_page(kvm, sp);
2397
2398         sp->role.invalid = 1;
2399         return list_unstable;
2400 }
2401
2402 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2403                                      struct list_head *invalid_list)
2404 {
2405         int nr_zapped;
2406
2407         __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2408         return nr_zapped;
2409 }
2410
2411 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2412                                     struct list_head *invalid_list)
2413 {
2414         struct kvm_mmu_page *sp, *nsp;
2415
2416         if (list_empty(invalid_list))
2417                 return;
2418
2419         /*
2420          * We need to make sure everyone sees our modifications to
2421          * the page tables and sees changes to vcpu->mode here.  The
2422          * barrier in kvm_flush_remote_tlbs() achieves this; it pairs
2423          * with vcpu_enter_guest() and walk_shadow_page_lockless_begin/end().
2424          *
2425          * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2426          * guest mode and/or lockless shadow page table walks.
2427          */
2428         kvm_flush_remote_tlbs(kvm);
2429
2430         list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2431                 WARN_ON(!sp->role.invalid || sp->root_count);
2432                 kvm_mmu_free_page(sp);
2433         }
2434 }
2435
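/*
 * Zap at least @nr_to_zap shadow pages, oldest first, i.e. from the tail of
 * the FIFO active_mmu_pages list.  Active roots are skipped, and the walk
 * restarts whenever zapping children has made the list unstable.
 */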
2436 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
2437                                                   unsigned long nr_to_zap)
2438 {
2439         unsigned long total_zapped = 0;
2440         struct kvm_mmu_page *sp, *tmp;
2441         LIST_HEAD(invalid_list);
2442         bool unstable;
2443         int nr_zapped;
2444
2445         if (list_empty(&kvm->arch.active_mmu_pages))
2446                 return 0;
2447
2448 restart:
2449         list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2450                 /*
2451                  * Don't zap active root pages, the page itself can't be freed
2452                  * and zapping it will just force vCPUs to realloc and reload.
2453                  */
2454                 if (sp->root_count)
2455                         continue;
2456
2457                 unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
2458                                                       &nr_zapped);
2459                 total_zapped += nr_zapped;
2460                 if (total_zapped >= nr_to_zap)
2461                         break;
2462
2463                 if (unstable)
2464                         goto restart;
2465         }
2466
2467         kvm_mmu_commit_zap_page(kvm, &invalid_list);
2468
2469         kvm->stat.mmu_recycled += total_zapped;
2470         return total_zapped;
2471 }
2472
2473 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2474 {
2475         if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2476                 return kvm->arch.n_max_mmu_pages -
2477                         kvm->arch.n_used_mmu_pages;
2478
2479         return 0;
2480 }
2481
2482 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2483 {
2484         unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2485
2486         if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2487                 return 0;
2488
2489         kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2490
2491         /*
2492          * Note, this check is intentionally soft, it only guarantees that one
2493          * page is available, while the caller may end up allocating as many as
2494          * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
2495          * exceeding the (arbitrary by default) limit will not harm the host,
2496          * being too aggressive may unnecessarily kill the guest, and getting an
2497          * exact count is far more trouble than it's worth, especially in the
2498          * page fault paths.
2499          */
2500         if (!kvm_mmu_available_pages(vcpu->kvm))
2501                 return -ENOSPC;
2502         return 0;
2503 }
2504
2505 /*
2506  * Change the number of mmu pages allocated to the vm.
2507  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2508  */
2509 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2510 {
2511         write_lock(&kvm->mmu_lock);
2512
2513         if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2514                 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2515                                                   goal_nr_mmu_pages);
2516
2517                 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2518         }
2519
2520         kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2521
2522         write_unlock(&kvm->mmu_lock);
2523 }
2524
2525 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2526 {
2527         struct kvm_mmu_page *sp;
2528         LIST_HEAD(invalid_list);
2529         int r;
2530
2531         pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2532         r = 0;
2533         write_lock(&kvm->mmu_lock);
2534         for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2535                 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2536                          sp->role.word);
2537                 r = 1;
2538                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2539         }
2540         kvm_mmu_commit_zap_page(kvm, &invalid_list);
2541         write_unlock(&kvm->mmu_lock);
2542
2543         return r;
2544 }
2545
2546 static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2547 {
2548         gpa_t gpa;
2549         int r;
2550
2551         if (vcpu->arch.mmu->direct_map)
2552                 return 0;
2553
2554         gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
2555
2556         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2557
2558         return r;
2559 }
2560
2561 static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2562 {
2563         trace_kvm_mmu_unsync_page(sp);
2564         ++vcpu->kvm->stat.mmu_unsync;
2565         sp->unsync = 1;
2566
2567         kvm_mmu_mark_parents_unsync(sp);
2568 }
2569
2570 /*
2571  * Attempt to unsync any shadow pages that can be reached by the specified gfn;
2572  * KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
2573  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
2574  * be write-protected.
2575  */
2576 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync,
2577                             bool speculative)
2578 {
2579         struct kvm_mmu_page *sp;
2580         bool locked = false;
2581
2582         /*
2583          * Force write-protection if the page is being tracked.  Note, the page
2584          * track machinery is used to write-protect upper-level shadow pages,
2585          * i.e. this guards the role.level == 4K assertion below!
2586          */
2587         if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
2588                 return -EPERM;
2589
2590         /*
2591          * The page is not write-tracked, mark existing shadow pages unsync
2592          * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
2593          * that case, KVM must complete emulation of the guest TLB flush before
2594          * allowing shadow pages to become unsync (writable by the guest).
2595          */
2596         for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2597                 if (!can_unsync)
2598                         return -EPERM;
2599
2600                 if (sp->unsync)
2601                         continue;
2602
2603                 if (speculative)
2604                         return -EEXIST;
2605
2606                 /*
2607                  * TDP MMU page faults require an additional spinlock as they
2608                  * run with mmu_lock held for read, not write, and the unsync
2609                  * logic is not thread safe.  Take the spinlock regardless of
2610                  * the MMU type to avoid extra conditionals/parameters, there's
2611                  * no meaningful penalty if mmu_lock is held for write.
2612                  */
2613                 if (!locked) {
2614                         locked = true;
2615                         spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
2616
2617                         /*
2618                          * Recheck after taking the spinlock, a different vCPU
2619                          * may have since marked the page unsync.  A false
2620                          * positive on the unprotected check above is not
2621                          * possible as clearing sp->unsync _must_ hold mmu_lock
2622                          * for write, i.e. unsync cannot transition from 0->1
2623                          * while this CPU holds mmu_lock for read (or write).
2624                          */
2625                         if (READ_ONCE(sp->unsync))
2626                                 continue;
2627                 }
2628
2629                 WARN_ON(sp->role.level != PG_LEVEL_4K);
2630                 kvm_unsync_page(vcpu, sp);
2631         }
2632         if (locked)
2633                 spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
2634
2635         /*
2636          * We need to ensure that the marking of unsync pages is visible
2637          * before the SPTE is updated to allow writes because
2638          * kvm_mmu_sync_roots() checks the unsync flags without holding
2639          * the MMU lock and so can race with this. If the SPTE was updated
2640          * before the page had been marked as unsync-ed, something like the
2641          * following could happen:
2642          *
2643          * CPU 1                    CPU 2
2644          * ---------------------------------------------------------------------
2645          * 1.2 Host updates SPTE
2646          *     to be writable
2647          *                      2.1 Guest writes a GPTE for GVA X.
2648          *                          (GPTE being in the guest page table shadowed
2649          *                           by the SP from CPU 1.)
2650          *                          This reads SPTE during the page table walk.
2651          *                          Since SPTE.W is read as 1, there is no
2652          *                          fault.
2653          *
2654          *                      2.2 Guest issues TLB flush.
2655          *                          That causes a VM Exit.
2656          *
2657          *                      2.3 Walking of unsync pages sees sp->unsync is
2658          *                          false and skips the page.
2659          *
2660          *                      2.4 Guest accesses GVA X.
2661          *                          Since the mapping in the SP was not updated,
2662          *                          the old mapping for GVA X is incorrectly
2663          *                          used.
2664          * 1.1 Host marks SP
2665          *     as unsync
2666          *     (sp->unsync = true)
2667          *
2668          * The write barrier below ensures that 1.1 happens before 1.2 and thus
2669          * the situation in 2.4 does not arise. The implicit barrier in 2.2
2670          * pairs with this write barrier.
2671          */
2672         smp_wmb();
2673
2674         return 0;
2675 }
2676
2677 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2678                     unsigned int pte_access, int level,
2679                     gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2680                     bool can_unsync, bool host_writable)
2681 {
2682         u64 spte;
2683         struct kvm_mmu_page *sp;
2684         int ret;
2685
2686         sp = sptep_to_sp(sptep);
2687
2688         ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
2689                         can_unsync, host_writable, sp_ad_disabled(sp), &spte);
2690
2691         if (*sptep == spte)
2692                 ret |= SET_SPTE_SPURIOUS;
2693         else if (mmu_spte_update(sptep, spte))
2694                 ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
2695         return ret;
2696 }
2697
2698 static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2699                         unsigned int pte_access, bool write_fault, int level,
2700                         gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2701                         bool host_writable)
2702 {
2703         struct kvm_mmu_page *sp = sptep_to_sp(sptep);
2704         int was_rmapped = 0;
2705         int ret = RET_PF_FIXED;
2706         bool flush = false;
2707         int make_spte_ret;
2708         u64 spte;
2709
2710         pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2711                  *sptep, write_fault, gfn);
2712
2713         if (unlikely(is_noslot_pfn(pfn))) {
2714                 mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2715                 return RET_PF_EMULATE;
2716         }
2717
2718         if (is_shadow_present_pte(*sptep)) {
2719                 /*
2720                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2721                  * the parent of the now unreachable PTE.
2722                  */
2723                 if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2724                         struct kvm_mmu_page *child;
2725                         u64 pte = *sptep;
2726
2727                         child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2728                         drop_parent_pte(child, sptep);
2729                         flush = true;
2730                 } else if (pfn != spte_to_pfn(*sptep)) {
2731                         pgprintk("hfn old %llx new %llx\n",
2732                                  spte_to_pfn(*sptep), pfn);
2733                         drop_spte(vcpu->kvm, sptep);
2734                         flush = true;
2735                 } else
2736                         was_rmapped = 1;
2737         }
2738
2739         make_spte_ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
2740                                  true, host_writable, sp_ad_disabled(sp), &spte);
2741
2742         if (*sptep == spte) {
2743                 ret = RET_PF_SPURIOUS;
2744         } else {
2745                 trace_kvm_mmu_set_spte(level, gfn, sptep);
2746                 flush |= mmu_spte_update(sptep, spte);
2747         }
2748
2749         if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
2750                 if (write_fault)
2751                         ret = RET_PF_EMULATE;
2752         }
2753
2754         if (flush)
2755                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
2756                                 KVM_PAGES_PER_HPAGE(level));
2757
2758         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2759
2760         if (!was_rmapped) {
2761                 WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
2762                 kvm_update_page_stats(vcpu->kvm, level, 1);
2763                 rmap_add(vcpu, sptep, gfn);
2764         }
2765
2766         return ret;
2767 }
2768
2769 static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2770                                      bool no_dirty_log)
2771 {
2772         struct kvm_memory_slot *slot;
2773
2774         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2775         if (!slot)
2776                 return KVM_PFN_ERR_FAULT;
2777
2778         return gfn_to_pfn_memslot_atomic(slot, gfn);
2779 }
2780
2781 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2782                                     struct kvm_mmu_page *sp,
2783                                     u64 *start, u64 *end)
2784 {
2785         struct page *pages[PTE_PREFETCH_NUM];
2786         struct kvm_memory_slot *slot;
2787         unsigned int access = sp->role.access;
2788         int i, ret;
2789         gfn_t gfn;
2790
2791         gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2792         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2793         if (!slot)
2794                 return -1;
2795
2796         ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2797         if (ret <= 0)
2798                 return -1;
2799
2800         for (i = 0; i < ret; i++, gfn++, start++) {
2801                 mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
2802                              page_to_pfn(pages[i]), true, true);
2803                 put_page(pages[i]);
2804         }
2805
2806         return 0;
2807 }
2808
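/*
 * Prefetch the sptes around @sptep: scan the PTE_PREFETCH_NUM-aligned window
 * containing @sptep and map each run of not-yet-present sptes with a single
 * gfn_to_page_many_atomic() call.
 */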
2809 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2810                                   struct kvm_mmu_page *sp, u64 *sptep)
2811 {
2812         u64 *spte, *start = NULL;
2813         int i;
2814
2815         WARN_ON(!sp->role.direct);
2816
2817         i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2818         spte = sp->spt + i;
2819
2820         for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2821                 if (is_shadow_present_pte(*spte) || spte == sptep) {
2822                         if (!start)
2823                                 continue;
2824                         if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2825                                 return;
2826                         start = NULL;
2827                 } else if (!start)
2828                         start = spte;
2829         }
2830         if (start)
2831                 direct_pte_prefetch_many(vcpu, sp, start, spte);
2832 }
2833
2834 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2835 {
2836         struct kvm_mmu_page *sp;
2837
2838         sp = sptep_to_sp(sptep);
2839
2840         /*
2841          * Without accessed bits, there's no way to distinguish between
2842          * actually accessed translations and prefetched, so disable pte
2843          * prefetch if accessed bits aren't available.
2844          */
2845         if (sp_ad_disabled(sp))
2846                 return;
2847
2848         if (sp->role.level > PG_LEVEL_4K)
2849                 return;
2850
2851         /*
2852          * If addresses are being invalidated, skip prefetching to avoid
2853          * accidentally prefetching those addresses.
2854          */
2855         if (unlikely(vcpu->kvm->mmu_notifier_count))
2856                 return;
2857
2858         __direct_pte_prefetch(vcpu, sp, sptep);
2859 }
2860
2861 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
2862                                   const struct kvm_memory_slot *slot)
2863 {
2864         unsigned long hva;
2865         pte_t *pte;
2866         int level;
2867
2868         if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
2869                 return PG_LEVEL_4K;
2870
2871         /*
2872          * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
2873          * is not solely for performance; it's also necessary to avoid the
2874          * "writable" check in __gfn_to_hva_many(), which will always fail on
2875          * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
2876          * page fault steps have already verified the guest isn't writing a
2877          * read-only memslot.
2878          */
2879         hva = __gfn_to_hva_memslot(slot, gfn);
2880
2881         pte = lookup_address_in_mm(kvm->mm, hva, &level);
2882         if (unlikely(!pte))
2883                 return PG_LEVEL_4K;
2884
2885         return level;
2886 }
2887
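/*
 * Return the largest page level at which @gfn can be mapped, bounded by
 * @max_level, by the per-slot disallow_lpage accounting, and by the level at
 * which the pfn is actually mapped in the host.
 */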
2888 int kvm_mmu_max_mapping_level(struct kvm *kvm,
2889                               const struct kvm_memory_slot *slot, gfn_t gfn,
2890                               kvm_pfn_t pfn, int max_level)
2891 {
2892         struct kvm_lpage_info *linfo;
2893         int host_level;
2894
2895         max_level = min(max_level, max_huge_page_level);
2896         for ( ; max_level > PG_LEVEL_4K; max_level--) {
2897                 linfo = lpage_info_slot(gfn, slot, max_level);
2898                 if (!linfo->disallow_lpage)
2899                         break;
2900         }
2901
2902         if (max_level == PG_LEVEL_4K)
2903                 return PG_LEVEL_4K;
2904
2905         host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
2906         return min(host_level, max_level);
2907 }
2908
2909 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
2910 {
2911         struct kvm_memory_slot *slot = fault->slot;
2912         kvm_pfn_t mask;
2913
2914         fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled;
2915
2916         if (unlikely(fault->max_level == PG_LEVEL_4K))
2917                 return;
2918
2919         if (is_error_noslot_pfn(fault->pfn) || kvm_is_reserved_pfn(fault->pfn))
2920                 return;
2921
2922         if (kvm_slot_dirty_track_enabled(slot))
2923                 return;
2924
2925         /*
2926          * Enforce the iTLB multihit workaround after capturing the requested
2927          * level, which will be used to do precise, accurate accounting.
2928          */
2929         fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
2930                                                      fault->gfn, fault->pfn,
2931                                                      fault->max_level);
2932         if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
2933                 return;
2934
2935         /*
2936          * mmu_notifier_retry() was successful and mmu_lock is held, so
2937          * the pmd can't be split from under us.
2938          */
2939         fault->goal_level = fault->req_level;
2940         mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
2941         VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
2942         fault->pfn &= ~mask;
2943 }
2944
2945 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
2946 {
2947         if (cur_level > PG_LEVEL_4K &&
2948             cur_level == fault->goal_level &&
2949             is_shadow_present_pte(spte) &&
2950             !is_large_pte(spte)) {
2951                 /*
2952                  * A small SPTE exists for this pfn, but FNAME(fetch)
2953                  * and __direct_map would like to create a large PTE
2954                  * instead: just force them to go down another level,
2955                  * patching the next 9 bits of the address back into
2956                  * pfn for them.
2957                  */
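                /*
                 * Worked example (illustrative): when dropping from a 2MB
                 * goal to 4K, page_mask is KVM_PAGES_PER_HPAGE(2) -
                 * KVM_PAGES_PER_HPAGE(1) = 512 - 1 = 0x1ff, i.e. gfn bits
                 * 8:0 are copied back into pfn.
                 */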
2958                 u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
2959                                 KVM_PAGES_PER_HPAGE(cur_level - 1);
2960                 fault->pfn |= fault->gfn & page_mask;
2961                 fault->goal_level--;
2962         }
2963 }
2964
2965 static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
2966 {
2967         struct kvm_shadow_walk_iterator it;
2968         struct kvm_mmu_page *sp;
2969         int ret;
2970         gfn_t base_gfn = fault->gfn;
2971
2972         kvm_mmu_hugepage_adjust(vcpu, fault);
2973
2974         trace_kvm_mmu_spte_requested(fault);
2975         for_each_shadow_entry(vcpu, fault->addr, it) {
2976                 /*
2977                  * We cannot overwrite existing page tables with an NX
2978                  * large page, as the leaf could be executable.
2979                  */
2980                 if (fault->nx_huge_page_workaround_enabled)
2981                         disallowed_hugepage_adjust(fault, *it.sptep, it.level);
2982
2983                 base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
2984                 if (it.level == fault->goal_level)
2985                         break;
2986
2987                 drop_large_spte(vcpu, it.sptep);
2988                 if (is_shadow_present_pte(*it.sptep))
2989                         continue;
2990
2991                 sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
2992                                       it.level - 1, true, ACC_ALL);
2993
2994                 link_shadow_page(vcpu, it.sptep, sp);
2995                 if (fault->is_tdp && fault->huge_page_disallowed &&
2996                     fault->req_level >= it.level)
2997                         account_huge_nx_page(vcpu->kvm, sp);
2998         }
2999
3000         if (WARN_ON_ONCE(it.level != fault->goal_level))
3001                 return -EFAULT;
3002
3003         ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
3004                            fault->write, fault->goal_level, base_gfn, fault->pfn,
3005                            fault->prefault, fault->map_writable);
3006         if (ret == RET_PF_SPURIOUS)
3007                 return ret;
3008
3009         direct_pte_prefetch(vcpu, it.sptep);
3010         ++vcpu->stat.pf_fixed;
3011         return ret;
3012 }
3013
3014 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
3015 {
3016         send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
3017 }
3018
3019 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
3020 {
3021         /*
3022          * Do not cache the mmio info caused by writing the readonly gfn
3023          * into the spte; otherwise a read access on the readonly gfn can
3024          * also cause an mmio page fault and be treated as mmio access.
3025          */
3026         if (pfn == KVM_PFN_ERR_RO_FAULT)
3027                 return RET_PF_EMULATE;
3028
3029         if (pfn == KVM_PFN_ERR_HWPOISON) {
3030                 kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
3031                 return RET_PF_RETRY;
3032         }
3033
3034         return -EFAULT;
3035 }
3036
3037 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
3038                                 unsigned int access, int *ret_val)
3039 {
3040         /* The pfn is invalid, report the error! */
3041         if (unlikely(is_error_pfn(fault->pfn))) {
3042                 *ret_val = kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
3043                 return true;
3044         }
3045
3046         if (unlikely(!fault->slot)) {
3047                 gva_t gva = fault->is_tdp ? 0 : fault->addr;
3048
3049                 vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3050                                      access & shadow_mmio_access_mask);
3051                 /*
3052                  * If MMIO caching is disabled, emulate immediately without
3053                  * touching the shadow page tables as attempting to install an
3054                  * MMIO SPTE will just be an expensive nop.
3055                  */
3056                 if (unlikely(!shadow_mmio_value)) {
3057                         *ret_val = RET_PF_EMULATE;
3058                         return true;
3059                 }
3060         }
3061
3062         return false;
3063 }
3064
3065 static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
3066 {
3067         /*
3068          * Do not fix an mmio spte with an invalid generation number; it
3069          * needs to be updated by the slow page fault path.
3070          */
3071         if (fault->rsvd)
3072                 return false;
3073
3074         /* See if the page fault is due to an NX violation */
3075         if (unlikely(fault->exec && fault->present))
3076                 return false;
3077
3078         /*
3079          * #PF can be fast if:
3080          * 1. The shadow page table entry is not present, which could mean
3081          *    that the fault was caused by access tracking (if enabled).
3082          * 2. The shadow page table entry is present and the fault is caused
3083          *    by write-protect, which means we just need to change the W bit
3084          *    of the spte, which can be done without holding mmu_lock.
3085          *
3086          * However, if access tracking is disabled we know that a non-present
3087          * page must be a genuine page fault where we have to create a new SPTE.
3088          * So, if access tracking is disabled, we return true only for write
3089          * accesses to a present page.
3090          */
3091
3092         return shadow_acc_track_mask != 0 || (fault->write && fault->present);
3093 }
3094
3095 /*
3096  * Returns true if the SPTE was fixed successfully. Otherwise,
3097  * someone else modified the SPTE from its original value.
3098  */
3099 static bool
3100 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
3101                         u64 *sptep, u64 old_spte, u64 new_spte)
3102 {
3103         /*
3104          * Theoretically we could also set dirty bit (and flush TLB) here in
3105          * order to eliminate unnecessary PML logging. See comments in
3106          * set_spte. But fast_page_fault is very unlikely to happen with PML
3107          * enabled, so we do not do this.  This might result in the same GPA
3108          * being logged in the PML buffer again when the write really happens,
3109          * and in mark_page_dirty eventually being called twice, but that is
3110          * harmless.  This also avoids the TLB flush needed after setting the
3111          * dirty bit, so non-PML cases won't be impacted.
3112          *
3113          * Compare with set_spte where instead shadow_dirty_mask is set.
3114          */
3115         if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3116                 return false;
3117
3118         if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
3119                 mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3120
3121         return true;
3122 }
3123
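/*
 * Returns true if the access indicated by @fault (fetch, write or read)
 * is already allowed by @spte, i.e. the fault would not occur if the
 * access were replayed right now.
 */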
3124 static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
3125 {
3126         if (fault->exec)
3127                 return is_executable_pte(spte);
3128
3129         if (fault->write)
3130                 return is_writable_pte(spte);
3131
3132         /* Fault was on Read access */
3133         return spte & PT_PRESENT_MASK;
3134 }
3135
3136 /*
3137  * Returns the last level spte pointer of the shadow page walk for the given
3138  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
3139  * walk could be performed, returns NULL and *spte does not contain valid data.
3140  *
3141  * Contract:
3142  *  - Must be called between walk_shadow_page_lockless_{begin,end}.
3143  *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
3144  */
3145 static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
3146 {
3147         struct kvm_shadow_walk_iterator iterator;
3148         u64 old_spte;
3149         u64 *sptep = NULL;
3150
3151         for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
3152                 sptep = iterator.sptep;
3153                 *spte = old_spte;
3154         }
3155
3156         return sptep;
3157 }
3158
3159 /*
3160  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
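 * RET_PF_INVALID means the fast path could not handle the fault and the
 * slow path must run; RET_PF_SPURIOUS means the access is already allowed
 * by the current SPTE; RET_PF_FIXED means the SPTE was fixed up locklessly.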
3161  */
3162 static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3163 {
3164         struct kvm_mmu_page *sp;
3165         int ret = RET_PF_INVALID;
3166         u64 spte = 0ull;
3167         u64 *sptep = NULL;
3168         uint retry_count = 0;
3169
3170         if (!page_fault_can_be_fast(fault))
3171                 return ret;
3172
3173         walk_shadow_page_lockless_begin(vcpu);
3174
3175         do {
3176                 u64 new_spte;
3177
3178                 if (is_tdp_mmu(vcpu->arch.mmu))
3179                         sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3180                 else
3181                         sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3182
3183                 if (!is_shadow_present_pte(spte))
3184                         break;
3185
3186                 sp = sptep_to_sp(sptep);
3187                 if (!is_last_spte(spte, sp->role.level))
3188                         break;
3189
3190                 /*
3191                  * Check whether the memory access that caused the fault would
3192                  * still cause it if it were to be performed right now. If not,
3193                  * then this is a spurious fault caused by the TLB being
3194                  * lazily flushed, or because some other CPU has already
3195                  * fixed the PTE after the current CPU took the fault.
3196                  *
3197                  * Need not check the access of upper level table entries since
3198                  * they are always ACC_ALL.
3199                  */
3200                 if (is_access_allowed(fault, spte)) {
3201                         ret = RET_PF_SPURIOUS;
3202                         break;
3203                 }
3204
3205                 new_spte = spte;
3206
3207                 if (is_access_track_spte(spte))
3208                         new_spte = restore_acc_track_spte(new_spte);
3209
3210                 /*
3211                  * Currently, to simplify the code, write-protection can
3212                  * be removed in the fast path only if the SPTE was
3213                  * write-protected for dirty-logging or access tracking.
3214                  */
3215                 if (fault->write &&
3216                     spte_can_locklessly_be_made_writable(spte)) {
3217                         new_spte |= PT_WRITABLE_MASK;
3218
3219                         /*
3220                          * Do not fix write-permission on the large spte.  Since
3221                          * we only dirty the first page into the dirty-bitmap in
3222                          * fast_pf_fix_direct_spte(), other pages would be
3223                          * missed if the slot has dirty logging enabled.
3224                          *
3225                          * Instead, we let the slow page fault path create a
3226                          * normal spte to fix the access.
3227                          *
3228                          * See the comments in kvm_arch_commit_memory_region().
3229                          */
3230                         if (sp->role.level > PG_LEVEL_4K)
3231                                 break;
3232                 }
3233
3234                 /* Verify that the fault can be handled in the fast path */
3235                 if (new_spte == spte ||
3236                     !is_access_allowed(fault, new_spte))
3237                         break;
3238
3239                 /*
3240                  * Currently, fast page fault only works for direct mapping
3241                  * since the gfn is not stable for indirect shadow page. See
3242                  * Documentation/virt/kvm/locking.rst to get more detail.
3243                  */
3244                 if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3245                         ret = RET_PF_FIXED;
3246                         break;
3247                 }
3248
3249                 if (++retry_count > 4) {
3250                         printk_once(KERN_WARNING
3251                                 "kvm: Fast #PF retrying more than 4 times.\n");
3252                         break;
3253                 }
3254
3255         } while (true);
3256
3257         trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
3258         walk_shadow_page_lockless_end(vcpu);
3259
3260         return ret;
3261 }
3262
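/*
 * Release a reference on the root at *root_hpa and mark it invalid in the
 * caller's structure.  TDP MMU roots have their own refcounting; a legacy
 * root is queued for zapping once its last reference is dropped and its
 * role is invalid.
 */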
3263 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3264                                struct list_head *invalid_list)
3265 {
3266         struct kvm_mmu_page *sp;
3267
3268         if (!VALID_PAGE(*root_hpa))
3269                 return;
3270
3271         sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
3272
3273         if (is_tdp_mmu_page(sp))
3274                 kvm_tdp_mmu_put_root(kvm, sp, false);
3275         else if (!--sp->root_count && sp->role.invalid)
3276                 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3277
3278         *root_hpa = INVALID_PAGE;
3279 }
3280
3281 /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3282 void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3283                         ulong roots_to_free)
3284 {
3285         struct kvm *kvm = vcpu->kvm;
3286         int i;
3287         LIST_HEAD(invalid_list);
3288         bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3289
3290         BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3291
3292         /* Before acquiring the MMU lock, see if we need to do any real work. */
3293         if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
3294                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3295                         if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3296                             VALID_PAGE(mmu->prev_roots[i].hpa))
3297                                 break;
3298
3299                 if (i == KVM_MMU_NUM_PREV_ROOTS)
3300                         return;
3301         }
3302
3303         write_lock(&kvm->mmu_lock);
3304
3305         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3306                 if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3307                         mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3308                                            &invalid_list);
3309
3310         if (free_active_root) {
3311                 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3312                     (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
3313                         mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
3314                 } else if (mmu->pae_root) {
3315                         for (i = 0; i < 4; ++i) {
3316                                 if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3317                                         continue;
3318
3319                                 mmu_free_root_page(kvm, &mmu->pae_root[i],
3320                                                    &invalid_list);
3321                                 mmu->pae_root[i] = INVALID_PAE_ROOT;
3322                         }
3323                 }
3324                 mmu->root_hpa = INVALID_PAGE;
3325                 mmu->root_pgd = 0;
3326         }
3327
3328         kvm_mmu_commit_zap_page(kvm, &invalid_list);
3329         write_unlock(&kvm->mmu_lock);
3330 }
3331 EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3332
3333 void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
3334 {
3335         unsigned long roots_to_free = 0;
3336         hpa_t root_hpa;
3337         int i;
3338
3339         /*
3340          * This should not be called while L2 is active; L2 can't invalidate
3341          * _only_ its own roots, e.g. INVVPID unconditionally exits.
3342          */
3343         WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
3344
3345         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3346                 root_hpa = mmu->prev_roots[i].hpa;
3347                 if (!VALID_PAGE(root_hpa))
3348                         continue;
3349
3350                 if (!to_shadow_page(root_hpa) ||
3351                         to_shadow_page(root_hpa)->role.guest_mode)
3352                         roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3353         }
3354
3355         kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
3356 }
3357 EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
3358
3359
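/*
 * Synthesize a triple fault if the guest's root gfn isn't backed by a
 * visible memslot; there is nothing sane KVM can shadow in that case.
 */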
3360 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3361 {
3362         int ret = 0;
3363
3364         if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3365                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3366                 ret = 1;
3367         }
3368
3369         return ret;
3370 }
3371
3372 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
3373                             u8 level, bool direct)
3374 {
3375         struct kvm_mmu_page *sp;
3376
3377         sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
3378         ++sp->root_count;
3379
3380         return __pa(sp->spt);
3381 }
3382
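/*
 * Allocate the root page(s) for a direct MMU: a single TDP MMU root when
 * the TDP MMU is in use, a single shadow root for 4-level and wider
 * paging, or four PAE roots when the shadow page tables are PAE.
 */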
3383 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3384 {
3385         struct kvm_mmu *mmu = vcpu->arch.mmu;
3386         u8 shadow_root_level = mmu->shadow_root_level;
3387         hpa_t root;
3388         unsigned i;
3389         int r;
3390
3391         write_lock(&vcpu->kvm->mmu_lock);
3392         r = make_mmu_pages_available(vcpu);
3393         if (r < 0)
3394                 goto out_unlock;
3395
3396         if (is_tdp_mmu_enabled(vcpu->kvm)) {
3397                 root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3398                 mmu->root_hpa = root;
3399         } else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3400                 root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3401                 mmu->root_hpa = root;
3402         } else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3403                 if (WARN_ON_ONCE(!mmu->pae_root)) {
3404                         r = -EIO;
3405                         goto out_unlock;
3406                 }
3407
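                /*
                 * Each PAE root covers 1GiB of the guest physical address
                 * space, hence root i shadows the range starting at gfn
                 * i << (30 - PAGE_SHIFT), i.e. GPA i << 30.
                 */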
3408                 for (i = 0; i < 4; ++i) {
3409                         WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3410
3411                         root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
3412                                               i << 30, PT32_ROOT_LEVEL, true);
3413                         mmu->pae_root[i] = root | PT_PRESENT_MASK |
3414                                            shadow_me_mask;
3415                 }
3416                 mmu->root_hpa = __pa(mmu->pae_root);
3417         } else {
3418                 WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3419                 r = -EIO;
3420                 goto out_unlock;
3421         }
3422
3423         /* root_pgd is ignored for direct MMUs. */
3424         mmu->root_pgd = 0;
3425 out_unlock:
3426         write_unlock(&vcpu->kvm->mmu_lock);
3427         return r;
3428 }
3429
3430 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3431 {
3432         struct kvm_mmu *mmu = vcpu->arch.mmu;
3433         u64 pdptrs[4], pm_mask;
3434         gfn_t root_gfn, root_pgd;
3435         hpa_t root;
3436         unsigned i;
3437         int r;
3438
3439         root_pgd = mmu->get_guest_pgd(vcpu);
3440         root_gfn = root_pgd >> PAGE_SHIFT;
3441
3442         if (mmu_check_root(vcpu, root_gfn))
3443                 return 1;
3444
3445         /*
3446          * On SVM, reading PDPTRs might access guest memory, which might fault
3447          * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
3448          */
3449         if (mmu->root_level == PT32E_ROOT_LEVEL) {
3450                 for (i = 0; i < 4; ++i) {
3451                         pdptrs[i] = mmu->get_pdptr(vcpu, i);
3452                         if (!(pdptrs[i] & PT_PRESENT_MASK))
3453                                 continue;
3454
3455                         if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
3456                                 return 1;
3457                 }
3458         }
3459
3460         r = alloc_all_memslots_rmaps(vcpu->kvm);
3461         if (r)
3462                 return r;
3463
3464         write_lock(&vcpu->kvm->mmu_lock);
3465         r = make_mmu_pages_available(vcpu);
3466         if (r < 0)
3467                 goto out_unlock;
3468
3469         /*
3470          * Do we shadow a long mode page table? If so we need to
3471          * write-protect the guest's page table root.
3472          */
3473         if (mmu->root_level >= PT64_ROOT_4LEVEL) {
3474                 root = mmu_alloc_root(vcpu, root_gfn, 0,
3475                                       mmu->shadow_root_level, false);
3476                 mmu->root_hpa = root;
3477                 goto set_root_pgd;
3478         }
3479
3480         if (WARN_ON_ONCE(!mmu->pae_root)) {
3481                 r = -EIO;
3482                 goto out_unlock;
3483         }
3484
3485         /*
3486          * We shadow a 32 bit page table. This may be a legacy 2-level
3487          * or a PAE 3-level page table. In either case we need to be aware that
3488          * the shadow page table may be a PAE or a long mode page table.
3489          */
3490         pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3491         if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
3492                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3493
3494                 if (WARN_ON_ONCE(!mmu->pml4_root)) {
3495                         r = -EIO;
3496                         goto out_unlock;
3497                 }
3498                 mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
3499
3500                 if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
3501                         if (WARN_ON_ONCE(!mmu->pml5_root)) {
3502                                 r = -EIO;
3503                                 goto out_unlock;
3504                         }
3505                         mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
3506                 }
3507         }
3508
3509         for (i = 0; i < 4; ++i) {
3510                 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3511
3512                 if (mmu->root_level == PT32E_ROOT_LEVEL) {
3513                         if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3514                                 mmu->pae_root[i] = INVALID_PAE_ROOT;
3515                                 continue;
3516                         }
3517                         root_gfn = pdptrs[i] >> PAGE_SHIFT;
3518                 }
3519
3520                 root = mmu_alloc_root(vcpu, root_gfn, i << 30,
3521                                       PT32_ROOT_LEVEL, false);
3522                 mmu->pae_root[i] = root | pm_mask;
3523         }
3524
3525         if (mmu->shadow_root_level == PT64_ROOT_5LEVEL)
3526                 mmu->root_hpa = __pa(mmu->pml5_root);
3527         else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
3528                 mmu->root_hpa = __pa(mmu->pml4_root);
3529         else
3530                 mmu->root_hpa = __pa(mmu->pae_root);
3531
3532 set_root_pgd:
3533         mmu->root_pgd = root_pgd;
3534 out_unlock:
3535         write_unlock(&vcpu->kvm->mmu_lock);
3536
3537         return r;
3538 }
3539
3540 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3541 {
3542         struct kvm_mmu *mmu = vcpu->arch.mmu;
3543         bool need_pml5 = mmu->shadow_root_level > PT64_ROOT_4LEVEL;
3544         u64 *pml5_root = NULL;
3545         u64 *pml4_root = NULL;
3546         u64 *pae_root;
3547
3548         /*
3549          * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3550          * tables are allocated and initialized at root creation as there is no
3551          * equivalent level in the guest's NPT to shadow.  Allocate the tables
3552          * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3553          */
3554         if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
3555             mmu->shadow_root_level < PT64_ROOT_4LEVEL)
3556                 return 0;
3557
3558         /*
3559          * NPT, the only paging mode that uses this horror, uses a fixed number
3560          * of levels for the shadow page tables, e.g. all MMUs are 4-level or
3561          * all MMus are 5-level.  Thus, this can safely require that pml5_root
3562          * all MMUs are 5-level.  Thus, this can safely require that pml5_root
3563          * prior MMU would also have required pml5.
3564          */
3565         if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
3566                 return 0;
3567
3568         /*
3569          * The special roots should always be allocated in concert.  Yell and
3570          * bail if KVM ends up in a state where only one of the roots is valid.
3571          */
3572         if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
3573                          (need_pml5 && mmu->pml5_root)))
3574                 return -EIO;
3575
3576         /*
3577          * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
3578          * doesn't need to be decrypted.
3579          */
3580         pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3581         if (!pae_root)
3582                 return -ENOMEM;
3583
3584 #ifdef CONFIG_X86_64
3585         pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3586         if (!pml4_root)
3587                 goto err_pml4;
3588
3589         if (need_pml5) {
3590                 pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3591                 if (!pml5_root)
3592                         goto err_pml5;
3593         }
3594 #endif
3595
3596         mmu->pae_root = pae_root;
3597         mmu->pml4_root = pml4_root;
3598         mmu->pml5_root = pml5_root;
3599
3600         return 0;
3601
3602 #ifdef CONFIG_X86_64
3603 err_pml5:
3604         free_page((unsigned long)pml4_root);
3605 err_pml4:
3606         free_page((unsigned long)pae_root);
3607         return -ENOMEM;
3608 #endif
3609 }
3610
3611 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3612 {
3613         int i;
3614         struct kvm_mmu_page *sp;
3615
3616         if (vcpu->arch.mmu->direct_map)
3617                 return;
3618
3619         if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3620                 return;
3621
3622         vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3623
3624         if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3625                 hpa_t root = vcpu->arch.mmu->root_hpa;
3626                 sp = to_shadow_page(root);
3627
3628                 /*
3629                  * Even if another CPU was marking the SP as unsync-ed
3630                  * simultaneously, any guest page table changes are not
3631                  * guaranteed to be visible anyway until this VCPU issues a TLB
3632                  * flush strictly after those changes are made. We only need to
3633                  * ensure that the other CPU sets these flags before any actual
3634                  * changes to the page tables are made. The comments in
3635                  * mmu_try_to_unsync_pages() describe what could go wrong if
3636                  * this requirement isn't satisfied.
3637                  */
3638                 if (!smp_load_acquire(&sp->unsync) &&
3639                     !smp_load_acquire(&sp->unsync_children))
3640                         return;
3641
3642                 write_lock(&vcpu->kvm->mmu_lock);
3643                 kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3644
3645                 mmu_sync_children(vcpu, sp, true);
3646
3647                 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3648                 write_unlock(&vcpu->kvm->mmu_lock);
3649                 return;
3650         }
3651
3652         write_lock(&vcpu->kvm->mmu_lock);
3653         kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3654
3655         for (i = 0; i < 4; ++i) {
3656                 hpa_t root = vcpu->arch.mmu->pae_root[i];
3657
3658                 if (IS_VALID_PAE_ROOT(root)) {
3659                         root &= PT64_BASE_ADDR_MASK;
3660                         sp = to_shadow_page(root);
3661                         mmu_sync_children(vcpu, sp, true);
3662                 }
3663         }
3664
3665         kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3666         write_unlock(&vcpu->kvm->mmu_lock);
3667 }
3668
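/* Without guest paging, a guest "virtual" address is already a GPA. */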
3669 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3670                                   u32 access, struct x86_exception *exception)
3671 {
3672         if (exception)
3673                 exception->error_code = 0;
3674         return vaddr;
3675 }
3676
3677 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3678                                          u32 access,
3679                                          struct x86_exception *exception)
3680 {
3681         if (exception)
3682                 exception->error_code = 0;
3683         return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3684 }
3685
3686 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3687 {
3688         /*
3689          * A nested guest cannot use the MMIO cache if it is using nested
3690          * page tables, because cr2 is a nGPA while the cache stores GPAs.
3691          * page tables, because cr2 is an nGPA while the cache stores GPAs.
3692         if (mmu_is_nested(vcpu))
3693                 return false;
3694
3695         if (direct)
3696                 return vcpu_match_mmio_gpa(vcpu, addr);
3697
3698         return vcpu_match_mmio_gva(vcpu, addr);
3699 }
3700
3701 /*
3702  * Return the level of the lowest level SPTE added to sptes.
3703  * That SPTE may be non-present.
3704  *
3705  * Must be called between walk_shadow_page_lockless_{begin,end}.
3706  */
3707 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
3708 {
3709         struct kvm_shadow_walk_iterator iterator;
3710         int leaf = -1;
3711         u64 spte;
3712
3713         for (shadow_walk_init(&iterator, vcpu, addr),
3714              *root_level = iterator.level;
3715              shadow_walk_okay(&iterator);
3716              __shadow_walk_next(&iterator, spte)) {
3717                 leaf = iterator.level;
3718                 spte = mmu_spte_get_lockless(iterator.sptep);
3719
3720                 sptes[leaf] = spte;
3721         }
3722
3723         return leaf;
3724 }
3725
3726 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
3727 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3728 {
3729         u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
3730         struct rsvd_bits_validate *rsvd_check;
3731         int root, leaf, level;
3732         bool reserved = false;
3733
3734         walk_shadow_page_lockless_begin(vcpu);
3735
3736         if (is_tdp_mmu(vcpu->arch.mmu))
3737                 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
3738         else
3739                 leaf = get_walk(vcpu, addr, sptes, &root);
3740
3741         walk_shadow_page_lockless_end(vcpu);
3742
3743         if (unlikely(leaf < 0)) {
3744                 *sptep = 0ull;
3745                 return reserved;
3746         }
3747
3748         *sptep = sptes[leaf];
3749
3750         /*
3751          * Skip reserved bits checks on the terminal leaf if it's not a valid
3752          * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
3753          * design, always have reserved bits set.  The purpose of the checks is
3754          * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
3755          */
3756         if (!is_shadow_present_pte(sptes[leaf]))
3757                 leaf++;
3758
3759         rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
3760
3761         for (level = root; level >= leaf; level--)
3762                 reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
3763
3764         if (reserved) {
3765                 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
3766                        __func__, addr);
3767                 for (level = root; level >= leaf; level--)
3768                         pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
3769                                sptes[level], level,
3770                                get_rsvd_bits(rsvd_check, sptes[level], level));
3771         }
3772
3773         return reserved;
3774 }
3775
3776 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3777 {
3778         u64 spte;
3779         bool reserved;
3780
3781         if (mmio_info_in_cache(vcpu, addr, direct))
3782                 return RET_PF_EMULATE;
3783
3784         reserved = get_mmio_spte(vcpu, addr, &spte);
3785         if (WARN_ON(reserved))
3786                 return -EINVAL;
3787
3788         if (is_mmio_spte(spte)) {
3789                 gfn_t gfn = get_mmio_spte_gfn(spte);
3790                 unsigned int access = get_mmio_spte_access(spte);
3791
3792                 if (!check_mmio_spte(vcpu, spte))
3793                         return RET_PF_INVALID;
3794
3795                 if (direct)
3796                         addr = 0;
3797
3798                 trace_handle_mmio_page_fault(addr, gfn, access);
3799                 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3800                 return RET_PF_EMULATE;
3801         }
3802
3803         /*
3804          * If the page table was zapped by a different CPU, let the CPU
3805          * fault again on the address.
3806          */
3807         return RET_PF_RETRY;
3808 }
3809
3810 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3811                                          struct kvm_page_fault *fault)
3812 {
3813         if (unlikely(fault->rsvd))
3814                 return false;
3815
3816         if (!fault->present || !fault->write)
3817                 return false;
3818
3819         /*
3820          * The guest is writing a page that is write-tracked, which cannot
3821          * be fixed by the page fault handler.
3822          */
3823         if (kvm_slot_page_track_is_active(fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE))
3824                 return true;
3825
3826         return false;
3827 }
3828
3829 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3830 {
3831         struct kvm_shadow_walk_iterator iterator;
3832         u64 spte;
3833
3834         walk_shadow_page_lockless_begin(vcpu);
3835         for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
3836                 clear_sp_write_flooding_count(iterator.sptep);
3837         walk_shadow_page_lockless_end(vcpu);
3838 }
3839
3840 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3841                                     gfn_t gfn)
3842 {
3843         struct kvm_arch_async_pf arch;
3844
3845         arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3846         arch.gfn = gfn;
3847         arch.direct_map = vcpu->arch.mmu->direct_map;
3848         arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3849
3850         return kvm_setup_async_pf(vcpu, cr2_or_gpa,
3851                                   kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3852 }
3853
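/*
 * Resolve fault->gfn to a pfn, possibly setting up an async page fault.
 * Returns true if the fault was handled (*r holds the RET_PF_* value to
 * return), false if the caller should proceed using fault->pfn.
 */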
3854 static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, int *r)
3855 {
3856         struct kvm_memory_slot *slot = fault->slot;
3857         bool async;
3858
3859         /*
3860          * Retry the page fault if the gfn hit a memslot that is being deleted
3861          * or moved.  This ensures any existing SPTEs for the old memslot will
3862          * be zapped before KVM inserts a new MMIO SPTE for the gfn.
3863          */
3864         if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
3865                 goto out_retry;
3866
3867         if (!kvm_is_visible_memslot(slot)) {
3868                 /* Don't expose private memslots to L2. */
3869                 if (is_guest_mode(vcpu)) {
3870                         fault->slot = NULL;
3871                         fault->pfn = KVM_PFN_NOSLOT;
3872                         fault->map_writable = false;
3873                         return false;
3874                 }
3875                 /*
3876                  * If the APIC access page exists but is disabled, go directly
3877                  * to emulation without caching the MMIO access or creating a
3878                  * MMIO SPTE.  That way the cache doesn't need to be purged
3879                  * when the AVIC is re-enabled.
3880                  */
3881                 if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
3882                     !kvm_apicv_activated(vcpu->kvm)) {
3883                         *r = RET_PF_EMULATE;
3884                         return true;
3885                 }
3886         }
3887
3888         async = false;
3889         fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, &async,
3890                                           fault->write, &fault->map_writable,
3891                                           &fault->hva);
3892         if (!async)
3893                 return false; /* fault->pfn already points at the correct page */
3894
3895         if (!fault->prefault && kvm_can_do_async_pf(vcpu)) {
3896                 trace_kvm_try_async_get_page(fault->addr, fault->gfn);
3897                 if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
3898                         trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);
3899                         kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3900                         goto out_retry;
3901                 } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn))
3902                         goto out_retry;
3903         }
3904
3905         fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
3906                                           fault->write, &fault->map_writable,
3907                                           &fault->hva);
3908
3909 out_retry:
3910         *r = RET_PF_RETRY;
3911         return true;
3912 }
3913
3914 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3915 {
3916         bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
3917
3918         unsigned long mmu_seq;
3919         int r;
3920
3921         fault->gfn = fault->addr >> PAGE_SHIFT;
3922         fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);
3923
3924         if (page_fault_handle_page_track(vcpu, fault))
3925                 return RET_PF_EMULATE;
3926
3927         r = fast_page_fault(vcpu, fault);
3928         if (r != RET_PF_INVALID)
3929                 return r;
3930
3931         r = mmu_topup_memory_caches(vcpu, false);
3932         if (r)
3933                 return r;
3934
3935         mmu_seq = vcpu->kvm->mmu_notifier_seq;
3936         smp_rmb();
3937
3938         if (kvm_faultin_pfn(vcpu, fault, &r))
3939                 return r;
3940
3941         if (handle_abnormal_pfn(vcpu, fault, ACC_ALL, &r))
3942                 return r;
3943
3944         r = RET_PF_RETRY;
3945
3946         if (is_tdp_mmu_fault)
3947                 read_lock(&vcpu->kvm->mmu_lock);
3948         else
3949                 write_lock(&vcpu->kvm->mmu_lock);
3950
3951         if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
3952                 goto out_unlock;
3953         r = make_mmu_pages_available(vcpu);
3954         if (r)
3955                 goto out_unlock;
3956
3957         if (is_tdp_mmu_fault)
3958                 r = kvm_tdp_mmu_map(vcpu, fault);
3959         else
3960                 r = __direct_map(vcpu, fault);
3961
3962 out_unlock:
3963         if (is_tdp_mmu_fault)
3964                 read_unlock(&vcpu->kvm->mmu_lock);
3965         else
3966                 write_unlock(&vcpu->kvm->mmu_lock);
3967         kvm_release_pfn_clean(fault->pfn);
3968         return r;
3969 }
3970
3971 static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
3972                                 struct kvm_page_fault *fault)
3973 {
3974         pgprintk("%s: gva %lx error %x\n", __func__, fault->addr, fault->error_code);
3975
3976         /* This path builds a PAE page table; 2MB is the largest page that can be mapped. */
3977         fault->max_level = PG_LEVEL_2M;
3978         return direct_page_fault(vcpu, fault);
3979 }
3980
3981 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3982                                 u64 fault_address, char *insn, int insn_len)
3983 {
3984         int r = 1;
3985         u32 flags = vcpu->arch.apf.host_apf_flags;
3986
3987 #ifndef CONFIG_X86_64
3988         /* A 64-bit CR2 should be impossible on 32-bit KVM. */
3989         if (WARN_ON_ONCE(fault_address >> 32))
3990                 return -EFAULT;
3991 #endif
3992
3993         vcpu->arch.l1tf_flush_l1d = true;
3994         if (!flags) {
3995                 trace_kvm_page_fault(fault_address, error_code);
3996
3997                 if (kvm_event_needs_reinjection(vcpu))
3998                         kvm_mmu_unprotect_page_virt(vcpu, fault_address);
3999                 r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4000                                 insn_len);
4001         } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
4002                 vcpu->arch.apf.host_apf_flags = 0;
4003                 local_irq_disable();
4004                 kvm_async_pf_task_wait_schedule(fault_address);
4005                 local_irq_enable();
4006         } else {
4007                 WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4008         }
4009
4010         return r;
4011 }
4012 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
4013
4014 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4015 {
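        /*
         * Shrink the max mapping level until the guest's MTRR memory type
         * is uniform across the would-be huge page; a huge mapping must not
         * mix MTRR memory types.
         */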
4016         while (fault->max_level > PG_LEVEL_4K) {
4017                 int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
4018                 gfn_t base = (fault->addr >> PAGE_SHIFT) & ~(page_num - 1);
4019
4020                 if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
4021                         break;
4022
4023                 --fault->max_level;
4024         }
4025
4026         return direct_page_fault(vcpu, fault);
4027 }
4028
4029 static void nonpaging_init_context(struct kvm_mmu *context)
4030 {
4031         context->page_fault = nonpaging_page_fault;
4032         context->gva_to_gpa = nonpaging_gva_to_gpa;
4033         context->sync_page = nonpaging_sync_page;
4034         context->invlpg = NULL;
4035         context->direct_map = true;
4036 }
4037
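/*
 * A cached root is reusable only if its role matches and, unless the role
 * is direct (in which case the pgd is irrelevant), its pgd matches too;
 * the root must also still be backed by a valid shadow page.
 */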
4038 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4039                                   union kvm_mmu_page_role role)
4040 {
4041         return (role.direct || pgd == root->pgd) &&
4042                VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
4043                role.word == to_shadow_page(root->hpa)->role.word;
4044 }
4045
4046 /*
4047  * Find out if a previously cached root matching the new pgd/role is available.
4048  * The current root is also inserted into the cache.
4049  * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
4050  * returned.
4051  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
4052  * false is returned. This root should now be freed by the caller.
4053  */
4054 static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4055                                   union kvm_mmu_page_role new_role)
4056 {
4057         uint i;
4058         struct kvm_mmu_root_info root;
4059         struct kvm_mmu *mmu = vcpu->arch.mmu;
4060
4061         root.pgd = mmu->root_pgd;
4062         root.hpa = mmu->root_hpa;
4063
4064         if (is_root_usable(&root, new_pgd, new_role))
4065                 return true;
4066
4067         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
4068                 swap(root, mmu->prev_roots[i]);
4069
4070                 if (is_root_usable(&root, new_pgd, new_role))
4071                         break;
4072         }
4073
4074         mmu->root_hpa = root.hpa;
4075         mmu->root_pgd = root.pgd;
4076
4077         return i < KVM_MMU_NUM_PREV_ROOTS;
4078 }
4079
4080 static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4081                             union kvm_mmu_page_role new_role)
4082 {
4083         struct kvm_mmu *mmu = vcpu->arch.mmu;
4084
4085         /*
4086          * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
4087          * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
4088          * later if necessary.
4089          */
4090         if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
4091             mmu->root_level >= PT64_ROOT_4LEVEL)
4092                 return cached_root_available(vcpu, new_pgd, new_role);
4093
4094         return false;
4095 }
4096
4097 static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4098                               union kvm_mmu_page_role new_role)
4099 {
4100         if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
4101                 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
4102                 return;
4103         }
4104
4105         /*
4106          * It's possible that the cached previous root page is obsolete because
4107          * of a change in the MMU generation number. However, changing the
4108          * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
4109          * free the root set here and allocate a new one.
4110          */
4111         kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4112
4113         if (force_flush_and_sync_on_reuse) {
4114                 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4115                 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4116         }
4117
4118         /*
4119          * The last MMIO access's GVA and GPA are cached in the VCPU. When
4120          * switching to a new CR3, that GVA->GPA mapping may no longer be
4121          * valid. So clear any cached MMIO info even when we don't need to sync
4122          * the shadow page tables.
4123          */
4124         vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4125
4126         /*
4127          * If this is a direct root page, it doesn't have a write flooding
4128          * count. Otherwise, clear the write flooding count.
4129          */
4130         if (!new_role.direct)
4131                 __clear_sp_write_flooding_count(
4132                                 to_shadow_page(vcpu->arch.mmu->root_hpa));
4133 }
4134
4135 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4136 {
4137         __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
4138 }
4139 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4140
4141 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
4142 {
4143         return kvm_read_cr3(vcpu);
4144 }
4145
4146 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4147                            unsigned int access)
4148 {
4149         if (unlikely(is_mmio_spte(*sptep))) {
4150                 if (gfn != get_mmio_spte_gfn(*sptep)) {
4151                         mmu_spte_clear_no_track(sptep);
4152                         return true;
4153                 }
4154
4155                 mark_mmio_spte(vcpu, sptep, gfn, access);
4156                 return true;
4157         }
4158
4159         return false;
4160 }
4161
4162 #define PTTYPE_EPT 18 /* arbitrary */
4163 #define PTTYPE PTTYPE_EPT
4164 #include "paging_tmpl.h"
4165 #undef PTTYPE
4166
4167 #define PTTYPE 64
4168 #include "paging_tmpl.h"
4169 #undef PTTYPE
4170
4171 #define PTTYPE 32
4172 #include "paging_tmpl.h"
4173 #undef PTTYPE
4174
4175 static void
4176 __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
4177                         u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4178                         bool pse, bool amd)
4179 {
4180         u64 gbpages_bit_rsvd = 0;
4181         u64 nonleaf_bit8_rsvd = 0;
4182         u64 high_bits_rsvd;
4183
4184         rsvd_check->bad_mt_xwr = 0;
4185
4186         if (!gbpages)
4187                 gbpages_bit_rsvd = rsvd_bits(7, 7);
4188
4189         if (level == PT32E_ROOT_LEVEL)
4190                 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
4191         else
4192                 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4193
4194         /* Note, NX doesn't exist in PDPTEs, this is handled below. */
4195         if (!nx)
4196                 high_bits_rsvd |= rsvd_bits(63, 63);
4197
4198         /*
4199          * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4200          * leaf entries) on AMD CPUs only.
4201          */
4202         if (amd)
4203                 nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4204
4205         switch (level) {
4206         case PT32_ROOT_LEVEL:
4207                 /* no rsvd bits for 2 level 4K page table entries */
4208                 rsvd_check->rsvd_bits_mask[0][1] = 0;
4209                 rsvd_check->rsvd_bits_mask[0][0] = 0;
4210                 rsvd_check->rsvd_bits_mask[1][0] =
4211                         rsvd_check->rsvd_bits_mask[0][0];
4212
4213                 if (!pse) {
4214                         rsvd_check->rsvd_bits_mask[1][1] = 0;
4215                         break;
4216                 }
4217
4218                 if (is_cpuid_PSE36())
4219                         /* 36bits PSE 4MB page */
4220                         rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4221                 else
4222                         /* 32 bits PSE 4MB page */
4223                         rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4224                 break;
4225         case PT32E_ROOT_LEVEL:
4226                 rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
4227                                                    high_bits_rsvd |
4228                                                    rsvd_bits(5, 8) |
4229                                                    rsvd_bits(1, 2);     /* PDPTE */
4230                 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;      /* PDE */
4231                 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;      /* PTE */
4232                 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4233                                                    rsvd_bits(13, 20);   /* large page */
4234                 rsvd_check->rsvd_bits_mask[1][0] =
4235                         rsvd_check->rsvd_bits_mask[0][0];
4236                 break;
4237         case PT64_ROOT_5LEVEL:
4238                 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
4239                                                    nonleaf_bit8_rsvd |
4240                                                    rsvd_bits(7, 7);
4241                 rsvd_check->rsvd_bits_mask[1][4] =
4242                         rsvd_check->rsvd_bits_mask[0][4];
4243                 fallthrough;
4244         case PT64_ROOT_4LEVEL:
4245                 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
4246                                                    nonleaf_bit8_rsvd |
4247                                                    rsvd_bits(7, 7);
4248                 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
4249                                                    gbpages_bit_rsvd;
4250                 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
4251                 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4252                 rsvd_check->rsvd_bits_mask[1][3] =
4253                         rsvd_check->rsvd_bits_mask[0][3];
4254                 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
4255                                                    gbpages_bit_rsvd |
4256                                                    rsvd_bits(13, 29);
4257                 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4258                                                    rsvd_bits(13, 20); /* large page */
4259                 rsvd_check->rsvd_bits_mask[1][0] =
4260                         rsvd_check->rsvd_bits_mask[0][0];
4261                 break;
4262         }
4263 }
4264
4265 static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
4266 {
4267         /*
4268          * If TDP is enabled, let the guest use GBPAGES if they're supported in
4269          * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
4270          * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
4271          * walk for performance and complexity reasons.  Not to mention KVM
4272          * _can't_ solve the problem because GVA->GPA walks aren't visible to
4273          * KVM once a TDP translation is installed.  Mimic hardware behavior so
4274          * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
4275          */
4276         return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
4277                              guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
4278 }
4279
4280 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4281                                   struct kvm_mmu *context)
4282 {
4283         __reset_rsvds_bits_mask(&context->guest_rsvd_check,
4284                                 vcpu->arch.reserved_gpa_bits,
4285                                 context->root_level, is_efer_nx(context),
4286                                 guest_can_use_gbpages(vcpu),
4287                                 is_cr4_pse(context),
4288                                 guest_cpuid_is_amd_or_hygon(vcpu));
4289 }
4290
4291 static void
4292 __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
4293                             u64 pa_bits_rsvd, bool execonly)
4294 {
4295         u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4296         u64 bad_mt_xwr;
4297
4298         rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
4299         rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
4300         rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
4301         rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
4302         rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4303
4304         /* large page */
4305         rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4306         rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
4307         rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
4308         rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
4309         rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4310
4311         bad_mt_xwr = 0xFFull << (2 * 8);        /* bits 3..5 must not be 2 */
4312         bad_mt_xwr |= 0xFFull << (3 * 8);       /* bits 3..5 must not be 3 */
4313         bad_mt_xwr |= 0xFFull << (7 * 8);       /* bits 3..5 must not be 7 */
4314         bad_mt_xwr |= REPEAT_BYTE(1ull << 2);   /* bits 0..2 must not be 010 */
4315         bad_mt_xwr |= REPEAT_BYTE(1ull << 6);   /* bits 0..2 must not be 110 */
4316         if (!execonly) {
4317                 /* bits 0..2 must not be 100 unless VMX capabilities allow it */
4318                 bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4319         }
4320         rsvd_check->bad_mt_xwr = bad_mt_xwr;
4321 }
4322
4323 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4324                 struct kvm_mmu *context, bool execonly)
4325 {
4326         __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
4327                                     vcpu->arch.reserved_gpa_bits, execonly);
4328 }
4329
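/* Host physical address bits above shadow_phys_bits are reserved. */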
4330 static inline u64 reserved_hpa_bits(void)
4331 {
4332         return rsvd_bits(shadow_phys_bits, 63);
4333 }
4334
4335 /*
4336  * The page table on the host is the shadow page table for the page
4337  * table in the guest or an AMD nested guest; its MMU features
4338  * completely follow the features of the guest.
4339  */
4340 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4341                                         struct kvm_mmu *context)
4342 {
4343         /*
4344          * KVM uses NX when TDP is disabled to handle a variety of scenarios,
4345          * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
4346          * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
4347          * The iTLB multi-hit workaround can be toggled at any time, so assume
4348          * NX can be used by any non-nested shadow MMU to avoid having to reset
4349          * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
4350          */
4351         bool uses_nx = is_efer_nx(context) || !tdp_enabled;
4352
4353         /* @amd adds a check on a bit of SPTEs, which KVM shouldn't use anyway. */
4354         bool is_amd = true;
4355         /* KVM doesn't use 2-level page tables for the shadow MMU. */
4356         bool is_pse = false;
4357         struct rsvd_bits_validate *shadow_zero_check;
4358         int i;
4359
4360         WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
4361
4362         shadow_zero_check = &context->shadow_zero_check;
4363         __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4364                                 context->shadow_root_level, uses_nx,
4365                                 guest_can_use_gbpages(vcpu), is_pse, is_amd);
4366
4367         if (!shadow_me_mask)
4368                 return;
4369
4370         for (i = context->shadow_root_level; --i >= 0;) {
4371                 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4372                 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4373         }
4374
4375 }
4376
4377 static inline bool boot_cpu_is_amd(void)
4378 {
4379         WARN_ON_ONCE(!tdp_enabled);
4380         return shadow_x_mask == 0;
4381 }
4382
4383 /*
4384  * The direct page table on the host uses as many MMU features as
4385  * possible; however, KVM currently does not do execution-protection.
4386  */
4387 static void
4388 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4389                                 struct kvm_mmu *context)
4390 {
4391         struct rsvd_bits_validate *shadow_zero_check;
4392         int i;
4393
4394         shadow_zero_check = &context->shadow_zero_check;
4395
4396         if (boot_cpu_is_amd())
4397                 __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4398                                         context->shadow_root_level, false,
4399                                         boot_cpu_has(X86_FEATURE_GBPAGES),
4400                                         false, true);
4401         else
4402                 __reset_rsvds_bits_mask_ept(shadow_zero_check,
4403                                             reserved_hpa_bits(), false);
4404
4405         if (!shadow_me_mask)
4406                 return;
4407
4408         for (i = context->shadow_root_level; --i >= 0;) {
4409                 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4410                 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4411         }
4412 }
4413
4414 /*
4415  * Same as the comments in reset_shadow_zero_bits_mask(), except this
4416  * is for the shadow page table of an Intel nested guest.
4417  */
4418 static void
4419 reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4420                                 struct kvm_mmu *context, bool execonly)
4421 {
4422         __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
4423                                     reserved_hpa_bits(), execonly);
4424 }
4425
4426 #define BYTE_MASK(access) \
4427         ((1 & (access) ? 2 : 0) | \
4428          (2 & (access) ? 4 : 0) | \
4429          (3 & (access) ? 8 : 0) | \
4430          (4 & (access) ? 16 : 0) | \
4431          (5 & (access) ? 32 : 0) | \
4432          (6 & (access) ? 64 : 0) | \
4433          (7 & (access) ? 128 : 0))
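/*
 * Illustrative sketch (not built): BYTE_MASK(access) yields a byte whose
 * bit N is set iff the UWX combination N (encoded with the ACC_* bits,
 * N = 0..7) includes @access.  Sanity values for the three masks used
 * below, assuming ACC_EXEC_MASK=1, ACC_WRITE_MASK=2, ACC_USER_MASK=4;
 * the helper name is made up for the sketch.
 */
#if 0
static void byte_mask_examples(void)
{
	BUILD_BUG_ON(BYTE_MASK(ACC_EXEC_MASK)  != 0xaa); /* X in 1, 3, 5, 7 */
	BUILD_BUG_ON(BYTE_MASK(ACC_WRITE_MASK) != 0xcc); /* W in 2, 3, 6, 7 */
	BUILD_BUG_ON(BYTE_MASK(ACC_USER_MASK)  != 0xf0); /* U in 4, 5, 6, 7 */
}
#endif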
4434
4436 static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
4437 {
4438         unsigned byte;
4439
4440         const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4441         const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4442         const u8 u = BYTE_MASK(ACC_USER_MASK);
4443
4444         bool cr4_smep = is_cr4_smep(mmu);
4445         bool cr4_smap = is_cr4_smap(mmu);
4446         bool cr0_wp = is_cr0_wp(mmu);
4447         bool efer_nx = is_efer_nx(mmu);
4448
4449         for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4450                 unsigned pfec = byte << 1;
4451
4452                 /*
4453                  * Each "*f" variable has a 1 bit for each UWX value
4454                  * that causes a fault with the given PFEC.
4455                  */
4456
4457                 /* Faults from writes to non-writable pages */
4458                 u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4459                 /* Faults from user mode accesses to supervisor pages */
4460                 u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4461                 /* Faults from fetches of non-executable pages */
4462                 u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4463                 /* Faults from kernel mode fetches of user pages */
4464                 u8 smepf = 0;
4465                 /* Faults from kernel mode accesses of user pages */
4466                 u8 smapf = 0;
4467
4468                 if (!ept) {
4469                         /* Faults from kernel mode accesses to user pages */
4470                         u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4471
4472                         /* Not really needed: !nx will cause pte.nx to fault */
4473                         if (!efer_nx)
4474                                 ff = 0;
4475
4476                         /* Allow supervisor writes if !cr0.wp */
4477                         if (!cr0_wp)
4478                                 wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4479
4480                         /* Disallow supervisor fetches of user code if cr4.smep */
4481                         if (cr4_smep)
4482                                 smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4483
4484                         /*
4485                          * SMAP: kernel-mode data accesses from user-mode
4486                          * mappings should fault.  A fault is considered
4487                          * a SMAP violation if all of the following
4488                          * conditions are true:
4489                          *   - X86_CR4_SMAP is set in CR4
4490                          *   - A user page is accessed
4491                          *   - The access is not a fetch
4492                          *   - The access is in supervisor (kernel) mode
4493                          *   - The access is implicit, or X86_EFLAGS_AC is clear
4494                          *
4495                          * Here, we cover the first four conditions.
4496                          * The fifth is computed dynamically in permission_fault();
4497                          * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4498                          * *not* subject to SMAP restrictions.
4499                          */
4500                         if (cr4_smap)
4501                                 smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4502                 }
4503
4504                 mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4505         }
4506 }
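/*
 * Illustrative sketch (not built) of how the table above is consumed,
 * simplified from permission_fault() in mmu.h; the real helper also folds
 * EFLAGS.AC into the index so that SMAP-exempt accesses select entries
 * with PFERR_RSVD_MASK set.  The function name is made up for the sketch.
 */
#if 0
static bool would_fault(struct kvm_mmu *mmu, unsigned int pfec,
			unsigned int pte_access)
{
	/* PFEC.P (bit 0) is dropped from the index, hence the shift. */
	int index = pfec >> 1;

	return (mmu->permissions[index] >> pte_access) & 1;
}
#endif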
4507
4508 /*
4509  * PKU is an additional mechanism by which paging controls access to
4510  * user-mode addresses based on the value in the PKRU register.  Protection
4511  * key violations are reported through a bit in the page fault error code.
4512  * Unlike other bits of the error code, the PK bit is not known at the
4513  * call site of e.g. gva_to_gpa; it must be computed directly in
4514  * permission_fault based on two bits of PKRU, on some machine state (CR4,
4515  * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4516  *
4517  * In particular the following conditions come from the error code, the
4518  * page tables and the machine state:
4519  * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4520  * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4521  * - PK is always zero if U=0 in the page tables
4522  * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4523  *
4524  * The PKRU bitmask caches the result of these four conditions.  The error
4525  * code (minus the P bit) and the page table's U bit form an index into the
4526  * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4527  * with the two bits of the PKRU register corresponding to the protection key.
4528  * For the first three conditions above the bits will be 00, thus masking
4529  * away both AD and WD.  For all reads, or if the last condition holds, only
4530  * WD will be masked away.
4531 */
4532 static void update_pkru_bitmask(struct kvm_mmu *mmu)
4533 {
4534         unsigned bit;
4535         bool wp;
4536
4537         if (!is_cr4_pke(mmu)) {
4538                 mmu->pkru_mask = 0;
4539                 return;
4540         }
4541
4542         wp = is_cr0_wp(mmu);
4543
4544         for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4545                 unsigned pfec, pkey_bits;
4546                 bool check_pkey, check_write, ff, uf, wf, pte_user;
4547
4548                 pfec = bit << 1;
4549                 ff = pfec & PFERR_FETCH_MASK;
4550                 uf = pfec & PFERR_USER_MASK;
4551                 wf = pfec & PFERR_WRITE_MASK;
4552
4553                 /* PFEC.RSVD is replaced by ACC_USER_MASK. */
4554                 pte_user = pfec & PFERR_RSVD_MASK;
4555
4556                 /*
4557                  * Only accesses that are not instruction fetches and
4558                  * that target a user page need to be checked.
4559                  */
4560                 check_pkey = (!ff && pte_user);
4561                 /*
4562                  * write access is controlled by PKRU if it is a
4563                  * user access or CR0.WP = 1.
4564                  */
4565                 check_write = check_pkey && wf && (uf || wp);
4566
4567                 /* PKRU.AD stops both read and write access. */
4568                 pkey_bits = !!check_pkey;
4569                 /* PKRU.WD stops write access. */
4570                 pkey_bits |= (!!check_write) << 1;
4571
4572                 mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4573         }
4574 }
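/*
 * Illustrative sketch (not built) of how pkru_mask is consumed, simplified
 * from permission_fault() in mmu.h (the real code derives pte.U from
 * pte_access and PT_USER_MASK, and the helper name here is made up).  Two
 * bits are pulled out of pkru_mask at the (PFEC, pte.U) index and ANDed
 * with the AD/WD bits of PKRU for the page's protection key; a nonzero
 * result is a protection-key fault.
 */
#if 0
static bool pkey_would_fault(struct kvm_mmu *mmu, u32 pkru, unsigned int pfec,
			     bool pte_user, unsigned int pte_pkey)
{
	u32 pkru_bits = (pkru >> (pte_pkey * 2)) & 3;
	/* Clear PFEC.P; reuse the RSVD slot of the index for pte.U. */
	u32 offset = (pfec & ~1u) | (pte_user ? PFERR_RSVD_MASK : 0);

	pkru_bits &= mmu->pkru_mask >> offset;
	return pkru_bits != 0;
}
#endif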
4575
4576 static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
4577                                         struct kvm_mmu *mmu)
4578 {
4579         if (!is_cr0_pg(mmu))
4580                 return;
4581
4582         reset_rsvds_bits_mask(vcpu, mmu);
4583         update_permission_bitmask(mmu, false);
4584         update_pkru_bitmask(mmu);
4585 }
4586
4587 static void paging64_init_context(struct kvm_mmu *context)
4588 {
4589         context->page_fault = paging64_page_fault;
4590         context->gva_to_gpa = paging64_gva_to_gpa;
4591         context->sync_page = paging64_sync_page;
4592         context->invlpg = paging64_invlpg;
4593         context->direct_map = false;
4594 }
4595
4596 static void paging32_init_context(struct kvm_mmu *context)
4597 {
4598         context->page_fault = paging32_page_fault;
4599         context->gva_to_gpa = paging32_gva_to_gpa;
4600         context->sync_page = paging32_sync_page;
4601         context->invlpg = paging32_invlpg;
4602         context->direct_map = false;
4603 }
4604
4605 static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
4606                                                          struct kvm_mmu_role_regs *regs)
4607 {
4608         union kvm_mmu_extended_role ext = {0};
4609
4610         if (____is_cr0_pg(regs)) {
4611                 ext.cr0_pg = 1;
4612                 ext.cr4_pae = ____is_cr4_pae(regs);
4613                 ext.cr4_smep = ____is_cr4_smep(regs);
4614                 ext.cr4_smap = ____is_cr4_smap(regs);
4615                 ext.cr4_pse = ____is_cr4_pse(regs);
4616
4617                 /* PKEY and LA57 are active iff long mode is active. */
4618                 ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
4619                 ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
4620         }
4621
4622         ext.valid = 1;
4623
4624         return ext;
4625 }
4626
4627 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
4628                                                    struct kvm_mmu_role_regs *regs,
4629                                                    bool base_only)
4630 {
4631         union kvm_mmu_role role = {0};
4632
4633         role.base.access = ACC_ALL;
4634         if (____is_cr0_pg(regs)) {
4635                 role.base.efer_nx = ____is_efer_nx(regs);
4636                 role.base.cr0_wp = ____is_cr0_wp(regs);
4637         }
4638         role.base.smm = is_smm(vcpu);
4639         role.base.guest_mode = is_guest_mode(vcpu);
4640
4641         if (base_only)
4642                 return role;
4643
4644         role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
4645
4646         return role;
4647 }
4648
4649 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
4650 {
4651         /* tdp_root_level is the level forced by the architecture; use it if nonzero. */
4652         if (tdp_root_level)
4653                 return tdp_root_level;
4654
4655         /* Use 5-level TDP if and only if it's needed, i.e. if MAXPHYADDR > 48. */
4656         if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4657                 return 4;
4658
4659         return max_tdp_level;
4660 }
4661
4662 static union kvm_mmu_role
4663 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
4664                                 struct kvm_mmu_role_regs *regs, bool base_only)
4665 {
4666         union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4667
4668         role.base.ad_disabled = (shadow_accessed_mask == 0);
4669         role.base.level = kvm_mmu_get_tdp_level(vcpu);
4670         role.base.direct = true;
4671         role.base.gpte_is_8_bytes = true;
4672
4673         return role;
4674 }
4675
4676 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4677 {
4678         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4679         struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4680         union kvm_mmu_role new_role =
4681                 kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
4682
4683         if (new_role.as_u64 == context->mmu_role.as_u64)
4684                 return;
4685
4686         context->mmu_role.as_u64 = new_role.as_u64;
4687         context->page_fault = kvm_tdp_page_fault;
4688         context->sync_page = nonpaging_sync_page;
4689         context->invlpg = NULL;
4690         context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4691         context->direct_map = true;
4692         context->get_guest_pgd = get_cr3;
4693         context->get_pdptr = kvm_pdptr_read;
4694         context->inject_page_fault = kvm_inject_page_fault;
4695         context->root_level = role_regs_to_root_level(&regs);
4696
4697         if (!is_cr0_pg(context))
4698                 context->gva_to_gpa = nonpaging_gva_to_gpa;
4699         else if (is_cr4_pae(context))
4700                 context->gva_to_gpa = paging64_gva_to_gpa;
4701         else
4702                 context->gva_to_gpa = paging32_gva_to_gpa;
4703
4704         reset_guest_paging_metadata(vcpu, context);
4705         reset_tdp_shadow_zero_bits_mask(vcpu, context);
4706 }
4707
4708 static union kvm_mmu_role
4709 kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
4710                                       struct kvm_mmu_role_regs *regs, bool base_only)
4711 {
4712         union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4713
4714         role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
4715         role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
4716         role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
4717
4718         return role;
4719 }
4720
4721 static union kvm_mmu_role
4722 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
4723                                    struct kvm_mmu_role_regs *regs, bool base_only)
4724 {
4725         union kvm_mmu_role role =
4726                 kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
4727
4728         role.base.direct = !____is_cr0_pg(regs);
4729
4730         if (!____is_efer_lma(regs))
4731                 role.base.level = PT32E_ROOT_LEVEL;
4732         else if (____is_cr4_la57(regs))
4733                 role.base.level = PT64_ROOT_5LEVEL;
4734         else
4735                 role.base.level = PT64_ROOT_4LEVEL;
4736
4737         return role;
4738 }
4739
4740 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4741                                     struct kvm_mmu_role_regs *regs,
4742                                     union kvm_mmu_role new_role)
4743 {
4744         if (new_role.as_u64 == context->mmu_role.as_u64)
4745                 return;
4746
4747         context->mmu_role.as_u64 = new_role.as_u64;
4748
4749         if (!is_cr0_pg(context))
4750                 nonpaging_init_context(context);
4751         else if (is_cr4_pae(context))
4752                 paging64_init_context(context);
4753         else
4754                 paging32_init_context(context);
4755         context->root_level = role_regs_to_root_level(regs);
4756
4757         reset_guest_paging_metadata(vcpu, context);
4758         context->shadow_root_level = new_role.base.level;
4759
4760         reset_shadow_zero_bits_mask(vcpu, context);
4761 }
4762
4763 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
4764                                 struct kvm_mmu_role_regs *regs)
4765 {
4766         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4767         union kvm_mmu_role new_role =
4768                 kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
4769
4770         shadow_mmu_init_context(vcpu, context, regs, new_role);
4771 }
4772
4773 static union kvm_mmu_role
4774 kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
4775                                    struct kvm_mmu_role_regs *regs)
4776 {
4777         union kvm_mmu_role role =
4778                 kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4779
4780         role.base.direct = false;
4781         role.base.level = kvm_mmu_get_tdp_level(vcpu);
4782
4783         return role;
4784 }
4785
4786 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
4787                              unsigned long cr4, u64 efer, gpa_t nested_cr3)
4788 {
4789         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4790         struct kvm_mmu_role_regs regs = {
4791                 .cr0 = cr0,
4792                 .cr4 = cr4,
4793                 .efer = efer,
4794         };
4795         union kvm_mmu_role new_role;
4796
4797         new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
4798
4799         __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
4800
4801         shadow_mmu_init_context(vcpu, context, &regs, new_role);
4802 }
4803 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4804
4805 static union kvm_mmu_role
4806 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4807                                    bool execonly, u8 level)
4808 {
4809         union kvm_mmu_role role = {0};
4810
4811         /* SMM flag is inherited from root_mmu */
4812         role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4813
4814         role.base.level = level;
4815         role.base.gpte_is_8_bytes = true;
4816         role.base.direct = false;
4817         role.base.ad_disabled = !accessed_dirty;
4818         role.base.guest_mode = true;
4819         role.base.access = ACC_ALL;
4820
4821         /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
4822         role.ext.word = 0;
4823         role.ext.execonly = execonly;
4824         role.ext.valid = 1;
4825
4826         return role;
4827 }
4828
4829 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4830                              bool accessed_dirty, gpa_t new_eptp)
4831 {
4832         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4833         u8 level = vmx_eptp_page_walk_level(new_eptp);
4834         union kvm_mmu_role new_role =
4835                 kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
4836                                                    execonly, level);
4837
4838         __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
4839
4840         if (new_role.as_u64 == context->mmu_role.as_u64)
4841                 return;
4842
4843         context->mmu_role.as_u64 = new_role.as_u64;
4844
4845         context->shadow_root_level = level;
4846
4847         context->ept_ad = accessed_dirty;
4848         context->page_fault = ept_page_fault;
4849         context->gva_to_gpa = ept_gva_to_gpa;
4850         context->sync_page = ept_sync_page;
4851         context->invlpg = ept_invlpg;
4852         context->root_level = level;
4853         context->direct_map = false;
4854
4855         update_permission_bitmask(context, true);
4856         update_pkru_bitmask(context);
4857         reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4858         reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4859 }
4860 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4861
4862 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4863 {
4864         struct kvm_mmu *context = &vcpu->arch.root_mmu;
4865         struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4866
4867         kvm_init_shadow_mmu(vcpu, &regs);
4868
4869         context->get_guest_pgd     = get_cr3;
4870         context->get_pdptr         = kvm_pdptr_read;
4871         context->inject_page_fault = kvm_inject_page_fault;
4872 }
4873
4874 static union kvm_mmu_role
4875 kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
4876 {
4877         union kvm_mmu_role role;
4878
4879         role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4880
4881         /*
4882          * Nested MMUs are used only for walking L2's gva->gpa; they never
4883          * have shadow pages of their own and so "direct" has no meaning.
4884          * Set it to "true" to try to detect bogus usage of the nested MMU.
4885          */
4886         role.base.direct = true;
4887         role.base.level = role_regs_to_root_level(regs);
4888         return role;
4889 }
4890
4891 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4892 {
4893         struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4894         union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
4895         struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4896
4897         if (new_role.as_u64 == g_context->mmu_role.as_u64)
4898                 return;
4899
4900         g_context->mmu_role.as_u64 = new_role.as_u64;
4901         g_context->get_guest_pgd     = get_cr3;
4902         g_context->get_pdptr         = kvm_pdptr_read;
4903         g_context->inject_page_fault = kvm_inject_page_fault;
4904         g_context->root_level        = new_role.base.level;
4905
4906         /*
4907          * L2 page tables are never shadowed, so there is no need to sync
4908          * SPTEs.
4909          */
4910         g_context->invlpg            = NULL;
4911
4912         /*
4913          * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
4914          * L1's nested page tables (e.g. EPT12). The nested translation
4915          * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4916          * L2's page tables as the first level of translation and L1's
4917          * nested page tables as the second level of translation. Basically
4918          * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4919          */
4920         if (!is_paging(vcpu))
4921                 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4922         else if (is_long_mode(vcpu))
4923                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4924         else if (is_pae(vcpu))
4925                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4926         else
4927                 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
4928
4929         reset_guest_paging_metadata(vcpu, g_context);
4930 }
4931
4932 void kvm_init_mmu(struct kvm_vcpu *vcpu)
4933 {
4934         if (mmu_is_nested(vcpu))
4935                 init_kvm_nested_mmu(vcpu);
4936         else if (tdp_enabled)
4937                 init_kvm_tdp_mmu(vcpu);
4938         else
4939                 init_kvm_softmmu(vcpu);
4940 }
4941 EXPORT_SYMBOL_GPL(kvm_init_mmu);
4942
4943 static union kvm_mmu_page_role
4944 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
4945 {
4946         struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4947         union kvm_mmu_role role;
4948
4949         if (tdp_enabled)
4950                 role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
4951         else
4952                 role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
4953
4954         return role.base;
4955 }
4956
4957 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
4958 {
4959         /*
4960          * Invalidate all MMU roles to force them to reinitialize as CPUID
4961          * information is factored into reserved bit calculations.
4962          */
4963         vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
4964         vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
4965         vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
4966         kvm_mmu_reset_context(vcpu);
4967
4968         /*
4969          * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
4970          * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
4971          * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
4972          * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
4973          * sweep the problem under the rug.
4974          *
4975          * KVM's horrific CPUID ABI makes the problem all but impossible to
4976          * solve, as correctly handling multiple vCPU models (with respect to
4977          * paging and physical address properties) in a single VM would require
4978          * tracking all relevant CPUID information in kvm_mmu_page_role.  That
4979          * is very undesirable as it would double the memory requirements for
4980          * gfn_track (see struct kvm_mmu_page_role comments), and in practice
4981          * no sane VMM mucks with the core vCPU model on the fly.
4982          */
4983         if (vcpu->arch.last_vmentry_cpu != -1) {
4984                 pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
4985                 pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
4986         }
4987 }
4988
4989 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
4990 {
4991         kvm_mmu_unload(vcpu);
4992         kvm_init_mmu(vcpu);
4993 }
4994 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
4995
4996 int kvm_mmu_load(struct kvm_vcpu *vcpu)
4997 {
4998         int r;
4999
5000         r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
5001         if (r)
5002                 goto out;
5003         r = mmu_alloc_special_roots(vcpu);
5004         if (r)
5005                 goto out;
5006         if (vcpu->arch.mmu->direct_map)
5007                 r = mmu_alloc_direct_roots(vcpu);
5008         else
5009                 r = mmu_alloc_shadow_roots(vcpu);
5010         if (r)
5011                 goto out;
5012
5013         kvm_mmu_sync_roots(vcpu);
5014
5015         kvm_mmu_load_pgd(vcpu);
5016         static_call(kvm_x86_tlb_flush_current)(vcpu);
5017 out:
5018         return r;
5019 }
5020
5021 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5022 {
5023         kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5024         WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
5025         kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5026         WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
5027 }
5028
5029 static bool need_remote_flush(u64 old, u64 new)
5030 {
5031         if (!is_shadow_present_pte(old))
5032                 return false;
5033         if (!is_shadow_present_pte(new))
5034                 return true;
5035         if ((old ^ new) & PT64_BASE_ADDR_MASK)
5036                 return true;
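	/*
	 * Flip the NX bit in both values so that newly-set NX (a new
	 * restriction) shows up below as a cleared permission bit; any
	 * permission removed by the new spte requires a remote flush.
	 */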
5037         old ^= shadow_nx_mask;
5038         new ^= shadow_nx_mask;
5039         return (old & ~new & PT64_PERM_MASK) != 0;
5040 }
5041
5042 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5043                                     int *bytes)
5044 {
5045         u64 gentry = 0;
5046         int r;
5047
5048         /*
5049          * Assume that the pte write is on a page table of the same type
5050          * as the current vcpu's paging mode, since we update the sptes
5051          * only when they have the same mode.
5052          */
5053         if (is_pae(vcpu) && *bytes == 4) {
5054                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5055                 *gpa &= ~(gpa_t)7;
5056                 *bytes = 8;
5057         }
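	/*
	 * e.g. a PAE guest updating the gpte at gpa 0x1000 writes 4 bytes
	 * at 0x1000 and then 4 bytes at 0x1004; both writes are widened
	 * here to an atomic 8-byte read of the whole gpte, so a torn value
	 * is never fetched.
	 */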
5058
5059         if (*bytes == 4 || *bytes == 8) {
5060                 r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5061                 if (r)
5062                         gentry = 0;
5063         }
5064
5065         return gentry;
5066 }
5067
5068 /*
5069  * If we're seeing too many writes to a page, it may no longer be a page table,
5070  * or we may be forking, in which case it is better to unmap the page.
5071  */
5072 static bool detect_write_flooding(struct kvm_mmu_page *sp)
5073 {
5074         /*
5075          * Skip write-flooding detection for an sp whose level is 1, because
5076          * it can become unsync, in which case the guest page is not write-protected.
5077          */
5078         if (sp->role.level == PG_LEVEL_4K)
5079                 return false;
5080
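	/*
	 * Note, the count is reset when the sp is used to translate a guest
	 * access, so only pages that are written again and again without
	 * being used as page tables trip the threshold below.
	 */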
5081         atomic_inc(&sp->write_flooding_count);
5082         return atomic_read(&sp->write_flooding_count) >= 3;
5083 }
5084
5085 /*
5086  * Misaligned accesses are too much trouble to fix up; also, they usually
5087  * indicate a page is not used as a page table.
5088  */
5089 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5090                                     int bytes)
5091 {
5092         unsigned offset, pte_size, misaligned;
5093
5094         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
5095                  gpa, bytes, sp->role.word);
5096
5097         offset = offset_in_page(gpa);
5098         pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
5099
5100         /*
5101          * Sometimes the OS writes only the last byte to update status
5102          * bits; for example, Linux uses an andb instruction in clear_bit().
5103          */
5104         if (!(offset & (pte_size - 1)) && bytes == 1)
5105                 return false;
5106
5107         misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5108         misaligned |= bytes < 4;
5109
5110         return misaligned;
5111 }
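/*
 * Worked example: the XOR above flags any write whose first and last bytes
 * land in different ptes.  With 8-byte gptes, a 4-byte write at offset 4
 * stays within one gpte ((4 ^ 7) & ~7 == 0), while a 4-byte write at
 * offset 6 spans two gptes ((6 ^ 9) & ~7 == 8) and so is misaligned.
 */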
5112
5113 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5114 {
5115         unsigned page_offset, quadrant;
5116         u64 *spte;
5117         int level;
5118
5119         page_offset = offset_in_page(gpa);
5120         level = sp->role.level;
5121         *nspte = 1;
5122         if (!sp->role.gpte_is_8_bytes) {
5123                 page_offset <<= 1;      /* 32->64 */
5124                 /*
5125                  * A 32-bit pde maps 4MB while the shadow pdes map
5126                  * only 2MB.  So we need to double the offset again
5127                  * and zap two pdes instead of one.
5128                  */
5129                 if (level == PT32_ROOT_LEVEL) {
5130                         page_offset &= ~7; /* kill rounding error */
5131                         page_offset <<= 1;
5132                         *nspte = 2;
5133                 }
5134                 quadrant = page_offset >> PAGE_SHIFT;
5135                 page_offset &= ~PAGE_MASK;
5136                 if (quadrant != sp->role.quadrant)
5137                         return NULL;
5138         }
5139
5140         spte = &sp->spt[page_offset / sizeof(*spte)];
5141         return spte;
5142 }
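/*
 * Worked example: for a 32-bit non-PAE guest, a 4KiB guest page holds 1024
 * 4-byte gptes but a shadow page holds only 512 8-byte sptes, so each guest
 * page is shadowed by two sps distinguished by role.quadrant.  A write to
 * the gpte at page offset 0x804 (index 0x201) doubles to 0x1008, i.e.
 * quadrant 1, spte index 1; the quadrant-0 sp is skipped entirely.
 */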
5143
5144 static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5145                               const u8 *new, int bytes,
5146                               struct kvm_page_track_notifier_node *node)
5147 {
5148         gfn_t gfn = gpa >> PAGE_SHIFT;
5149         struct kvm_mmu_page *sp;
5150         LIST_HEAD(invalid_list);
5151         u64 entry, gentry, *spte;
5152         int npte;
5153         bool flush = false;
5154
5155         /*
5156          * If we don't have indirect shadow pages, it means no page is
5157          * write-protected, so we can simply exit.
5158          */
5159         if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5160                 return;
5161
5162         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5163
5164         /*
5165          * No need to care whether the memory allocation succeeded,
5166          * since pte prefetch is skipped if the cache does not have
5167          * enough objects.
5168          */
5169         mmu_topup_memory_caches(vcpu, true);
5170
5171         write_lock(&vcpu->kvm->mmu_lock);
5172
5173         gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5174
5175         ++vcpu->kvm->stat.mmu_pte_write;
5176         kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5177
5178         for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5179                 if (detect_write_misaligned(sp, gpa, bytes) ||
5180                       detect_write_flooding(sp)) {
5181                         kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5182                         ++vcpu->kvm->stat.mmu_flooded;
5183                         continue;
5184                 }
5185
5186                 spte = get_written_sptes(sp, gpa, &npte);
5187                 if (!spte)
5188                         continue;
5189
5190                 while (npte--) {
5191                         entry = *spte;
5192                         mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5193                         if (gentry && sp->role.level != PG_LEVEL_4K)
5194                                 ++vcpu->kvm->stat.mmu_pde_zapped;
5195                         if (need_remote_flush(entry, *spte))
5196                                 flush = true;
5197                         ++spte;
5198                 }
5199         }
5200         kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
5201         kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5202         write_unlock(&vcpu->kvm->mmu_lock);
5203 }
5204
5205 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5206                        void *insn, int insn_len)
5207 {
5208         int r, emulation_type = EMULTYPE_PF;
5209         bool direct = vcpu->arch.mmu->direct_map;
5210
5211         if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5212                 return RET_PF_RETRY;
5213
5214         r = RET_PF_INVALID;
5215         if (unlikely(error_code & PFERR_RSVD_MASK)) {
5216                 r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5217                 if (r == RET_PF_EMULATE)
5218                         goto emulate;
5219         }
5220
5221         if (r == RET_PF_INVALID) {
5222                 r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
5223                                           lower_32_bits(error_code), false);
5224                 if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
5225                         return -EIO;
5226         }
5227
5228         if (r < 0)
5229                 return r;
5230         if (r != RET_PF_EMULATE)
5231                 return 1;
5232
5233         /*
5234          * Before emulating the instruction, check if the error code
5235          * was due to a RO violation while translating the guest page.
5236          * This can occur when using nested virtualization with nested
5237          * paging in both guests. If true, we simply unprotect the page
5238          * and resume the guest.
5239          */
5240         if (vcpu->arch.mmu->direct_map &&
5241             (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5242                 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5243                 return 1;
5244         }
5245
5246         /*
5247          * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5248          * optimistically try to just unprotect the page and let the processor
5249          * re-execute the instruction that caused the page fault.  Do not allow
5250          * retrying MMIO emulation, as it's not only pointless but could also
5251          * cause us to enter an infinite loop because the processor will keep
5252          * faulting on the non-existent MMIO address.  Retrying an instruction
5253          * from a nested guest is also pointless and dangerous as we are only
5254          * explicitly shadowing L1's page tables, i.e. unprotecting something
5255          * for L1 isn't going to magically fix whatever issue caused L2 to fail.
5256          */
5257         if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
5258                 emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5259 emulate:
5260         return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5261                                        insn_len);
5262 }
5263 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5264
5265 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
5266                             gva_t gva, hpa_t root_hpa)
5267 {
5268         int i;
5269
5270         /* It's actually a GPA for vcpu->arch.guest_mmu.  */
5271         if (mmu != &vcpu->arch.guest_mmu) {
5272                 /* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5273                 if (is_noncanonical_address(gva, vcpu))
5274                         return;
5275
5276                 static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5277         }
5278
5279         if (!mmu->invlpg)
5280                 return;
5281
5282         if (root_hpa == INVALID_PAGE) {
5283                 mmu->invlpg(vcpu, gva, mmu->root_hpa);
5284
5285                 /*
5286                  * INVLPG is required to invalidate any global mappings for the VA,
5287                  * irrespective of PCID.  Determining whether any of the prev_root
5288                  * mappings of the VA is marked global would take roughly as much
5289                  * work as just syncing it blindly, so we might as well always
5290                  * sync it.
5291                  *
5292                  * Mappings not reachable via the current cr3 or the prev_roots will be
5293                  * synced when switching to that cr3, so nothing needs to be done here
5294                  * for them.
5295                  */
5296                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5297                         if (VALID_PAGE(mmu->prev_roots[i].hpa))
5298                                 mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5299         } else {
5300                 mmu->invlpg(vcpu, gva, root_hpa);
5301         }
5302 }
5303
5304 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
5305 {
5306         kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
5307         ++vcpu->stat.invlpg;
5308 }
5309 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5310
5312 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5313 {
5314         struct kvm_mmu *mmu = vcpu->arch.mmu;
5315         bool tlb_flush = false;
5316         uint i;
5317
5318         if (pcid == kvm_get_active_pcid(vcpu)) {
5319                 mmu->invlpg(vcpu, gva, mmu->root_hpa);
5320                 tlb_flush = true;
5321         }
5322
5323         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5324                 if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5325                     pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5326                         mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5327                         tlb_flush = true;
5328                 }
5329         }
5330
5331         if (tlb_flush)
5332                 static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5333
5334         ++vcpu->stat.invlpg;
5335
5336         /*
5337          * Mappings not reachable via the current cr3 or the prev_roots will be
5338          * synced when switching to that cr3, so nothing needs to be done here
5339          * for them.
5340          */
5341 }
5342
5343 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
5344                        int tdp_max_root_level, int tdp_huge_page_level)
5345 {
5346         tdp_enabled = enable_tdp;
5347         tdp_root_level = tdp_forced_root_level;
5348         max_tdp_level = tdp_max_root_level;
5349
5350         /*
5351          * max_huge_page_level reflects KVM's MMU capabilities irrespective
5352          * of kernel support, e.g. KVM may be capable of using 1GB pages when
5353          * the kernel is not.  But, KVM never creates a page size greater than
5354          * what is used by the kernel for any given HVA, i.e. the kernel's
5355          * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5356          */
5357         if (tdp_enabled)
5358                 max_huge_page_level = tdp_huge_page_level;
5359         else if (boot_cpu_has(X86_FEATURE_GBPAGES))
5360                 max_huge_page_level = PG_LEVEL_1G;
5361         else
5362                 max_huge_page_level = PG_LEVEL_2M;
5363 }
5364 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
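/*
 * Illustrative sketch (not built): a hypothetical vendor module that
 * enables TDP, forces no root level, supports 5-level TDP and 1GiB huge
 * pages would configure the MMU as below.  The values and the helper name
 * are made up for the sketch, not taken from vmx.c or svm.c.
 */
#if 0
static void example_hardware_setup(void)
{
	kvm_configure_mmu(true, 0, 5, PG_LEVEL_1G);
}
#endif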
5365
5366 /* The return value indicates whether a TLB flush on all vcpus is needed. */
5367 typedef bool (*slot_level_handler) (struct kvm *kvm,
5368                                     struct kvm_rmap_head *rmap_head,
5369                                     const struct kvm_memory_slot *slot);
5370
5371 /* The caller must hold mmu_lock before calling this function. */
5372 static __always_inline bool
5373 slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5374                         slot_level_handler fn, int start_level, int end_level,
5375                         gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
5376                         bool flush)
5377 {
5378         struct slot_rmap_walk_iterator iterator;
5379
5380         for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5381                         end_gfn, &iterator) {
5382                 if (iterator.rmap)
5383                         flush |= fn(kvm, iterator.rmap, memslot);
5384
5385                 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
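			/*
			 * Flush any accumulated zaps before yielding so that
			 * tasks that acquire mmu_lock while this walk is
			 * rescheduled do not run with stale TLB entries for
			 * already-zapped sptes.
			 */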
5386                         if (flush && flush_on_yield) {
5387                                 kvm_flush_remote_tlbs_with_address(kvm,
5388                                                 start_gfn,
5389                                                 iterator.gfn - start_gfn + 1);
5390                                 flush = false;
5391                         }
5392                         cond_resched_rwlock_write(&kvm->mmu_lock);
5393                 }
5394         }
5395
5396         return flush;
5397 }
5398
5399 static __always_inline bool
5400 slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5401                   slot_level_handler fn, int start_level, int end_level,
5402                   bool flush_on_yield)
5403 {
5404         return slot_handle_level_range(kvm, memslot, fn, start_level,
5405                         end_level, memslot->base_gfn,
5406                         memslot->base_gfn + memslot->npages - 1,
5407                         flush_on_yield, false);
5408 }
5409
5410 static __always_inline bool
5411 slot_handle_leaf(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5412                  slot_level_handler fn, bool flush_on_yield)
5413 {
5414         return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5415                                  PG_LEVEL_4K, flush_on_yield);
5416 }
5417
5418 static void free_mmu_pages(struct kvm_mmu *mmu)
5419 {
5420         if (!tdp_enabled && mmu->pae_root)
5421                 set_memory_encrypted((unsigned long)mmu->pae_root, 1);
5422         free_page((unsigned long)mmu->pae_root);
5423         free_page((unsigned long)mmu->pml4_root);
5424         free_page((unsigned long)mmu->pml5_root);
5425 }
5426
5427 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5428 {
5429         struct page *page;
5430         int i;
5431
5432         mmu->root_hpa = INVALID_PAGE;
5433         mmu->root_pgd = 0;
5434         mmu->translate_gpa = translate_gpa;
5435         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5436                 mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5437
5438         /*
5439          * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5440          * while the PDP table is a per-vCPU construct that's allocated at MMU
5441          * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5442          * x86_64.  Therefore we need to allocate the PDP table in the first
5443          * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
5444          * generally doesn't use PAE paging and can skip allocating the PDP
5445          * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
5446          * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
5447          * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
5448          */
5449         if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5450                 return 0;
5451
5452         page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5453         if (!page)
5454                 return -ENOMEM;
5455
5456         mmu->pae_root = page_address(page);
5457
5458         /*
5459          * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
5460          * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
5461          * that KVM's writes and the CPU's reads get along.  Note, this is
5462          * only necessary when using shadow paging, as 64-bit NPT can get at
5463          * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
5464          * by 32-bit kernels (when KVM itself uses 32-bit NPT).
5465          */
5466         if (!tdp_enabled)
5467                 set_memory_decrypted((unsigned long)mmu->pae_root, 1);
5468         else
5469                 WARN_ON_ONCE(shadow_me_mask);
5470
5471         for (i = 0; i < 4; ++i)
5472                 mmu->pae_root[i] = INVALID_PAE_ROOT;
5473
5474         return 0;
5475 }
5476
5477 int kvm_mmu_create(struct kvm_vcpu *vcpu)
5478 {
5479         int ret;
5480
5481         vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
5482         vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
5483
5484         vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
5485         vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
5486
5487         vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
5488
5489         vcpu->arch.mmu = &vcpu->arch.root_mmu;
5490         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5491
5492         vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5493
5494         ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5495         if (ret)
5496                 return ret;
5497
5498         ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5499         if (ret)
5500                 goto fail_allocate_root;
5501
5502         return ret;
5503  fail_allocate_root:
5504         free_mmu_pages(&vcpu->arch.guest_mmu);
5505         return ret;
5506 }
5507
5508 #define BATCH_ZAP_PAGES 10
5509 static void kvm_zap_obsolete_pages(struct kvm *kvm)
5510 {
5511         struct kvm_mmu_page *sp, *node;
5512         int nr_zapped, batch = 0;
5513
5514 restart:
5515         list_for_each_entry_safe_reverse(sp, node,
5516               &kvm->arch.active_mmu_pages, link) {
5517                 /*
5518                  * No obsolete valid page exists before a newly created page
5519                  * since active_mmu_pages is a FIFO list.
5520                  */
5521                 if (!is_obsolete_sp(kvm, sp))
5522                         break;
5523
5524                 /*
5525                  * Invalid pages should never land back on the list of active
5526                  * pages.  Skip the bogus page, otherwise we'll get stuck in an
5527                  * infinite loop if the page gets put back on the list (again).
5528                  */
5529                 if (WARN_ON(sp->role.invalid))
5530                         continue;
5531
5532                 /*
5533                  * No need to flush the TLB since we're only zapping shadow
5534                  * pages with an obsolete generation number and all vCPUs have
5535                  * loaded a new root, i.e. the shadow pages being zapped cannot
5536                  * be in active use by the guest.
5537                  */
5538                 if (batch >= BATCH_ZAP_PAGES &&
5539                     cond_resched_rwlock_write(&kvm->mmu_lock)) {
5540                         batch = 0;
5541                         goto restart;
5542                 }
5543
5544                 if (__kvm_mmu_prepare_zap_page(kvm, sp,
5545                                 &kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5546                         batch += nr_zapped;
5547                         goto restart;
5548                 }
5549         }
5550
5551         /*
5552          * Trigger a remote TLB flush before freeing the page tables to ensure
5553          * KVM is not in the middle of a lockless shadow page table walk, which
5554          * may reference the pages.
5555          */
5556         kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5557 }
5558
5559 /*
5560  * Fast-invalidate all shadow pages, using a lock-break technique
5561  * to zap obsolete pages.
5562  *
5563  * This is required when a memslot is being deleted or the VM is being
5564  * destroyed; in those cases, we must ensure that the KVM MMU does not
5565  * use any resource of the slot being deleted, or of any slot at all,
5566  * after this function returns.
5567  */
5568 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5569 {
5570         lockdep_assert_held(&kvm->slots_lock);
5571
5572         write_lock(&kvm->mmu_lock);
5573         trace_kvm_mmu_zap_all_fast(kvm);
5574
5575         /*
5576          * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5577          * held for the entire duration of zapping obsolete pages, it's
5578          * impossible for there to be multiple invalid generations associated
5579          * with *valid* shadow pages at any given time, i.e. there is exactly
5580          * one valid generation and (at most) one invalid generation.
5581          */
5582         kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5583
5584         /*
5585          * In order to ensure all threads see this change when handling the
5586          * MMU reload signal, this must happen in the same critical section
5587          * as kvm_reload_remote_mmus, and before kvm_zap_obsolete_pages, as
5588          * kvm_zap_obsolete_pages could drop the MMU lock and yield.
5589          */
5590         if (is_tdp_mmu_enabled(kvm))
5591                 kvm_tdp_mmu_invalidate_all_roots(kvm);
5592
5593         /*
5594          * Notify all vcpus to reload their shadow page tables and flush
5595          * their TLBs.  All vcpus will then switch to the new shadow page
5596          * tables with the new mmu_valid_gen.
5597          *
5598          * Note: we need to do this under the protection of mmu_lock;
5599          * otherwise, a vcpu could purge a shadow page but miss the TLB flush.
5600          */
5601         kvm_reload_remote_mmus(kvm);
5602
5603         kvm_zap_obsolete_pages(kvm);
5604
5605         write_unlock(&kvm->mmu_lock);
5606
5607         if (is_tdp_mmu_enabled(kvm)) {
5608                 read_lock(&kvm->mmu_lock);
5609                 kvm_tdp_mmu_zap_invalidated_roots(kvm);
5610                 read_unlock(&kvm->mmu_lock);
5611         }
5612 }
5613
5614 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5615 {
5616         return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5617 }
5618
5619 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5620                         struct kvm_memory_slot *slot,
5621                         struct kvm_page_track_notifier_node *node)
5622 {
5623         kvm_mmu_zap_all_fast(kvm);
5624 }
5625
5626 void kvm_mmu_init_vm(struct kvm *kvm)
5627 {
5628         struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5629
5630         spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
5631
5632         if (!kvm_mmu_init_tdp_mmu(kvm))
5633                 /*
5634                  * No smp_load/store wrappers needed here as we are in
5635                  * VM init and there cannot be any memslots / other threads
5636                  * accessing this struct kvm yet.
5637                  */
5638                 kvm->arch.memslots_have_rmaps = true;
5639
5640         node->track_write = kvm_mmu_pte_write;
5641         node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5642         kvm_page_track_register_notifier(kvm, node);
5643 }
5644
5645 void kvm_mmu_uninit_vm(struct kvm *kvm)
5646 {
5647         struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5648
5649         kvm_page_track_unregister_notifier(kvm, node);
5650
5651         kvm_mmu_uninit_tdp_mmu(kvm);
5652 }
5653
5654 /*
5655  * Invalidate (zap) SPTEs that cover GFNs from gfn_start up to, but not
5656  * including, gfn_end.
5657  */
5658 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5659 {
5660         struct kvm_memslots *slots;
5661         struct kvm_memory_slot *memslot;
5662         int i;
5663         bool flush = false;
5664
5665         write_lock(&kvm->mmu_lock);
5666
5667         kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
5668
5669         if (kvm_memslots_have_rmaps(kvm)) {
5670                 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5671                         slots = __kvm_memslots(kvm, i);
5672                         kvm_for_each_memslot(memslot, slots) {
5673                                 gfn_t start, end;
5674
5675                                 start = max(gfn_start, memslot->base_gfn);
5676                                 end = min(gfn_end, memslot->base_gfn + memslot->npages);
5677                                 if (start >= end)
5678                                         continue;
5679
5680                                 flush = slot_handle_level_range(kvm,
5681                                                 (const struct kvm_memory_slot *) memslot,
5682                                                 kvm_zap_rmapp, PG_LEVEL_4K,
5683                                                 KVM_MAX_HUGEPAGE_LEVEL, start,
5684                                                 end - 1, true, flush);
5685                         }
5686                 }
5687                 if (flush)
5688                         kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
5689                                                            gfn_end - gfn_start);
5690         }
5691
5692         if (is_tdp_mmu_enabled(kvm)) {
5693                 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
5694                         flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
5695                                                           gfn_end, flush);
5696                 if (flush)
5697                         kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
5698                                                            gfn_end - gfn_start);
5699         }
5700
5701         if (flush)
5702                 kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end - gfn_start);
5703
5704         kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
5705
5706         write_unlock(&kvm->mmu_lock);
5707 }
5708
5709 static bool slot_rmap_write_protect(struct kvm *kvm,
5710                                     struct kvm_rmap_head *rmap_head,
5711                                     const struct kvm_memory_slot *slot)
5712 {
5713         return __rmap_write_protect(kvm, rmap_head, false);
5714 }
5715
5716 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
5717                                       const struct kvm_memory_slot *memslot,
5718                                       int start_level)
5719 {
5720         bool flush = false;
5721
5722         if (kvm_memslots_have_rmaps(kvm)) {
5723                 write_lock(&kvm->mmu_lock);
5724                 flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5725                                           start_level, KVM_MAX_HUGEPAGE_LEVEL,
5726                                           false);
5727                 write_unlock(&kvm->mmu_lock);
5728         }
5729
5730         if (is_tdp_mmu_enabled(kvm)) {
5731                 read_lock(&kvm->mmu_lock);
5732                 flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
5733                 read_unlock(&kvm->mmu_lock);
5734         }
5735
5736         /*
5737          * We can flush all the TLBs out of the mmu lock without TLB
5738          * corruption, since we only change the spte from writable to
5739          * read-only; thus the only case that needs care is a spte
5740          * changing from present to present (changing the spte from
5741          * present to nonpresent flushes all the TLBs immediately).
5742          * In other words, the only case that matters is
5743          * mmu_spte_update(), which checks Host-writable | MMU-writable
5744          * instead of PT_WRITABLE_MASK, and thus no longer depends on
5745          * PT_WRITABLE_MASK at all.
5746          */
5747         if (flush)
5748                 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5749 }
5750
5751 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
5752                                          struct kvm_rmap_head *rmap_head,
5753                                          const struct kvm_memory_slot *slot)
5754 {
5755         u64 *sptep;
5756         struct rmap_iterator iter;
5757         int need_tlb_flush = 0;
5758         kvm_pfn_t pfn;
5759         struct kvm_mmu_page *sp;
5760
5761 restart:
5762         for_each_rmap_spte(rmap_head, &iter, sptep) {
5763                 sp = sptep_to_sp(sptep);
5764                 pfn = spte_to_pfn(*sptep);
5765
5766                 /*
5767                  * We cannot do huge page mapping for indirect shadow pages,
5768                  * which are found on the last rmap (level = 1) when not using
5769                  * TDP; such shadow pages are synced with the guest's page
5770                  * table, and the guest page table uses 4K mappings if the
5771                  * indirect sp has level = 1.
5772                  */
		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
							       pfn, PG_LEVEL_NUM)) {
			pte_list_remove(kvm, rmap_head, sptep);

			if (kvm_available_flush_tlb_with_range())
				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
					KVM_PAGES_PER_HPAGE(sp->role.level));
			else
				need_tlb_flush = 1;

			goto restart;
		}
	}

	return need_tlb_flush;
}

void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *slot)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
		if (flush)
			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
		if (flush)
			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
		read_unlock(&kvm->mmu_lock);
	}
}
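
/*
 * Context note (an assumption, not stated here): zapping collapsible
 * SPTEs is the inverse of the write-protection above; it typically runs
 * when dirty logging is disabled, destroying the small-page mappings so
 * that subsequent faults can rebuild huge pages.
 */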

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
{
	/*
	 * All current use cases for flushing the TLBs for a specific memslot
	 * are related to dirty logging, and many do the TLB flush out of
	 * mmu_lock.  The interactions between the various operations on a
	 * memslot must be serialized by slots_lock to ensure the TLB flush
	 * from one operation is observed by any other operation on the same
	 * memslot.
	 */
	lockdep_assert_held(&kvm->slots_lock);
	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
					   memslot->npages);
}

void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
					 false);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
		read_unlock(&kvm->mmu_lock);
	}

	/*
	 * It's also safe to flush TLBs outside of mmu_lock here, as this
	 * function is currently only used for dirty logging, in which case
	 * flushing TLBs outside of mmu_lock also guarantees that no dirty
	 * pages will be lost in the dirty_bitmap.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
	int ign;

	write_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		if (WARN_ON(sp->role.invalid))
			continue;
		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
			goto restart;
		if (cond_resched_rwlock_write(&kvm->mmu_lock))
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_zap_all(kvm);

	write_unlock(&kvm->mmu_lock);
}

void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
{
	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);

	gen &= MMIO_SPTE_GEN_MASK;

	/*
	 * Generation numbers are incremented in multiples of the number of
	 * address spaces in order to provide unique generations across all
	 * address spaces.  Strip what is effectively the address space
	 * modifier prior to checking for a wrap of the MMIO generation so
	 * that a wrap in any address space is detected.
	 */
	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);

	/*
	 * The very rare case: if the MMIO generation number has wrapped,
	 * zap all shadow pages.
	 */
	if (unlikely(gen == 0)) {
		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
		kvm_mmu_zap_all_fast(kvm);
	}
}
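
/*
 * Worked example (illustrative, assuming KVM_ADDRESS_SPACE_NUM == 2):
 * generations are bumped in steps of 2, so address space 0 observes
 * 0, 2, 4, ... and address space 1 observes 1, 3, 5, ...  The low bit
 * is effectively an address space tag, and
 *
 *	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);	// gen &= ~1ULL
 *
 * strips it, so a wrap to zero in either address space trips the
 * gen == 0 check above.
 */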

static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;
	int nr_to_scan = sc->nr_to_scan;
	unsigned long freed = 0;

	mutex_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
		LIST_HEAD(invalid_list);

		/*
		 * Never scan more than sc->nr_to_scan VM instances.  In
		 * practice this condition is never hit, since no more than
		 * one VM is shrunk per invocation and it is very unlikely
		 * to see !n_used_mmu_pages that many times in a row.
		 */
		if (!nr_to_scan--)
			break;
		/*
		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
		 * here.  We may skip a VM instance erroneously, but we do not
		 * want to shrink a VM that has only just started to populate
		 * its MMU anyway.
		 */
		if (!kvm->arch.n_used_mmu_pages &&
		    !kvm_has_zapped_obsolete_pages(kvm))
			continue;

		idx = srcu_read_lock(&kvm->srcu);
		write_lock(&kvm->mmu_lock);

		if (kvm_has_zapped_obsolete_pages(kvm)) {
			kvm_mmu_commit_zap_page(kvm,
			      &kvm->arch.zapped_obsolete_pages);
			goto unlock;
		}

		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);

unlock:
		write_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		/*
		 * unfair on small ones
		 * per-vm shrinkers cry out
		 * sadness comes quickly
		 */
		list_move_tail(&kvm->vm_list, &vm_list);
		break;
	}

	mutex_unlock(&kvm_lock);
	return freed;
}

static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
	.count_objects = mmu_shrink_count,
	.scan_objects = mmu_shrink_scan,
	.seeks = DEFAULT_SEEKS * 10,
};
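
/*
 * Design note (an assumption based on how the shrinker API weighs
 * objects, not stated in this file): .seeks = DEFAULT_SEEKS * 10
 * advertises MMU pages as expensive to recreate, so the VM reclaims
 * roughly an order of magnitude less from this shrinker than it would
 * from a typical cache under the same memory pressure.
 */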

static void mmu_destroy_caches(void)
{
	kmem_cache_destroy(pte_list_desc_cache);
	kmem_cache_destroy(mmu_page_header_cache);
}

static bool get_nx_auto_mode(void)
{
	/* Return true when the CPU has the bug and mitigations are on. */
	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
}

static void __set_nx_huge_pages(bool val)
{
	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
}

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
{
	bool old_val = nx_huge_pages;
	bool new_val;

	/* In "auto" mode, deploy the workaround only if the CPU has the bug. */
	if (sysfs_streq(val, "off"))
		new_val = 0;
	else if (sysfs_streq(val, "force"))
		new_val = 1;
	else if (sysfs_streq(val, "auto"))
		new_val = get_nx_auto_mode();
	else if (strtobool(val, &new_val) < 0)
		return -EINVAL;

	__set_nx_huge_pages(new_val);

	if (new_val != old_val) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list) {
			mutex_lock(&kvm->slots_lock);
			kvm_mmu_zap_all_fast(kvm);
			mutex_unlock(&kvm->slots_lock);

			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
		}
		mutex_unlock(&kvm_lock);
	}

	return 0;
}
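
/*
 * Usage sketch (illustrative): the parameter is writable at runtime, so
 * the policy can be changed from a root shell, e.g.:
 *
 *	echo force >/sys/module/kvm/parameters/nx_huge_pages
 *	echo auto  >/sys/module/kvm/parameters/nx_huge_pages
 *
 * Changing the value zaps all shadow pages in every VM (via the
 * kvm_mmu_zap_all_fast() call above) so the new policy takes effect
 * for subsequent faults.
 */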

int kvm_mmu_module_init(void)
{
	int ret = -ENOMEM;

	if (nx_huge_pages == -1)
		__set_nx_huge_pages(get_nx_auto_mode());

	/*
	 * MMU roles use union aliasing, which is, generally speaking,
	 * undefined behavior.  However, we supposedly know how compilers
	 * behave and the current status quo is unlikely to change.  The
	 * guards below are supposed to let us know if that assumption ever
	 * becomes false.
	 */
	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));

	kvm_mmu_reset_all_pte_masks();

	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
					    sizeof(struct pte_list_desc),
					    0, SLAB_ACCOUNT, NULL);
	if (!pte_list_desc_cache)
		goto out;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, SLAB_ACCOUNT, NULL);
	if (!mmu_page_header_cache)
		goto out;

	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
		goto out;

	ret = register_shrinker(&mmu_shrinker);
	if (ret)
		goto out;

	return 0;

out:
	mmu_destroy_caches();
	return ret;
}

/*
 * Calculate the number of MMU pages needed for the VM.
 */
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
{
	unsigned long nr_mmu_pages;
	unsigned long nr_pages = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);

		kvm_for_each_memslot(memslot, slots)
			nr_pages += memslot->npages;
	}

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
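
/*
 * Worked example (illustrative, assuming KVM_PERMILLE_MMU_PAGES == 20
 * and KVM_MIN_ALLOC_MMU_PAGES == 64): a VM backed by 4GiB of memory
 * spans 1048576 4KiB pages, so the default allocation is
 *
 *	1048576 * 20 / 1000 = 20971 MMU pages
 *
 * while a very small VM is still granted the 64-page minimum.
 */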

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_mmu_unload(vcpu);
	free_mmu_pages(&vcpu->arch.root_mmu);
	free_mmu_pages(&vcpu->arch.guest_mmu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
	mmu_audit_disable();
}

static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
{
	unsigned int old_val;
	int err;

	old_val = nx_huge_pages_recovery_ratio;
	err = param_set_uint(val, kp);
	if (err)
		return err;

	if (READ_ONCE(nx_huge_pages) &&
	    !old_val && nx_huge_pages_recovery_ratio) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list)
			wake_up_process(kvm->arch.nx_lpage_recovery_thread);

		mutex_unlock(&kvm_lock);
	}

	return err;
}
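
/*
 * Illustrative example: with a ratio of 60, each recovery pass zaps
 * roughly 1/60 of the NX-split huge pages, i.e.
 *
 *	to_zap = DIV_ROUND_UP(nx_lpage_splits, 60);
 *
 * in kvm_recover_nx_lpages() below.  Combined with the worker's fixed
 * 60-second period, a split page survives on the order of an hour on
 * average before it becomes eligible to be rebuilt as a huge page.
 */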

static void kvm_recover_nx_lpages(struct kvm *kvm)
{
	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
	int rcu_idx;
	struct kvm_mmu_page *sp;
	unsigned int ratio;
	LIST_HEAD(invalid_list);
	bool flush = false;
	ulong to_zap;

	rcu_idx = srcu_read_lock(&kvm->srcu);
	write_lock(&kvm->mmu_lock);

	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
	for ( ; to_zap; --to_zap) {
		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
			break;

		/*
		 * We use a separate list instead of just using active_mmu_pages
		 * because the number of lpage_disallowed pages is expected to
		 * be relatively small compared to the total.
		 */
		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
				      struct kvm_mmu_page,
				      lpage_disallowed_link);
		WARN_ON_ONCE(!sp->lpage_disallowed);
		if (is_tdp_mmu_page(sp)) {
			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
		} else {
			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
			WARN_ON_ONCE(sp->lpage_disallowed);
		}

		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
			cond_resched_rwlock_write(&kvm->mmu_lock);
			flush = false;
		}
	}
	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);

	write_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, rcu_idx);
}

static long get_nx_lpage_recovery_timeout(u64 start_time)
{
	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
		? start_time + 60 * HZ - get_jiffies_64()
		: MAX_SCHEDULE_TIMEOUT;
}
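
/*
 * Example (illustrative): if the worker last woke at start_time and 10
 * seconds have since elapsed, the remaining sleep is 50 * HZ jiffies.
 * When either the mitigation or the recovery ratio is disabled, the
 * worker sleeps with MAX_SCHEDULE_TIMEOUT, i.e. indefinitely, until it
 * is explicitly woken by one of the parameter setters above.
 */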

static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
{
	u64 start_time;
	long remaining_time;

	while (true) {
		start_time = get_jiffies_64();
		remaining_time = get_nx_lpage_recovery_timeout(start_time);

		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop() && remaining_time > 0) {
			schedule_timeout(remaining_time);
			remaining_time = get_nx_lpage_recovery_timeout(start_time);
			set_current_state(TASK_INTERRUPTIBLE);
		}

		set_current_state(TASK_RUNNING);

		if (kthread_should_stop())
			return 0;

		kvm_recover_nx_lpages(kvm);
	}
}

int kvm_mmu_post_init_vm(struct kvm *kvm)
{
	int err;

	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
					  "kvm-nx-lpage-recovery",
					  &kvm->arch.nx_lpage_recovery_thread);
	if (!err)
		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);

	return err;
}

void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.nx_lpage_recovery_thread)
		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
}